code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from _utils import *
import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
import seaborn as sns
# Paths: raw TensorBoard-exported CSVs live in `datadir`; figures go to `figdir`.
datadir = "../PPO_Analysis/training_analysis/"
figdir = "../PPO_Analysis/"
fileList = os.listdir(datadir)
# Run identifiers, one per value of n (n = 3..7 in the legend below).
paraSetting = ['0503', '0504', '0505', '0506', '0507']
# Smooth every raw CSV; customSmooth (from _utils) is assumed to write the
# smoothed files into "training_analysis_smooth/" read below — TODO confirm.
for file in fileList:
    customSmooth(datadir, figdir, csv_name = file)
smooth_datadir = "../PPO_Analysis/training_analysis_smooth/"
#### n ########
# TEST-REWARD
N = np.arange(3, 8)
colors = sns.color_palette() #["#496B92", "#ffd9fe", "#ffa538","#024032"]
plotList = []
plt.style.use('seaborn')
plt.figure(figsize=(8, 5))
for i in range(len(paraSetting)):
    par, n, color = paraSetting[i], N[i], colors[i]
    parDataDir = smooth_datadir
    data = pd.read_csv(parDataDir + par + "-Reward_greedy_evaluate.csv")
    rewardTest = pd.DataFrame(data)
    # Raw values (rows 100 onward) drawn faintly; smoothed curve ("SValue") on top.
    plt.plot(rewardTest['Step'][100:], rewardTest["Value"][100:], color = color, alpha = 0.3)
    l, = plt.plot(rewardTest["Step"], rewardTest["SValue"], color = color)
    plotList.append(l)
plt.legend(handles=plotList, loc = 'lower right', labels = ["$n$ = 3","$n$ = 4","$n$ = 5", "$n$ = 6", "$n$ = 7"], fontsize = 22)
# plt.axvspan(xmin =plt.xlim()[0], xmax=100000, facecolor="grey", alpha=0.3)
plt.xlabel("Steps", fontsize=20)
plt.ylabel("Test Reward", fontsize=20)
plt.savefig(figdir + "patient011_n_TestReward.png", dpi =300)
plt.show()
| [
"os.listdir",
"matplotlib.pyplot.savefig",
"seaborn.color_palette",
"matplotlib.pyplot.ylabel",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"numpy.arange",
... | [((225, 244), 'os.listdir', 'os.listdir', (['datadir'], {}), '(datadir)\n', (235, 244), False, 'import os\n'), ((468, 483), 'numpy.arange', 'np.arange', (['(3)', '(8)'], {}), '(3, 8)\n', (477, 483), True, 'import numpy as np\n'), ((493, 512), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (510, 512), True, 'import seaborn as sns\n'), ((572, 596), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (585, 596), True, 'import matplotlib.pyplot as plt\n'), ((597, 623), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (607, 623), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1171), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'plotList', 'loc': '"""lower right"""', 'labels': "['$n$ = 3', '$n$ = 4', '$n$ = 5', '$n$ = 6', '$n$ = 7']", 'fontsize': '(22)'}), "(handles=plotList, loc='lower right', labels=['$n$ = 3',\n '$n$ = 4', '$n$ = 5', '$n$ = 6', '$n$ = 7'], fontsize=22)\n", (1053, 1171), True, 'import matplotlib.pyplot as plt\n'), ((1250, 1282), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Steps"""'], {'fontsize': '(20)'}), "('Steps', fontsize=20)\n", (1260, 1282), True, 'import matplotlib.pyplot as plt\n'), ((1284, 1322), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Test Reward"""'], {'fontsize': '(20)'}), "('Test Reward', fontsize=20)\n", (1294, 1322), True, 'import matplotlib.pyplot as plt\n'), ((1324, 1384), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(figdir + 'patient011_n_TestReward.png')"], {'dpi': '(300)'}), "(figdir + 'patient011_n_TestReward.png', dpi=300)\n", (1335, 1384), True, 'import matplotlib.pyplot as plt\n'), ((1386, 1396), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1394, 1396), True, 'import matplotlib.pyplot as plt\n'), ((753, 814), 'pandas.read_csv', 'pd.read_csv', (["(parDataDir + par + '-Reward_greedy_evaluate.csv')"], {}), "(parDataDir + par + '-Reward_greedy_evaluate.csv')\n", (764, 814), True, 
'import pandas as pd\n'), ((832, 850), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (844, 850), True, 'import pandas as pd\n'), ((855, 944), 'matplotlib.pyplot.plot', 'plt.plot', (["rewardTest['Step'][100:]", "rewardTest['Value'][100:]"], {'color': 'color', 'alpha': '(0.3)'}), "(rewardTest['Step'][100:], rewardTest['Value'][100:], color=color,\n alpha=0.3)\n", (863, 944), True, 'import matplotlib.pyplot as plt\n'), ((954, 1017), 'matplotlib.pyplot.plot', 'plt.plot', (["rewardTest['Step']", "rewardTest['SValue']"], {'color': 'color'}), "(rewardTest['Step'], rewardTest['SValue'], color=color)\n", (962, 1017), True, 'import matplotlib.pyplot as plt\n')] |
"""Marmot Dataset Module."""
from pathlib import Path
from typing import List
import numpy as np
import pytorch_lightning as pl
from albumentations import Compose
from PIL import Image
from torch.utils.data import Dataset, DataLoader
class MarmotDataset(Dataset):
    """Marmot Dataset of document images with table/column segmentation masks."""

    def __init__(self, data: List[Path], transforms: Compose = None) -> None:
        """Marmot Dataset initialization.

        Args:
            data (List[Path]): Image file paths; masks are resolved relative to them.
            transforms (Optional[Compose]): Compose object from albumentations.
                When provided, it is expected to convert image/mask to torch
                tensors (e.g. via ToTensorV2) so ``unsqueeze`` works below.
        """
        self.data = data
        self.transforms = transforms

    def __len__(self):
        """Dataset Length."""
        return len(self.data)

    def __getitem__(self, item):
        """Get sample data.

        Args:
            item (int): sample id.

        Returns (Tuple[tensor, tensor, tensor]): Image, Table Mask, Column Mask
        """
        sample_id = self.data[item].stem
        image_path = self.data[item]
        # Masks live in sibling directories "table_mask" / "column_mask"
        # next to the image directory, sharing the sample stem.
        table_path = self.data[item].parent.parent.joinpath("table_mask", sample_id + ".bmp")
        column_path = self.data[item].parent.parent.joinpath("column_mask", sample_id + ".bmp")
        image = np.array(Image.open(image_path))
        table_mask = np.expand_dims(np.array(Image.open(table_path)), axis=2)
        column_mask = np.expand_dims(np.array(Image.open(column_path)), axis=2)
        # Stack the two masks into (H, W, 2) and scale {0, 255} -> {0, 1}.
        mask = np.concatenate([table_mask, column_mask], axis=2) / 255
        sample = {"image": image, "mask": mask}
        if self.transforms:
            sample = self.transforms(image=image, mask=mask)
        image = sample["image"]
        mask = sample["mask"]
        # Bug fix: without transforms, ``mask`` is a numpy array which has no
        # ``unsqueeze``; the original raised AttributeError in that case. Use
        # np.expand_dims for numpy input, .unsqueeze(0) for tensors.
        if isinstance(mask, np.ndarray):
            mask_table = np.expand_dims(mask[:, :, 0], axis=0)
            mask_column = np.expand_dims(mask[:, :, 1], axis=0)
        else:
            mask_table = mask[:, :, 0].unsqueeze(0)
            mask_column = mask[:, :, 1].unsqueeze(0)
        return image, mask_table, mask_column
class MarmotDataModule(pl.LightningDataModule):
    """Pytorch Lightning Data Module wrapping the Marmot dataset splits."""

    def __init__(self, data_dir: str = "./data", transforms_preprocessing: Compose = None,
                 transforms_augmentation: Compose = None, batch_size: int = 8, num_workers: int = 4):
        """Initialize the Marmot Data Module.

        Args:
            data_dir (str): Dataset directory (searched recursively for *.bmp files).
            transforms_preprocessing (Optional[Compose]): albumentations Compose applied
                to the validation and test datasets.
            transforms_augmentation (Optional[Compose]): albumentations Compose applied
                to the training dataset.
            batch_size (int): Batch size for all dataloaders.
            num_workers (int): Number of worker processes loading data.
        """
        super().__init__()
        self.data = list(Path(data_dir).rglob("*.bmp"))
        self.transforms_preprocessing = transforms_preprocessing
        self.transforms_augmentation = transforms_augmentation
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.setup()

    def setup(self, stage: str = None) -> None:
        """Build the train/val/test datasets with an 80/10/10 split.

        Args:
            stage (Optional[str]): Used to separate setup logic for trainer.fit
                and trainer.test (unused here).
        """
        total = len(self.data)
        self.data.sort()
        # Deterministic 80/10/10 split over the sorted file list.
        train_end = int(total * 0.8)
        val_end = int(total * 0.9)
        self.complaint_train = MarmotDataset(self.data[:train_end],
                                             transforms=self.transforms_augmentation)
        self.complaint_val = MarmotDataset(self.data[train_end:val_end],
                                           transforms=self.transforms_preprocessing)
        self.complaint_test = MarmotDataset(self.data[val_end:],
                                            transforms=self.transforms_preprocessing)

    def train_dataloader(self, *args, **kwargs) -> DataLoader:
        """Return the (shuffled) training DataLoader."""
        return DataLoader(self.complaint_train, batch_size=self.batch_size,
                          shuffle=True, num_workers=self.num_workers)

    def val_dataloader(self, *args, **kwargs) -> DataLoader:
        """Return the validation DataLoader."""
        return DataLoader(self.complaint_val, batch_size=self.batch_size,
                          num_workers=self.num_workers)

    def test_dataloader(self, *args, **kwargs) -> DataLoader:
        """Return the test DataLoader."""
        return DataLoader(self.complaint_test, batch_size=self.batch_size,
                          num_workers=self.num_workers)
| [
"PIL.Image.open",
"pathlib.Path",
"numpy.concatenate",
"torch.utils.data.DataLoader"
] | [((3878, 3986), 'torch.utils.data.DataLoader', 'DataLoader', (['self.complaint_train'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': 'self.num_workers'}), '(self.complaint_train, batch_size=self.batch_size, shuffle=True,\n num_workers=self.num_workers)\n', (3888, 3986), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((4131, 4224), 'torch.utils.data.DataLoader', 'DataLoader', (['self.complaint_val'], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_workers'}), '(self.complaint_val, batch_size=self.batch_size, num_workers=self\n .num_workers)\n', (4141, 4224), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((4369, 4463), 'torch.utils.data.DataLoader', 'DataLoader', (['self.complaint_test'], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_workers'}), '(self.complaint_test, batch_size=self.batch_size, num_workers=\n self.num_workers)\n', (4379, 4463), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1215, 1237), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1225, 1237), False, 'from PIL import Image\n'), ((1412, 1461), 'numpy.concatenate', 'np.concatenate', (['[table_mask, column_mask]'], {'axis': '(2)'}), '([table_mask, column_mask], axis=2)\n', (1426, 1461), True, 'import numpy as np\n'), ((1284, 1306), 'PIL.Image.open', 'Image.open', (['table_path'], {}), '(table_path)\n', (1294, 1306), False, 'from PIL import Image\n'), ((1363, 1386), 'PIL.Image.open', 'Image.open', (['column_path'], {}), '(column_path)\n', (1373, 1386), False, 'from PIL import Image\n'), ((2670, 2684), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (2674, 2684), False, 'from pathlib import Path\n')] |
import time, os
from pynvml import *
from subprocess import Popen
import numpy as np
nvmlInit()
import pandas as pd
def run_command(cmd, minmem=2,use_env_variable=True, admissible_gpus=[1],sleep=60):
    """Block until an admissible GPU has at least `minmem` GiB free, then launch `cmd`.

    Args:
        cmd: command template; "{}" is filled with a device index (0 when
            CUDA_VISIBLE_DEVICES remaps the visible devices).
        minmem: minimum free GPU memory in GiB required before launching.
        use_env_variable: if True, pin the process with CUDA_VISIBLE_DEVICES and
            launch it detached via Popen; otherwise run synchronously with os.system.
        admissible_gpus: GPU indices (subset of {0, 1}) allowed to be used.
        sleep: polling interval in seconds between NVML free-memory checks.

    NOTE(review): mutable default `admissible_gpus=[1]` is shared across calls;
    it is never mutated here, but a tuple default would be safer.
    """
    sufficient_memory = False
    gpu_idx=0
    while not sufficient_memory:
        time.sleep(sleep)
        # Check free memory
        info = nvmlDeviceGetMemoryInfo(nvmlDeviceGetHandleByIndex(0))
        free_0 = info.free/ 1024 / 1024 / 1024 if 0 in admissible_gpus else 0
        info = nvmlDeviceGetMemoryInfo(nvmlDeviceGetHandleByIndex(1))
        free_1 = info.free / 1024 / 1024 / 1024 if 1 in admissible_gpus else 0
        if not use_env_variable: #safe mode
            # Safe mode waits for BOTH admissible GPUs to have enough memory.
            sufficient_memory = np.minimum(free_0,free_1) >=minmem # 4.5 Gb
        else:
            sufficient_memory = np.maximum(free_0, free_1) >= minmem # 4.5 Gb
            # Pick the GPU with the most free memory.
            # NOTE(review): indentation reconstructed from a flattened dump —
            # confirm this assignment belongs inside the else-branch.
            gpu_idx = np.argmax([free_0,free_1])
        # if not sufficient_memory:
        #     time.sleep(60)
    if use_env_variable:
        # os.system('CUDA_VISIBLE_DEVICES="{}" '.format(gpu_idx) +cmd)
        proc = Popen(['CUDA_VISIBLE_DEVICES="{}" '.format(gpu_idx) + cmd.format(0)], shell=True,
               stdin=None, stdout=None, stderr=None, close_fds=True)
        print('CUDA_VISIBLE_DEVICES="{}" '.format(gpu_idx) + cmd.format(0))
    else:
        os.system(cmd.format(gpu_idx))
import sys
sys.path.append("../")
# Experiment configuration. The commented-out lines below are earlier
# dataset/scribble settings kept for reference; only the last assignment of
# each variable takes effect.
# dataset = 'adult'
# dataset = 'MIBI1CH'
# scribbles_list = ['200']
# dataset = 'MIBI1CH'
# scribbles_list = ['200']
# dataset = 'Vectra_2CH'
# scribbles_list = ['200','300']
#
# dataset = 'MIBI2CH'
# scribbles_list = ['150','250','100']
dataset = 'cellpose'
scribbles_list = ['300']
dataset = 'MIBI1CH'
# scribbles_list = ['100', '200']
scribbles_list = ['100']
dataset_list = ['MIBI1CH_Bladder', 'MIBI1CH_Lung']
dataset_list = ['MIBI1CH_Lung']
# dataset_list = ['MIBI1CH_Bladder']
scribbles_list = ['200','400']
scribbles_list = ['200']
saveout = True
file_bash_name = dataset+'_bash.sh'
# Model name prefixes paired index-wise with U-Net base widths in ubase_list.
model_name_prefix_list=['MS_2tasks_base32depth4relu_adam5e4_mcdrop1e4_nsave5_',
                        'MS_2tasks_base64depth4relu_adam5e4_mcdrop1e4_nsave5_']
ubase_list = [32,64]
model_name_prefix_list=['MS_2tasks_base32depth4relu_adam5e4_mcdrop1e4_nsave5_']
ubase_list = [32]
# model_name_prefix = 'MS_2tasks_base64depth4relu_adam5e4_gclip10_nsave5_'
# model_name_prefix = 'MS_2tasks_base64depth4relu_adam5e4_gclip10_nsave6_'
# model_name_prefix = 'MS_2tasks_base64depth4relu_adam5e4_nsave5_'
# model_name_prefix = 'MS_2tasks_base64depth4relu_adam5e4_mcdrop1e4_nsave5_'
# model_name_prefix = 'MS_2tasks_base64depth4relu_adam5e4_nsave5_'
# Training hyperparameters passed through to main_ms.py.
mcdrop = True
train = False
load = True
nsaves = 5
reset_optim = True
optim = 'adam' #RMSprop
lr=5e-4
optim_regw = 1e-4
udepth = 4
activation = 'relu'
batchnorm = False
epochs=400
batch = 64
seed_list=[43,44]
seed_list=[42]
gpu = 1
gradclip = 0
# weights_dic = {'02505':[0.25, 0.25, 0.49, 0.01],
#                '04501': [0.45, 0.45, 0.09, 0.01],
#                '00509': [0.05, 0.05, 0.89, 0.01]} #wfore, wback, wrec, wreg
#
# weights_dic = {'04501': [0.45, 0.45, 0.09, 0.01],
#                '02505': [0.25, 0.25, 0.49, 0.01]} #wfore, wback, wrec, wreg
weights_dic = {'04501': [0.45, 0.45, 0.09, 0.01]} #wfore, wback, wrec, wreg
# weights_dic = {'00509': [0.05, 0.05, 0.89, 0.01]} #wfore, wback, wrec, wreg
losses_dic = {'segCErecL2':['CE','L2']}
rows_model = []
basedir_root = '/data/natalia/models/'
file_bash_name = 'MS_bash.sh'
# Iterate over the cross-product of seeds, datasets, model prefixes, scribble
# counts, losses and weight settings; launch each training run through
# run_command() and also record the equivalent command in a bash script.
# NOTE(review): nesting reconstructed from a flattened dump — verify the
# placement of the trailing f.write() calls against the original.
with open(file_bash_name, 'w') as f:
    str_root = 'basedir_root="{}"'.format(basedir_root)
    f.write(str_root + '\n\n\n')
    for seed in seed_list:
        if seed == 42:
            saveout = True
        else:
            saveout = False
        for dataset in dataset_list:
            ix_model = 0
            for model_name_prefix in model_name_prefix_list:
                ubase = ubase_list[ix_model]
                ix_model += 1
                if ubase == 128:
                    batch = 32
                for scribbles in scribbles_list:
                    basedir_local = dataset + '/s' + scribbles + '/MS/'
                    basedir = basedir_root + basedir_local
                    for loss_key in losses_dic.keys():
                        for weights_key in weights_dic.keys():
                            loss_list = losses_dic[loss_key]
                            weights_list = weights_dic[weights_key]
                            out_file_ext = dataset + '_' + model_name_prefix + loss_key + '_w'+ weights_key +'_seed' + str(seed) + '_verbose'
                            model_name = model_name_prefix + loss_key + '_w'+ weights_key +'_seed' + str(seed)
                            # Assemble the full main_ms.py invocation piece by piece.
                            cmd = 'python main_ms.py --basedir="{}" --dataset="{}" --model_name="{}" --saveout={} --scribbles={}'.format(basedir,dataset, model_name,saveout,scribbles)
                            # cmd = 'python main_ms.py --basedir={}"{}" --dataset="{}" --model_name="{}" --saveout={} --scribbles={} --gpu={}'.format('$basedir_root',basedir_local, dataset, model_name,saveout,scribbles,gpu)
                            cmd = cmd + ' --optim_regw={} --optim="{}" --lr={} --gradclip={} --seed={} --train={}'.format(optim_regw, optim, lr, gradclip, seed,train)
                            cmd = cmd + ' --udepth="{}" --ubase="{}" --activation="{}" --batchnorm={}'.format(udepth,ubase,activation,batchnorm)
                            cmd = cmd + ' --seg_loss="{}" --rec_loss="{}" --nsaves={} --mcdrop={} --reset_optim={}'.format(loss_list[0], loss_list[1],nsaves,mcdrop,reset_optim)
                            cmd = cmd + ' --wfore={} --wback={} --wrec={} --wreg={}'.format(weights_list[0], weights_list[1], weights_list[2], weights_list[3])
                            cmd = cmd + ' --epochs={} --batch={} --load={} > {}.txt'.format(epochs,batch,load,out_file_ext)
                            rows_model.append(basedir_local + model_name + '/')
                            # Larger models need more free GPU memory before launch.
                            if ubase == 32:
                                run_command(cmd, minmem=5.5, use_env_variable=True, admissible_gpus=[1], sleep=60)
                            else:
                                run_command(cmd, minmem=8, use_env_variable=True, admissible_gpus=[1], sleep=60)
                            f.write(cmd + '\n\n\n')
                        f.write('\n\n\n')
                    f.write('\n\n\n')
# Persist the list of trained model paths for downstream analysis.
pd_model = pd.DataFrame(data = rows_model, columns=['model_path'])
pd_model.to_csv('model_path.csv',index = 0)
"numpy.minimum",
"numpy.argmax",
"time.sleep",
"pandas.DataFrame",
"numpy.maximum",
"sys.path.append"
] | [((1360, 1382), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (1375, 1382), False, 'import sys\n'), ((6385, 6438), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'rows_model', 'columns': "['model_path']"}), "(data=rows_model, columns=['model_path'])\n", (6397, 6438), True, 'import pandas as pd\n'), ((287, 304), 'time.sleep', 'time.sleep', (['sleep'], {}), '(sleep)\n', (297, 304), False, 'import time, os\n'), ((862, 889), 'numpy.argmax', 'np.argmax', (['[free_0, free_1]'], {}), '([free_0, free_1])\n', (871, 889), True, 'import numpy as np\n'), ((706, 732), 'numpy.minimum', 'np.minimum', (['free_0', 'free_1'], {}), '(free_0, free_1)\n', (716, 732), True, 'import numpy as np\n'), ((797, 823), 'numpy.maximum', 'np.maximum', (['free_0', 'free_1'], {}), '(free_0, free_1)\n', (807, 823), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import time
from capslayer import layers
from capslayer import losses
from capslayer import ops
class SquashTest(tf.test.TestCase):
    """Unit and timing tests for the capsule squash non-linearities in capslayer.ops."""

    def testSquash(self):
        """Checks the value and shape of the squash output given an input."""
        input_tensor = tf.ones((1, 1, 1, 1, 1, 1))
        squashed = ops.squash(input_tensor)
        # Squash must preserve the 6-D rank of the input.
        self.assertEqual(len(squashed.get_shape()), 6)
        with self.test_session() as sess:
            r_squashed = sess.run(squashed)
            # For a unit-norm input, squash scales by ||v||^2 / (1 + ||v||^2) = 0.5.
            scale = 0.5
            self.assertEqual(np.array(r_squashed).shape, input_tensor.get_shape())
            self.assertAllClose(np.linalg.norm(r_squashed, axis=2), [[[[[scale]]]]])
    def testSquash_(self):
        """Checks the value and shape of the squash output given an input."""
        # Same contract as testSquash, but for the alternative ops._squash implementation.
        input_tensor = tf.ones((1, 1, 1, 1, 1, 1))
        squashed = ops._squash(input_tensor)
        self.assertEqual(len(squashed.get_shape()), 6)
        with self.test_session() as sess:
            r_squashed = sess.run(squashed)
            scale = 0.5
            self.assertEqual(np.array(r_squashed).shape, input_tensor.get_shape())
            self.assertAllClose(np.linalg.norm(r_squashed, axis=2), [[[[[scale]]]]])
    # def testProcessSquashDetail(self):
    #     vector = tf.ones((1, 1, 1, 1, 20, 1))
    #     squared_norm = tf.reduce_sum(tf.square(vector), -2, keepdims=True)
    #     scalar_factor = squared_norm / (1 + squared_norm) / tf.sqrt(squared_norm + 0.000001)
    #     result = scalar_factor * vector
    #     with self.test_session() as sess:
    #         r_squared_norm, r_scalar_factor, r_result = sess.run([squared_norm, scalar_factor, result])
    #         print('r_squared_norm',r_squared_norm)
    #         print('r_scalar_factor', r_scalar_factor)
    #         print('r_result', r_result)
    #         print('result shape', np.shape(r_result))
    #
    # def testProcessSquashDetail_(self):
    #     vector = tf.ones((1, 1, 20, 1, 1, 1))
    #     norm = tf.norm(vector, 2, keepdims=True)
    #     norm_squared = norm * norm
    #     result = (vector / norm) * (norm_squared / (1 + norm_squared))
    #     with self.test_session() as sess:
    #         r_norm, r_norm_squared, r_result = sess.run([norm, norm_squared, result])
    #         print('r_norm',r_norm)
    #         print('r_norm_squared', r_norm_squared)
    #         print('r_result', r_result)
    #         print('result shape', np.shape(r_result))
    def testSquashTime(self):
        """Benchmarks 100 runs of ops.squash on a large batch (prints wall time)."""
        input_tensor = tf.ones((128, 1, 1000, 1, 20, 1))
        squashed = ops.squash(input_tensor,axis=-2)
        with self.test_session() as sess:
            time1 = time.time()
            for i in range(100):
                r_squashed = sess.run(squashed)
            time2 = time.time()
            print('Squash time:',time2-time1)
    def testSquashTime_(self):
        """Benchmarks 100 runs of ops._squash on the same input for comparison."""
        input_tensor = tf.ones((128, 1, 1000, 1, 20, 1))
        squashed = ops._squash(input_tensor, axis=-2)
        with self.test_session() as sess:
            time1 = time.time()
            for i in range(100):
                r_squashed = sess.run(squashed)
            time2 = time.time()
            print('_Squash time:',time2-time1)
# Run all tf.test.TestCase classes in this module when executed as a script.
if __name__ == '__main__':
    tf.test.main()
| [
"tensorflow.ones",
"tensorflow.test.main",
"capslayer.ops.squash",
"numpy.array",
"numpy.linalg.norm",
"time.time",
"capslayer.ops._squash"
] | [((3204, 3218), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3216, 3218), True, 'import tensorflow as tf\n'), ((306, 333), 'tensorflow.ones', 'tf.ones', (['(1, 1, 1, 1, 1, 1)'], {}), '((1, 1, 1, 1, 1, 1))\n', (313, 333), True, 'import tensorflow as tf\n'), ((353, 377), 'capslayer.ops.squash', 'ops.squash', (['input_tensor'], {}), '(input_tensor)\n', (363, 377), False, 'from capslayer import ops\n'), ((828, 855), 'tensorflow.ones', 'tf.ones', (['(1, 1, 1, 1, 1, 1)'], {}), '((1, 1, 1, 1, 1, 1))\n', (835, 855), True, 'import tensorflow as tf\n'), ((875, 900), 'capslayer.ops._squash', 'ops._squash', (['input_tensor'], {}), '(input_tensor)\n', (886, 900), False, 'from capslayer import ops\n'), ((2483, 2516), 'tensorflow.ones', 'tf.ones', (['(128, 1, 1000, 1, 20, 1)'], {}), '((128, 1, 1000, 1, 20, 1))\n', (2490, 2516), True, 'import tensorflow as tf\n'), ((2536, 2569), 'capslayer.ops.squash', 'ops.squash', (['input_tensor'], {'axis': '(-2)'}), '(input_tensor, axis=-2)\n', (2546, 2569), False, 'from capslayer import ops\n'), ((2853, 2886), 'tensorflow.ones', 'tf.ones', (['(128, 1, 1000, 1, 20, 1)'], {}), '((128, 1, 1000, 1, 20, 1))\n', (2860, 2886), True, 'import tensorflow as tf\n'), ((2906, 2940), 'capslayer.ops._squash', 'ops._squash', (['input_tensor'], {'axis': '(-2)'}), '(input_tensor, axis=-2)\n', (2917, 2940), False, 'from capslayer import ops\n'), ((646, 680), 'numpy.linalg.norm', 'np.linalg.norm', (['r_squashed'], {'axis': '(2)'}), '(r_squashed, axis=2)\n', (660, 680), True, 'import numpy as np\n'), ((1169, 1203), 'numpy.linalg.norm', 'np.linalg.norm', (['r_squashed'], {'axis': '(2)'}), '(r_squashed, axis=2)\n', (1183, 1203), True, 'import numpy as np\n'), ((2631, 2642), 'time.time', 'time.time', ([], {}), '()\n', (2640, 2642), False, 'import time\n'), ((2744, 2755), 'time.time', 'time.time', ([], {}), '()\n', (2753, 2755), False, 'import time\n'), ((3003, 3014), 'time.time', 'time.time', ([], {}), '()\n', (3012, 3014), False, 'import time\n'), 
((3116, 3127), 'time.time', 'time.time', ([], {}), '()\n', (3125, 3127), False, 'import time\n'), ((564, 584), 'numpy.array', 'np.array', (['r_squashed'], {}), '(r_squashed)\n', (572, 584), True, 'import numpy as np\n'), ((1087, 1107), 'numpy.array', 'np.array', (['r_squashed'], {}), '(r_squashed)\n', (1095, 1107), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provide the 'Efficient Lifelong Learning Algorithm' (ELLA).
The ELLA algorithm is an online multi-task learning algorithm that maintains a shared knowledge database that can be
trained and used to incorporate new knowledge to improve the performance on multiple tasks [1,2,3].
The code presented here is based on [3,4].
References:
[1] "Learning Task Grouping and Overlap in Multi-Task Learning" (GO-MTL), Kumar et al., 2012
[2] "ELLA: An Efficient Lifelong Learning Algorithm", Ruvolo et al., 2013
[3] "Online Multi-Task Learning for Policy Gradient Methods", Ammar et al., 2014
[4] Implementation of ELLA on Github (by <NAME>): https://github.com/paulruvolo/ELLA
[5] Implementation of PG-ELLA on Github (by ): https://github.com/cdcsai/Online_Multi_Task_Learning
"""
import torch
import numpy as np
from pyrobolearn.utils.torch_utils import kronecker
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["<NAME>", "<NAME>", "<NAME>"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class ELLA(object):
    r"""Efficient Lifelong Learning Algorithm (ELLA)
    Type:: online multi-task, parametric
    ELLA is an online multi-task learning algorithm that maintains and refines (given new tasks) a shared basis for all
    task models, allowing to transfer knowledge from previous tasks and improve the performance on all tasks (including
    previous ones).
    In the multi-task learning paradigm, we are given a series of learning tasks: math:`Z^{(1)}, \cdots, Z^{(T_{max})}`,
    where each task:
    - in the supervised learning (SL) case is expressed as :math:`Z^{(t)} = (\hat{f}^{(t)}, X^{(t)}, y^{(t)}`, with
    :math:`\hat{f}` being the hidden true function that maps the input set to the output set, and the goal is to find
    the parametric function :math:`y = f(x; \theta^{(t)})` where :math:`\theta^{(t)}` are the parameters.
    - in the reinforcement learning (RL) case is given by
    :math:`Z^{(t)} = \langle S_0^{(t)}, S^{(t)}, A^{(t)}, P^{(t)}, R^{(t)}, \gamma^{(t)} \rangle`, where the goal is
    to find the policy :math:`\pi_{\theta^{(t)}}(a_t | s_t)` that maps states to actions, given the parameters
    :math:`\theta^{(t)}`.
    ELLA [2,3] achieves this by following the approach proposed in [1]; by maintaining and sharing a library of latent
    model components :math:`L \in \mathbb{R}^{d \times k}` between the various tasks such that the parameters for any
    given task `t` are given by :math:`\theta^{(t)} = L s^{(t)}`, with :math:`s^{(t)} \in \mathbb{R}^{k}` being the
    sparse latent weight vector; that is, :math:`\theta^{(t)}` is given by the superposition of the latent basis vector
    in :math:`L` and where the coefficients for each basis vector is given by the elements in :math:`s^{(t)}`.
    The ELLA minimizes the following objective function:
    .. math:: e_T(L) = \frac{1}{T} \sum_{t=1}^T \min_{s^(t)} \left[ \mathcal{L}(\theta^{(t)}) + \mu ||s^{(t)}||_1
        \right] + \lambda ||L||^2_F
    where :math:`T` is the number of tasks encountered so far, and :math:`\mathcal{L}(\theta^{(t)})` is the loss
    function which is given by:
    - in the SL case: :math:`\mathcal{L}(\theta^{(t)}) = \frac{1}{n_t} \sum_{i=1}^{n_t} \mathcal{L}(f(x_i^{(t)};
    \theta^{(t)}), y_i^{(t)})`, where :math:`x_i^{(t)}` and :math:`y_i^{(t)}` are the input and output data for the
    task :math:`t`, and :math:`\theta^{(t)} = L s^{(t)}` are the parameters.
    - in the RL case: :math:`\mathcal{L}(\theta^{(t)}) = - \mathcal{J}(\theta^{(t)})`, where
    :math:`\mathcal{J}(\theta^{(t)}) = \int_{\mathbb{T}^{(t)}} p_{\theta^{(t)}}(\tau) R(\tau) d\tau` is the expected
    average return on task :math:`t`.
    Now, there are two problems with the formulation above:
    1. the explicit dependency to all the training data or trajectories including the ones from the previous tasks
    through the inner summation (i.e. :math:`\sum_{i=1}^{n_t}` in the SL case, and :math:`\int_{\mathbb{T}^{(t)}}` in
    the RL case). Ideally, we would like to only depend on the training data and trajectories for the current task.
    This is performed by approximating the objective function using the second-order Taylor expansion of
    :math:`\mathcal{L}(\theta^{(t)})` around the best parameters
    :math:`\theta^{(t)*} = \arg \min_{\theta} \mathcal{L}(\theta)`.
    2. the evaluation of the :math:`s^{(t)}`'s for each task by solving the minimization problem which can become
    increasingly expensive as the number of tasks increased. Ideally, we would like to only perform the optimization
    for the current task and exploit the fact that while we only update the current :math:`s^{(t)}`, the
    library :math:`L` is updated for each task, and thus the tasks can still benefit from this last one. In [2,3],
    it has been observed that the choice of only updating the current :math:`s^{(t)}` does not affect the quality of
    the model as the number of tasks grows large.
    ...
    Warnings:
        - The ELLA requires the computation of the parameters that minimize the loss function, and most importantly
          the Hessian matrix of the loss function with respect to the parameters evaluated at the best found above
          parameters. Depending on the number of parameters that matrix can be big and expensive to compute.
        - The ELLA assumes that the input and output dimension of the model as well as its number of parameters is
          constant between the various tasks. For different state and action spaces between the tasks, see inter-task
          mappings [6], or ELLA using task groups where tasks in the same group share a common state and action space,
          such that :math:`\theta^{(t)} = B^{(g)}s^{(t)}` with :math:`B^{(g)} = \Phi^{(g)} L` where :math:`g` denotes
          the task group, :math:`B^{(g)}` is the latent model components shared withing :math:`g`, :math:`L` is the
          global latent model components, and :math:`\Phi^{(g)}` is the mapping from :math:`L` to :math:`B^{(g)}` [7].
        - The complexity of each ELLA update is :math:`O(k^2 d^3, \xi(d, n_t))` where :math:`k` is the number of latent
          components, :math:`d` is the dimensionality of the parameters, :math:`n_t` is the number of data instances (in
          SL) or trajectories (in RL), and :math:`\xi` is the function that computes the complexity to compute the
          best set of parameters and the Hessian matrix for the current task :math:`t`.
    The current implementation is based on [4,5].
    Pseudo-algorithm
    ----------------
    1. Inputs: k=number of latent components, d=dimensionality of parameters, lambda=L2 norm coefficient,
        mu=L1 norm coefficient
    2. Init: T=0, A=zeros(k*d,k*d), b=zeros(k*d,1), L=zeros(d,k)
    3. ...
    References:
        [1] "Learning Task Grouping and Overlap in Multi-Task Learning" (GO-MTL), Kumar et al., 2012
        [2] "ELLA: An Efficient Lifelong Learning Algorithm", Ruvolo et al., 2013
        [3] "Online Multi-Task Learning for Policy Gradient Methods", Ammar et al., 2014
        [4] Implementation of ELLA on Github (by <NAME>): https://github.com/paulruvolo/ELLA
        [5] Implementation of PG-ELLA on Github (by ): https://github.com/cdcsai/Online_Multi_Task_Learning
        [6] "Transfer learning via inter-task mappings for temporal difference learning", Taylor et al., 2007
        [7] "Autonomous cross-domain knowledge transfer in lifelong policy gradient reinforcement learning", Ammar et
            al., 2015
    """
    def __init__(self, num_parameters, num_latent_component, l1_sparsity_coefficient=1., l2_library_coefficient=1.):
        r"""
        Initialize ELLA.
        Args:
            num_parameters (int): number of model parameters (in the papers [2,3], this is the `d` variable).
            num_latent_component (int): number of latent component (in the papers [2,3], this is the `k` variable).
            l1_sparsity_coefficient (float): coefficient for the L1 norm applied on the sparse weight vector
                :math:`s^{(t)}` (in the papers [2,3], this is the `\mu` variable).
            l2_library_coefficient (float): coefficient for the L2 norm applied on the shared library basis :math:`L`
                (in the papers [2,3], this is the `\lambda` variable).
        """
        d, k = num_parameters, num_latent_component
        self.d = num_parameters
        self.k = num_latent_component
        self.l1_coeff = l1_sparsity_coefficient
        self.l2_coeff = l2_library_coefficient
        self.A = torch.zeros((d*k, d*k))  # A matrix used to compute the shared library L=A^{-1}b
        self.b = torch.zeros((d*k, 1))  # b vector used to compute the shared library L=A^{-1}b
        self.s = torch.zeros(k)  # sparse weight vector
        self.L = torch.randn((self.d, self.k))  # shared library of basis vectors
        self.num_task = 0  # counter for the number of tasks encountered
        self.tasks = {}  # dict of encountered tasks
    def train(self, task, save_task_parameters=True):
        """
        Train using ELLA [2,3].

        NOTE(review): this method is unfinished (see the TODOs below):
        `hessian` is set to None and then used in `hessian.matmul(...)`, which
        would raise AttributeError if executed as-is.

        Args:
            task (ILTask, RLTask): task. All the tasks must contain the same policy.
        Returns:
        """
        # if new task
        if task not in self.tasks:
            self.num_task += 1
            # update dataset or collect trajectory randomly
            # TODO
        else:
            # get previous theta, hessian, and s vector
            values = self.tasks[task]
            theta, hessian, s = values['theta'], values['hessian'], values['s']
            # update A and b with respect to these previous parameters
            # (remove this task's old contribution before re-adding the new one below)
            self.A -= kronecker(s.matmul(s.t()), hessian)
            self.b -= kronecker(s.t(), theta.t().matmul(hessian)).view(-1, 1)
            # update dataset or collect trajectories using theta
            # TODO
        # compute the best parameters and hessian matrix evaluated at these best parameters
        # task.train()
        theta = task.best_parameters
        hessian = None  # TODO
        # reinitialize the columns of the library L
        self.reinitialize_zero_columns()
        # optimize the loss with respect to s to obtain the best sparse vector s
        s = self.s
        diff = (theta - self.L.matmul(s))
        loss = self.l1_coeff * torch.sum(torch.abs(s)) + diff.t().matmul(hessian.matmul(diff))
        # TODO
        # compute A
        self.A += kronecker(s.matmul(s.t()), hessian)
        # compute b
        self.b += kronecker(s.t(), theta.t().matmul(hessian)).view(-1, 1)
        # compute L=A^{-1}b
        # NOTE(review): `+ self.l2_coeff` adds a scalar to every matrix entry;
        # per the ELLA update in [2,3] it should be `l2_coeff * I` (a ridge
        # term on the diagonal), and the resulting (d*k, 1) vector should be
        # reshaped to (d, k) before being assigned to self.L — confirm.
        self.L = torch.inverse(1./self.num_task * self.A + self.l2_coeff).matmul(1./self.num_task * self.b)
        # save the best parameters and hessian for the task
        # TODO: saving the hessian can be quite expensive, maybe we should just save the important components (using
        # SVD)
        self.tasks.setdefault(task, {}).update({'theta': theta, 'hessian': hessian, 's': self.s})
    def reinitialize_zero_columns(self):
        """
        Reinitialize the library columns that are zeros.
        """
        # A column that is (numerically) all zeros contributes nothing to
        # theta = L s; re-seed it with Gaussian noise so it can be used again.
        for i, val in enumerate(np.sum(self.L, axis=0)):
            if abs(val) < 10 ** -8:
                self.L[:, i] = torch.randn(self.d,)
    def plot_latent(self):
        # Not implemented: placeholder for visualizing the latent library L.
        pass
| [
"torch.abs",
"numpy.sum",
"torch.zeros",
"torch.inverse",
"torch.randn"
] | [((8683, 8710), 'torch.zeros', 'torch.zeros', (['(d * k, d * k)'], {}), '((d * k, d * k))\n', (8694, 8710), False, 'import torch\n'), ((8781, 8804), 'torch.zeros', 'torch.zeros', (['(d * k, 1)'], {}), '((d * k, 1))\n', (8792, 8804), False, 'import torch\n'), ((8877, 8891), 'torch.zeros', 'torch.zeros', (['k'], {}), '(k)\n', (8888, 8891), False, 'import torch\n'), ((8933, 8962), 'torch.randn', 'torch.randn', (['(self.d, self.k)'], {}), '((self.d, self.k))\n', (8944, 8962), False, 'import torch\n'), ((11285, 11307), 'numpy.sum', 'np.sum', (['self.L'], {'axis': '(0)'}), '(self.L, axis=0)\n', (11291, 11307), True, 'import numpy as np\n'), ((10747, 10806), 'torch.inverse', 'torch.inverse', (['(1.0 / self.num_task * self.A + self.l2_coeff)'], {}), '(1.0 / self.num_task * self.A + self.l2_coeff)\n', (10760, 10806), False, 'import torch\n'), ((11377, 11396), 'torch.randn', 'torch.randn', (['self.d'], {}), '(self.d)\n', (11388, 11396), False, 'import torch\n'), ((10464, 10476), 'torch.abs', 'torch.abs', (['s'], {}), '(s)\n', (10473, 10476), False, 'import torch\n')] |
from blenderneuron.section import Section
import numpy as np
import math
import numpy as np
class BlenderSection(Section):
    """
    Blender-side representation of a NEURON section: a stretch of neurite
    described by 3D coordinates and per-point radii, with support for
    temporarily splitting overly long sections into chained sub-sections
    (used when confining dendrites between layers).
    """

    def __init__(self):
        super(BlenderSection, self).__init__()

        # True while this section is represented by `split_sections`.
        self.was_split = False
        self.split_sections = []

    def from_full_NEURON_section_dict(self, nrn_section_dict):
        """
        Populate this section, and its children recursively, from the dict
        exported by the NEURON side.

        :param nrn_section_dict: dict with geometry, topology and
            (optionally) recorded activity for one NEURON section
        """
        self.name = nrn_section_dict["name"]

        self.nseg = nrn_section_dict["nseg"]
        self.point_count = nrn_section_dict["point_count"]
        self.coords = nrn_section_dict["coords"]
        self.radii = nrn_section_dict["radii"]
        self.parent_connection_loc = nrn_section_dict["parent_connection_loc"]
        self.connection_end = nrn_section_dict["connection_end"]

        # Parse the children recursively
        self.children = []
        for nrn_child in nrn_section_dict["children"]:
            child = BlenderSection()
            child.from_full_NEURON_section_dict(nrn_child)
            self.children.append(child)

        self.segments_3D = []

        if "activity" in nrn_section_dict:
            self.activity.from_dict(nrn_section_dict["activity"])

    def make_split_sections(self, max_length):
        """
        Splits a section into smaller chained sub-sections if the arc length of
        the points exceeds the specified length. This is used to temporarily
        split the sections for confining dendrites between layers.

        :param max_length: maximum allowed section length in um
        :return: the list of new sub-sections, or None if no split was needed
        """
        arc_lengths = self.arc_lengths()
        total_length = arc_lengths[-1]
        num_sections = int(math.ceil(total_length / max_length))
        is_too_long = num_sections > 1

        if not is_too_long:
            return None

        # Mark the section as having been split
        self.was_split = True

        # Get the maximum length of the new sections
        new_length = total_length / num_sections

        # Create the empty sub-sections
        self.split_sections = [BlenderSection() for i in range(num_sections)]

        old_coords = np.array(self.coords).reshape((-1, 3))
        old_radii = np.array(self.radii)

        # Distribute the coords and radii among the sub-sections
        split_length = 0
        point_i = 0
        for split_sec_i, split_sec in enumerate(self.split_sections):
            split_length += new_length

            split_sec_coords = []
            split_sec_radii = []

            # Start a 2nd+ split section with the most recent point so
            # consecutive sub-sections stay connected
            if split_sec_i > 0:
                prev_sec = self.split_sections[split_sec_i-1]
                split_sec_coords.append(prev_sec.coords[-1])
                split_sec_radii.append(prev_sec.radii[-1])

            exact_length_match = False

            # Add 3d points until reaching the end of this sub-section
            while arc_lengths[point_i] <= split_length:
                split_sec_coords.append(old_coords[point_i])
                split_sec_radii.append(old_radii[point_i])

                exact_length_match = abs(arc_lengths[point_i] - split_length) < 0.001

                point_i += 1

                if point_i == len(arc_lengths):
                    break

            # If the last real point does not lie at the exact end of the
            # sub-section, create a virtual point there by linear
            # interpolation along the current point segment
            if not exact_length_match:
                virtual_arc_length_delta = split_length - arc_lengths[point_i-1]
                pt_segment_arc_length_delta = arc_lengths[point_i] - arc_lengths[point_i - 1]
                pt_segment_vector = old_coords[point_i] - old_coords[point_i-1]
                fraction_along = virtual_arc_length_delta / pt_segment_arc_length_delta

                virtual_coord = old_coords[point_i-1] + pt_segment_vector * fraction_along

                # Interpolate the radius the same way
                pt_segment_radius_delta = old_radii[point_i] - old_radii[point_i-1]
                virtual_radius = old_radii[point_i-1] + pt_segment_radius_delta * fraction_along

                split_sec_coords.append(virtual_coord)
                split_sec_radii.append(virtual_radius)

            split_sec.coords = np.array(split_sec_coords)
            split_sec.radii = np.array(split_sec_radii)
            split_sec.point_count = len(split_sec.radii)
            split_sec.name = self.name + "["+str(split_sec_i)+"]"

        return self.split_sections

    def update_coords_from_split_sections(self):
        """
        Reassemble this section's coords/radii from its split sub-sections,
        dropping the duplicated junction points between consecutive splits.
        No-op if the section was never split.
        """
        if not self.was_split:
            return

        # Reassemble the coords and radii, skipping identical consecutive points
        prev_coord, prev_radius = None, None
        coords, radii = [], []
        for split_i, split_sec in enumerate(self.split_sections):
            for coord_i, coord in enumerate(np.reshape(split_sec.coords, (-1, 3))):
                radius = split_sec.radii[coord_i]

                # Skip if (nearly) identical to the previous kept point
                if prev_coord is not None and radius == prev_radius and \
                    np.all(np.isclose(coord, prev_coord, rtol=0.001)):
                    continue
                else:
                    coords.append(coord)
                    radii.append(radius)

                prev_coord = coord
                prev_radius = radius

        self.coords = np.array(coords).reshape(-1)
        self.radii = np.array(radii).reshape(-1)
        self.point_count = len(self.radii)

    def arc_lengths(self):
        """
        Cumulative arc length at each 3D point, starting at 0 for the first
        point.

        :return: 1-D array of length `point_count`
        """
        coords = np.array(self.coords).reshape(-1, 3)

        start = coords[0:-1]
        end = coords[1:]

        diff = end - start
        sq = np.square(diff)
        # Renamed from `sum` to avoid shadowing the builtin
        sq_sums = np.sum(sq, axis=1)
        dist = np.sqrt(sq_sums)

        tot_len = np.concatenate(([0],np.cumsum(dist)))
        return tot_len

    def dist_to_closest_coord(self, target):
        """
        Euclidean distance from `target` to the nearest 3D point of this
        section.

        :param target: 3-element coordinate
        :return: scalar distance
        """
        coords = np.array(self.coords).reshape(-1, 3)
        target = np.array(target).reshape((1, 3))

        diff = coords - target
        sq = np.square(diff)
        # Renamed from `sum` to avoid shadowing the builtin
        sq_sums = np.sum(sq, axis=1)
        dists = np.sqrt(sq_sums)

        return np.min(dists)

    def remove_split_sections(self, recursive=True):
        """
        Discard any temporary split sub-sections (optionally recursing into
        children) and clear the split flag.
        """
        if self.was_split:
            self.split_sections = []
            self.was_split = False

        if recursive:
            for child_sec in self.children:
                child_sec.remove_split_sections(recursive=True)
class BlenderRoot(BlenderSection):
    """
    A root (top-level) section of a cell, which additionally knows its cell
    index, the node group it belongs to, and how to keep the Blender UI
    group lists in sync with group membership.
    """
    def __init__(self, index, name, group=None):
        super(BlenderRoot, self).__init__()

        self.index = index
        self.name = name
        self.group = group

    @property
    def ui_root(self):
        # UI entry of this root inside its group's root-entry collection
        return self.group.ui_group.root_entries[self.name]

    def remove(self, node):
        """Fully delete this root: its view objects, its group membership,
        its UI entries, and its record in the node's root index."""
        # Remove view container objects if any
        if self.group is not None and self.group.view is not None:
            self.group.view.remove_container(self.name)

        # remove from UI and from node groups
        self.remove_from_group(delete=True)

        # remove from index
        node.root_index.pop(self.name)

    def remove_from_group(self, delete=False):
        """Detach this root from its current group (and, when ``delete`` is
        True, drop its entry from every UI group list)."""
        if self.group is None:
            return

        # Keep a reference to group
        current_group = self.group

        # Remove group from 3D view
        if self.group.view is not None:
            self.group.view.remove()
            self.group.view = None

        # Set group to none in the root_index
        self.group = None

        # remove from node group
        current_group.roots.pop(self.name)

        # from ui group (deselect the entry if it was selected)
        root_entry = current_group.ui_group.root_entries.get(self.name)

        if root_entry is not None and root_entry.selected:
            root_entry.selected = False

        if delete:
            # Remove the root entry from all the UI groups
            for group in current_group.node.groups.values():
                entries = group.ui_group.root_entries
                ui_root = entries.get(self.name)

                if ui_root is not None:
                    remove_idx = entries.find(self.name)
                    entries.remove(remove_idx)

    def add_to_UI_group(self, ui_group):
        """Create (and return) a UI list entry for this root in ``ui_group``."""
        ui_root = ui_group.root_entries.add()

        ui_root.index = self.index
        ui_root.name = self.name

        return ui_root

    def add_to_group(self, group):
        """Move this root into ``group``, detaching it from any previous
        group first, and mark it selected in the group's UI list."""
        if self.group == group:
            return

        if self.group is not None:
            self.remove_from_group()

        # index
        self.group = group

        if group is None:
            return

        # node group
        self.group.roots[self.name] = self

        # ui
        group.highlight()

        ui_group = self.group.ui_group
        root_entry = ui_group.root_entries.get(self.name)

        # If not on the list of cells (e.g. when newly added in NRN)
        if root_entry is None:
            root_entry = self.add_to_UI_group(ui_group)

        if root_entry is not None and not root_entry.selected:
root_entry.selected = True | [
"numpy.sqrt",
"math.ceil",
"numpy.reshape",
"numpy.isclose",
"numpy.square",
"numpy.array",
"numpy.sum",
"numpy.min",
"numpy.cumsum"
] | [((2166, 2186), 'numpy.array', 'np.array', (['self.radii'], {}), '(self.radii)\n', (2174, 2186), True, 'import numpy as np\n'), ((5768, 5783), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (5777, 5783), True, 'import numpy as np\n'), ((5799, 5817), 'numpy.sum', 'np.sum', (['sq'], {'axis': '(1)'}), '(sq, axis=1)\n', (5805, 5817), True, 'import numpy as np\n'), ((5834, 5846), 'numpy.sqrt', 'np.sqrt', (['sum'], {}), '(sum)\n', (5841, 5846), True, 'import numpy as np\n'), ((6130, 6145), 'numpy.square', 'np.square', (['diff'], {}), '(diff)\n', (6139, 6145), True, 'import numpy as np\n'), ((6161, 6179), 'numpy.sum', 'np.sum', (['sq'], {'axis': '(1)'}), '(sq, axis=1)\n', (6167, 6179), True, 'import numpy as np\n'), ((6197, 6209), 'numpy.sqrt', 'np.sqrt', (['sum'], {}), '(sum)\n', (6204, 6209), True, 'import numpy as np\n'), ((6228, 6241), 'numpy.min', 'np.min', (['dists'], {}), '(dists)\n', (6234, 6241), True, 'import numpy as np\n'), ((1644, 1680), 'math.ceil', 'math.ceil', (['(total_length / max_length)'], {}), '(total_length / max_length)\n', (1653, 1680), False, 'import math\n'), ((4298, 4324), 'numpy.array', 'np.array', (['split_sec_coords'], {}), '(split_sec_coords)\n', (4306, 4324), True, 'import numpy as np\n'), ((4356, 4381), 'numpy.array', 'np.array', (['split_sec_radii'], {}), '(split_sec_radii)\n', (4364, 4381), True, 'import numpy as np\n'), ((2106, 2127), 'numpy.array', 'np.array', (['self.coords'], {}), '(self.coords)\n', (2114, 2127), True, 'import numpy as np\n'), ((4923, 4960), 'numpy.reshape', 'np.reshape', (['split_sec.coords', '(-1, 3)'], {}), '(split_sec.coords, (-1, 3))\n', (4933, 4960), True, 'import numpy as np\n'), ((5462, 5478), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (5470, 5478), True, 'import numpy as np\n'), ((5513, 5528), 'numpy.array', 'np.array', (['radii'], {}), '(radii)\n', (5521, 5528), True, 'import numpy as np\n'), ((5633, 5654), 'numpy.array', 'np.array', (['self.coords'], {}), '(self.coords)\n', 
(5641, 5654), True, 'import numpy as np\n'), ((5886, 5901), 'numpy.cumsum', 'np.cumsum', (['dist'], {}), '(dist)\n', (5895, 5901), True, 'import numpy as np\n'), ((5994, 6015), 'numpy.array', 'np.array', (['self.coords'], {}), '(self.coords)\n', (6002, 6015), True, 'import numpy as np\n'), ((6049, 6065), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (6057, 6065), True, 'import numpy as np\n'), ((5174, 5215), 'numpy.isclose', 'np.isclose', (['coord', 'prev_coord'], {'rtol': '(0.001)'}), '(coord, prev_coord, rtol=0.001)\n', (5184, 5215), True, 'import numpy as np\n')] |
import pickle
import time
import numpy as np
import torch
import tqdm
from liga.models import load_data_to_gpu
from liga.utils import common_utils
def statistics_info(cfg, ret_dict, metric, disp_dict):
    """
    Fold one batch's results (``ret_dict``) into the running ``metric``
    accumulators and refresh the human-readable ``disp_dict`` entries.

    Mutates ``metric`` and ``disp_dict`` in place; returns ``None``.
    """
    thresholds = cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST

    # Accumulate per-threshold recall counts for both ROI and RCNN stages.
    for thresh in thresholds:
        key = str(thresh)
        metric['recall_roi_%s' % key] += ret_dict.get('roi_%s' % key, 0)
        metric['recall_rcnn_%s' % key] += ret_dict.get('rcnn_%s' % key, 0)

    metric['gt_num'] += ret_dict.get('gt', 0)
    metric['num'] += 1

    # Display recall at the loosest threshold as "(roi, rcnn) / gt".
    lowest = str(thresholds[0])
    summary = '(%d, %d) / %d' % (
        metric['recall_roi_%s' % lowest],
        metric['recall_rcnn_%s' % lowest],
        metric['gt_num'],
    )
    disp_dict['recall_%s' % lowest] = summary

    # Depth-error bookkeeping for stereo detection.
    for key, value in ret_dict.items():
        if not key.startswith('depth_error_'):
            continue
        if key.endswith('perbox'):
            # Per-box errors accumulate as a growing list of records.
            metric.setdefault(key, []).extend(value)
        else:
            # Scalar errors are summed; the median variants are also shown
            # as a running mean over the number of processed batches.
            metric[key] = metric.get(key, 0.) + value
            if key in ('depth_error_fg_median', 'depth_error_median'):
                disp_dict[key] = '%.3f' % (metric[key] / metric['num'])
def eval_one_epoch(cfg, model, dataloader, epoch_id, logger, dist_test=False, save_to_file=False, result_dir=None):
    """
    Run one full evaluation epoch of ``model`` over ``dataloader``.

    Accumulates recall/depth-error metrics, optionally merges results across
    ranks (``dist_test``), dumps predictions and metrics to ``result_dir``,
    and runs the dataset's 3D/2D box evaluation when ground truth is present.

    :param cfg: experiment config (reads MODEL.POST_PROCESSING and LOCAL_RANK)
    :param model: detector to evaluate (wrapped in DDP when dist_test)
    :param dataloader: evaluation dataloader
    :param epoch_id: epoch label used only for logging
    :param dist_test: whether evaluation is distributed over multiple GPUs
    :param save_to_file: also write per-frame prediction files
    :param result_dir: pathlib.Path output directory (must not be None)
    :return: dict of scalar results (empty dict on non-zero ranks)
    """
    result_dir.mkdir(parents=True, exist_ok=True)

    final_output_dir = result_dir / 'final_result' / 'data'
    final_2d_output_dir = result_dir / 'final_result' / 'data2d'
    if save_to_file:
        final_output_dir.mkdir(parents=True, exist_ok=True)
        final_2d_output_dir.mkdir(parents=True, exist_ok=True)

    # Running accumulators; per-threshold recall keys are added below.
    metric = {
        'num': 0,
        'gt_num': 0,
        # 'depth_error_mean': 0.,
        # 'depth_error_median': 0.,
    }
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        metric['recall_roi_%s' % str(cur_thresh)] = 0
        metric['recall_rcnn_%s' % str(cur_thresh)] = 0

    dataset = dataloader.dataset
    class_names = dataset.class_names
    det_annos = []
    det_annos_2d = []
    iou_results = []

    logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id)
    if dist_test:
        num_gpus = torch.cuda.device_count()
        local_rank = cfg.LOCAL_RANK % num_gpus
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[local_rank],
            broadcast_buffers=False
        )
    model.eval()

    # Progress bar only on the main process.
    if cfg.LOCAL_RANK == 0:
        progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True)
    start_time = time.time()
    for i, batch_dict in enumerate(dataloader):
        load_data_to_gpu(batch_dict)
        with torch.no_grad():
            pred_dicts, ret_dict = model(batch_dict)
        disp_dict = {}

        statistics_info(cfg, ret_dict, metric, disp_dict)
        if 'gt_boxes' in batch_dict and 'iou_results' in pred_dicts[0]:
            iou_results.extend([x['iou_results'] for x in pred_dicts])
        # 2D / 3D predictions are only generated when the model produced the
        # corresponding score keys.
        annos_2d = dataset.generate_prediction_dicts(
            batch_dict, pred_dicts, class_names,
            output_path=final_2d_output_dir if save_to_file else None,
            mode_2d=True
        ) if 'pred_scores_2d' in pred_dicts[0] else None
        annos = dataset.generate_prediction_dicts(
            batch_dict, pred_dicts, class_names,
            output_path=final_output_dir if save_to_file else None
        ) if 'pred_scores' in pred_dicts[0] else None
        if annos_2d is not None:
            det_annos_2d += annos_2d
        if annos is not None:
            det_annos += annos
        if cfg.LOCAL_RANK == 0:
            progress_bar.set_postfix(disp_dict)
            progress_bar.update()

    if cfg.LOCAL_RANK == 0:
        progress_bar.close()

    if dist_test:
        # Gather per-rank results into rank-0 lists; metric becomes a list of
        # one dict per rank and is reduced below.
        rank, world_size = common_utils.get_dist_info()
        iou_results = common_utils.merge_results_dist(iou_results, len(dataset), tmpdir=result_dir / 'tmpdir')
        det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir')
        det_annos_2d = common_utils.merge_results_dist(det_annos_2d, len(dataset), tmpdir=result_dir / 'tmpdir')
        metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir')

    logger.info('*************** Performance of EPOCH %s *****************' % epoch_id)
    sec_per_example = (time.time() - start_time) / len(dataloader.dataset)
    logger.info('Generate label finished(sec_per_example: %.4f second).' % sec_per_example)

    # Only the main process reports and dumps results.
    if cfg.LOCAL_RANK != 0:
        return {}

    ret_dict = {}
    if dist_test:
        # Sum the per-rank metric dicts into rank-0's dict.
        for key, val in metric[0].items():
            for k in range(1, world_size):
                metric[0][key] += metric[k][key]
        metric = metric[0]

    gt_num_cnt = metric['gt_num']
    for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST:
        cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1)
        logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall))
        logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall))
        ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall
        ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall

    for k in metric:
        if k.startswith('depth_error_'):
            if not k.endswith('perbox'):
                # Scalar depth errors were summed per batch; report the mean.
                metric[k] /= metric['num']
                logger.info('%s: %f' % (k, metric[k]))
                ret_dict['depth_error/%s' % (k)] = metric[k]
            else:
                # Per-box records: report the mean of every "err_*" field.
                for kk in metric[k][0]:
                    if kk.startswith("err_"):
                        values = [item[kk] for item in metric[k]]
                        mean_value = np.mean(values)
                        logger.info('%s: %f' % (k + "_" + kk, mean_value))
                        ret_dict['%s' % (k + "_" + kk)] = mean_value

                # copy iou into metric[k]
                if not iou_results:
                    continue
                for x in metric[k]:
                    x['iou'] = iou_results[x['image_idx']][x['idx']]

    total_pred_objects = 0
    for anno in det_annos:
        total_pred_objects += anno['name'].__len__()
    logger.info('Average predicted number of objects(%d samples): %.3f'
                % (len(det_annos), total_pred_objects / max(1, len(det_annos))))

    # Persist raw predictions and metrics for later inspection.
    with open(result_dir / 'result.pkl', 'wb') as f:
        pickle.dump(det_annos, f)
    with open(result_dir / 'metric_result.pkl', 'wb') as f:
        pickle.dump(metric, f)

    # NOTE: batch_dict here is the last batch of the loop; its keys are used
    # only to detect whether ground truth was available at all.
    if det_annos and 'gt_boxes' in batch_dict:
        logger.info('---- 3d box evaluation ---- ')
        result_str, result_dict = dataset.evaluation(
            det_annos, class_names,
            eval_metric='3d',
            output_path=final_output_dir
        )
        logger.info(result_str)
        ret_dict.update(result_dict)

    if det_annos_2d and 'gt_boxes_2d' in batch_dict:
        logger.info('---- 2d box evaluation ---- ')
        result_str, _ = dataset.evaluation(
            det_annos_2d, class_names,
            eval_metric='2d',
            output_path=final_2d_output_dir
        )
        logger.info(result_str)
    else:
        logger.info(f"no 2d eval: {'gt_boxes_2d' in batch_dict} / {det_annos_2d}")

    logger.info('Result is save to %s' % result_dir)
    logger.info('****************Evaluation done.*****************')
    return ret_dict
if __name__ == '__main__':
    # Module is import-only; no standalone CLI behavior is defined.
    pass
| [
"liga.models.load_data_to_gpu",
"numpy.mean",
"liga.utils.common_utils.merge_results_dist",
"pickle.dump",
"liga.utils.common_utils.get_dist_info",
"torch.cuda.device_count",
"torch.no_grad",
"time.time",
"torch.nn.parallel.DistributedDataParallel"
] | [((2656, 2667), 'time.time', 'time.time', ([], {}), '()\n', (2665, 2667), False, 'import time\n'), ((2258, 2283), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2281, 2283), False, 'import torch\n'), ((2347, 2449), 'torch.nn.parallel.DistributedDataParallel', 'torch.nn.parallel.DistributedDataParallel', (['model'], {'device_ids': '[local_rank]', 'broadcast_buffers': '(False)'}), '(model, device_ids=[local_rank],\n broadcast_buffers=False)\n', (2388, 2449), False, 'import torch\n'), ((2724, 2752), 'liga.models.load_data_to_gpu', 'load_data_to_gpu', (['batch_dict'], {}), '(batch_dict)\n', (2740, 2752), False, 'from liga.models import load_data_to_gpu\n'), ((3889, 3917), 'liga.utils.common_utils.get_dist_info', 'common_utils.get_dist_info', ([], {}), '()\n', (3915, 3917), False, 'from liga.utils import common_utils\n'), ((4266, 4353), 'liga.utils.common_utils.merge_results_dist', 'common_utils.merge_results_dist', (['[metric]', 'world_size'], {'tmpdir': "(result_dir / 'tmpdir')"}), "([metric], world_size, tmpdir=result_dir /\n 'tmpdir')\n", (4297, 4353), False, 'from liga.utils import common_utils\n'), ((6585, 6610), 'pickle.dump', 'pickle.dump', (['det_annos', 'f'], {}), '(det_annos, f)\n', (6596, 6610), False, 'import pickle\n'), ((6679, 6701), 'pickle.dump', 'pickle.dump', (['metric', 'f'], {}), '(metric, f)\n', (6690, 6701), False, 'import pickle\n'), ((2767, 2782), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2780, 2782), False, 'import torch\n'), ((4462, 4473), 'time.time', 'time.time', ([], {}), '()\n', (4471, 4473), False, 'import time\n'), ((5888, 5903), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (5895, 5903), True, 'import numpy as np\n')] |
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import threading
from threading import Lock, Thread
import random
import time as timeee
# Global, process-wide simulation state shared across threads.
# transactionCounter: next transaction id handed to a newly created node.
transactionCounter = 0
# Guards mutation of the shared DAG structures from worker threads.
lock = Lock()
# Attack outcome counters (the "succesful" spelling is kept because these
# names are referenced throughout the module).
succesfulAttacks = 0
failedAttacks = 0
class User(object):
    """A participant in the DAG simulation.

    Tracks an identifier, whether the user behaves maliciously, the per-user
    counter for numbering the user's next node, and the user's mana score.
    """

    def __init__(self, id: int, malicious: bool):
        # Attribute names are part of the interface used elsewhere.
        self.id = id
        self.malicious = malicious
        # Per-user node numbering starts at 1.
        self.node_id_counter = 1
        # Every user starts with a single unit of mana.
        self.mana = 1

    def increaseMana(self):
        """Award one unit of mana."""
        self.mana = self.mana + 1

    def resetMana(self):
        """Forfeit all mana."""
        self.mana = 0
class CacNode(object):
    """
    One transaction node of the DAG. Holds its transaction id, creator, tip
    status, neighbour links (by transaction id) and a transient `vote` used
    by the CAC consensus rounds in DAG_C.
    """
    def __init__(self, dag, traId: str, nodeId: int, time: float, user: User, malicious: bool):
        self.traId = traId
        self.nodeId = nodeId
        self.time = time
        self.isTip = True
        self.user = user
        self.malicious = malicious
        self.dag = dag
        self.vote = None
        self.neighbourNodeIds = []
        # When the DAG was built with plotting enabled, register the node in
        # the networkx graph; malicious nodes are drawn below the y axis.
        if hasattr(self.dag, 'graph'):
            if malicious:
                self.dag.graph.add_node(self.traId, pos=(self.time, np.random.uniform(-2, 0)))
            else:
                self.dag.graph.add_node(self.traId, pos=(self.time, np.random.uniform(0, 10)))
    def add_neighbour(self, node):
        # Record the neighbour's transaction id once (no duplicates).
        if node.traId not in self.neighbourNodeIds:
            self.neighbourNodeIds.append(node.traId)
    def get_vote(self):
        """
        Update and return this node's vote from its neighbours' votes:
        majority wins; on ambiguity the vote backed by the most mana wins.
        Returns the current own vote when no neighbour has voted yet.
        """
        neighbourVotes = []
        # Read neighbour votes from dag.addedNodes (the authoritative node
        # objects) rather than from possibly stale local references.
        for node in (n for n in self.dag.addedNodes if (n.traId in self.neighbourNodeIds)):
            neighbourVotes.append(node.vote)
        votesWithoutNone = [vote for vote in neighbourVotes if vote != None]
        if len(votesWithoutNone) == 0:
            return self.vote
        if len(votesWithoutNone) == 1:
            # If all neighbours have the same vote, set vote to it
            self.vote = votesWithoutNone[0]
            # TODO: should compare own mana against the neighbour's here
            return self.vote
        # Else count the occurrences of each distinct neighbour vote
        votesCountDict = {}
        for vote in set(votesWithoutNone):
            votesCountDict[vote] = votesWithoutNone.count(vote)
        # Check if 2 votes have the same number, if so use mana, else return max vote
        maxValue = max(votesCountDict, key=votesCountDict.get)
        if isinstance(maxValue, int):
            self.vote = maxValue
        else:
            # Non-int winner: re-tally weighting each neighbour's vote by
            # that neighbour's mana.
            votesCountDict = {}
            for vote in set(votesWithoutNone):
                votesCountDict[vote] = 0
            for node in (n for n in self.dag.addedNodes if (n.traId in self.neighbourNodeIds)):
                votesCountDict[node.vote] += node.user.mana
            maxValue = max(votesCountDict, key=votesCountDict.get)
            if isinstance(maxValue, int):
                self.vote = maxValue
            else:
                # NOTE(review): falls back to the first element of a non-int
                # vote value -- presumably a tuple-valued vote; confirm.
                self.vote = maxValue[0]
        return self.vote
class DAG_C(object):
    """
    Thread-shared DAG (tangle-like) simulation with CAC voting.

    Honest users batch their transactions per simulated time step and attach
    them via `non_malicious_user`; malicious users attempt to replace the
    recent part of the main chain with a private sub-tree via
    `malicious_user_attack`. When constructed with plot=True, nodes/edges
    are mirrored into a networkx graph for visualization.
    """
    def __init__(self, plot=False, numUsers=1, numMalUsers=0, traPerUser=0, reattachment=False):
        self.time = 1.0
        self.realtime = timeee.time()
        print("DAG: numUsers:" + str(numUsers) + " numMalUsers:" + str(numMalUsers) + " traPerUser:" + str(traPerUser) + " reattachment:" + str(reattachment))
        if plot:
            self.graph = nx.OrderedDiGraph()
        self.nodes = []
        self.addedNodes = []
        self.unvalidatedNodes = []
        self.users = []
        self.currTime = 0
        self.nodesToAdd = []
        self.traPerUser = traPerUser
        self.reattachment = reattachment
        self.numMalUsers = numMalUsers
        if numUsers < 3:
            self.users.append(User(id=0, malicious=False))
            # TODO: (translated) document that numMalUsers must be at least
            # 2 lower than numUsers (malicious ids are drawn from 2..numUsers)
        else:
            # Malicious user ids are drawn from [2, numUsers); users 0 and 1
            # are always honest.
            malUserIds = np.random.choice(range(2, numUsers), numMalUsers)
            for i in range(numUsers):
                self.users.append(User(id=i, malicious=(i in malUserIds)))
    def generate_next_node(self, userId=1, time=0, malicious=False):
        """Entry point for creating the next transaction of a user; with
        userId=None, flushes all still-pending honest transactions."""
        if malicious:
            self.malicious_user_attack(userId, time)
        else:
            if userId == None:
                while len(self.nodesToAdd) != 0:
                    self.time += 1
                    self.non_malicious_user(None, self.time)
            else:
                self.non_malicious_user(userId, self.time)
    def malicious_user_attack(self, userId, _time=0):
        """Attempt a sub-tree replacement attack for the given user."""
        global failedAttacks
        global succesfulAttacks
        # Wait until the main DAG has at least one attached node.
        while len(self.addedNodes) < 1:
            timeee.sleep(random.uniform(1, 3))
        # Select 2 tips from added nodes
        tipNodes = [n for n in self.addedNodes]
        if len(tipNodes) > 2:
            sTips = random.sample(tipNodes, k=2)
        else:
            sTips = tipNodes
        # Check depth of those 2 tips to see if its larger than num transactions
        # str([n.traId for n in self.nodesToAdd])
        depth = 0
        removalNodeId = self.addedNodes[0].traId
        for tip in sTips:
            newDepth = self.getCurrentDepthOfNode(tip.traId)
            # NOTE(review): getCurrentDepthOfNode returns a negative value,
            # so this comparison never updates `depth` from 0 -- confirm.
            if newDepth > depth:
                depth = newDepth
            if tip.traId > removalNodeId:
                removalNodeId = tip.traId
        # TODO: (translated) must change this: the smaller depth of the
        # selected tips should be taken, not the larger one.
        # TODO: (translated) the depth computation is wrong anyway: with
        # [0,1,2,4], selecting 4 and 2 it reports depth 5! :/
        # Create subtree with all transactions of that user
        newTree = self.maliciousTreeWith(sTips, userId, self.traPerUser)
        nodeIdsArray = list([n.traId for n in self.addedNodes])
        idx = nodeIdsArray.index(removalNodeId) + 1
        curdepth = len(self.addedNodes[(idx+1):])
        # Set subtree as main tree or discard as unsuccesfull attack
        if curdepth > self.traPerUser:
            failedAttacks += 1
            print(" -Attack: User:" + str(userId) + " time:" + str(_time) + " -failed")
            #Unsuccessful attack, main tree stays as it is
            user = [u for u in self.users if u.id == userId][0]
            user.resetMana()
        else:
            succesfulAttacks += 1
            print(" -Attack: User:" + str(userId) + " time:" + str(_time) + " -succesfull")
            #Succesful Attack, main tree discards the part after tips and appends malicious tree
            self.unvalidatedNodes += self.addedNodes[(idx+1):]
            self.addedNodes = self.addedNodes[:idx] + newTree
    def maliciousTreeWith(self, sTips, userId, depth):
        """Build the attacker's private chain of `depth` nodes on top of the
        selected tips and return the list of created nodes."""
        _addedNodes = []
        #get largest time of selected tips
        startTime = 0
        for tip in sTips:
            if tip.time > startTime:
                startTime = tip.time
        tips = sTips
        #creates a malicious tree in graph with given number of transactions
        #add 2 nodes each second until sub tree formed
        for i in range(depth):
            node, tips = self.addMaliciousNodeToGraph(userId, startTime, tips)
            _addedNodes.append(node)
            if i % 2 == 0:
                startTime = startTime + 1
        #returns the list of nodes that have been created
        return _addedNodes
    def non_malicious_user(self, userId=1, time=0):
        """Queue an honest transaction for `userId`; when the simulated time
        advances, resolve the queued batch (via CAC) under the global lock."""
        global transactionCounter
        self.time = time
        oldNodesToAdd = []
        if userId != None:
            user = [u for u in self.users if u.id == userId][0]
            newNode = CacNode(self, traId=transactionCounter, nodeId=user.node_id_counter, time=self.time, user=user, malicious=False)
            transactionCounter += 1
            user.node_id_counter += 1
        nodeToAdd = None
        selectedTips = None
        if (time == self.currTime) and (userId != None):
            # Same time step: just queue the node for the next CAC round.
            self.nodesToAdd.append(newNode)
        else:
            lock.acquire()
            self.currTime = self.time
            oldNodesToAdd = self.nodesToAdd
            self.nodesToAdd = []
            if userId != None:
                self.nodesToAdd.append(newNode)
            if len(self.addedNodes) > 2:
                nodeToAdd, selectedTips = self.cac(oldNodesToAdd)
            else:
                # Bootstrap phase: attach everything without voting.
                nodeToAdd = None
                if len(oldNodesToAdd) <= 2:
                    for node in oldNodesToAdd:
                        self.addNodeToGraph(node, self.addedNodes)
                else:
                    self.addNodeToGraph(oldNodesToAdd[0], self.addedNodes)
                    self.addNodeToGraph(oldNodesToAdd[1], self.addedNodes)
                    self.nodes.append(oldNodesToAdd[0])
                    self.nodes.append(oldNodesToAdd[1])
                    # Surplus nodes are postponed to the next time step.
                    for node in oldNodesToAdd[2:]:
                        node.time += 1
                        self.graph.node[node.traId]['pos'] = (node.time, np.random.uniform(0, 10))
                        self.nodesToAdd.append(node)
            if nodeToAdd != None and selectedTips != None:
                self.addNodeToGraph(nodeToAdd, selectedTips)
                if self.reattachment:
                    # Losers of the vote are re-queued at a later time.
                    for node in ([n for n in oldNodesToAdd if n.traId != nodeToAdd.traId]):
                        node.time += 1
                        self.graph.node[node.traId]['pos'] = (node.time, np.random.uniform(0, 10))
                        self.nodesToAdd.append(node)
            lock.release()
        if selectedTips != None:
            # TODO: (translated) old nodes with isTip True are never set to
            # False here, so they remain tips forever.
            for tip in selectedTips:
                if len([n for n in self.addedNodes if n.isTip]) > 3:
                    if self.reattachment:
                        tipNode = [n for n in self.nodes if n.traId == tip.traId][0]
                    else:
                        tipNode = [n for n in self.addedNodes if n.traId == tip.traId][0]
                    tipNode.isTip = False
                else:
                    if self.reattachment:
                        tipNode = [n for n in self.nodes if n.traId == tip.traId][0]
                    else:
                        tipNode = [n for n in self.addedNodes if n.traId == tip.traId][0]
                    tipNode.isTip = True
        if self.reattachment:
            if nodeToAdd != None:
                self.nodes.append(nodeToAdd)
        else:
            for node in oldNodesToAdd:
                if node != None and node not in self.nodes:
                    self.nodes.append(node)
    def addNodeToGraph(self, node, tips):
        """Attach `node` to the main DAG on top of `tips` and reward mana."""
        node.isTip = True
        self.addedNodes.append(node)
        user = [u for u in self.users if u.id == node.user.id][0]
        user.increaseMana()
        for tip in tips:
            if hasattr(self, 'graph'):
                self.graph.add_edges_from([(node.traId, tip.traId)], edge_color='r')
            self.addNeighbourToNode(tip, node)
    def addMaliciousNodeToGraph(self, userId, time, tips):
        """Create one malicious node on `tips`; returns (node, next tips)."""
        global transactionCounter
        user = [u for u in self.users if u.id == userId][0]
        newNode = CacNode(self, traId=transactionCounter, nodeId=user.node_id_counter, time=time, user=user, malicious=True)
        transactionCounter += 1
        user.node_id_counter += 1
        user.increaseMana()
        self.nodes.append(newNode)
        for tip in tips:
            if hasattr(self, 'graph'):
                self.graph.add_edges_from([(newNode.traId, tip.traId)], edge_color='r')
            self.addNeighbourToNode(tip, newNode)
        # TODO: (translated) set isTip to False for the previous tips!!!!
        # TODO: (translated) fix tip selection: if the attack succeeded, the
        # old nodes should no longer be tips -- only these should be; if it
        # failed, these should not be tips.
        newTips = []
        if len(tips) > 1:
            newTips.append(tips[1])
        else:
            newTips.append(tips[0])
        newTips.append(newNode)
        return newNode, newTips
    def addNeighbourToNode(self, node, newNode):
        # Symmetric neighbour registration.
        node.add_neighbour(newNode)
        newNode.add_neighbour(node)
    def getTipNodes(self):
        """Return (at most) two randomly chosen current tips."""
        tipNodes = [n for n in self.addedNodes if n.isTip]
        if len(tipNodes) > 2:
            selectedTips = random.sample(tipNodes, k=2)
        else:
            selectedTips = tipNodes
        return selectedTips
    def getCurrentDepthOfNode(self, nodeId):
        """Position of the node relative to the chain end.

        NOTE(review): returns position minus length, i.e. always negative;
        callers appear to rely on relative comparison only -- confirm.
        """
        node = [n for n in self.addedNodes if n.traId == nodeId][0]
        nodePosition = self.addedNodes.index(node)
        length = len(self.addedNodes)
        return nodePosition - length
    def tips(self):
        # All currently attached nodes flagged as tips.
        return [n for n in self.addedNodes if n.isTip]
    def cac(self, nodesToAdd):
        """Run one CAC round over the queued `nodesToAdd`; returns the
        winning node (or None) and the tips it should attach to."""
        selectedTipss = None
        if len(nodesToAdd) > 1:
            # Seed tip votes: later candidates can overwrite a tip's vote
            # only when backed by strictly more mana.
            for node in reversed(nodesToAdd):
                selectedTips = self.getTipNodes()
                for tip in selectedTips:
                    if tip.vote == None:
                        tip.vote = node.traId
                        selectedTipss = selectedTips
                    else:
                        currentVoteMana = 0
                        if len([n.user.mana for n in self.nodesToAdd if n.traId == tip.vote]) > 0:
                            currentVoteMana = [n.user.mana for n in self.nodesToAdd if n.traId == tip.vote][0]
                        if currentVoteMana < node.user.mana:
                            tip.vote = node.traId
                            selectedTipss = selectedTips
        elif len(nodesToAdd) == 1:
            selectedTipss = self.getTipNodes()
            for tip in selectedTipss:
                tip.vote = nodesToAdd[0].traId
        else:
            return None, selectedTipss
        result = self.vote()
        # set all nodes votes to None after getting the final result
        for node in self.addedNodes:
            node.vote = None
        if result != None:
            # Prefer returning the candidate object from this round; fall
            # back to the historical node with that transaction id.
            if len([node for node in nodesToAdd if node.traId == result]) > 0:
                return [node for node in nodesToAdd if node.traId == result][0], selectedTipss
            return [node for node in self.nodes if node.traId == result][0], selectedTipss
        return None, selectedTipss
    def vote(self, counter=0):
        """Iterate neighbour voting until consensus (or 9 rounds) and return
        the winning transaction id."""
        votes = []
        for node in self.addedNodes:
            votes.append(node.get_vote())
        if len(set(votes)) == 0:
            return self.nodesToAdd[0].traId
        if len(set(votes)) == 1 and votes[0] != None:
            return votes[0]
        # Give up after 9 rounds and pick a deterministic fallback.
        if counter > 8:
            if votes[0] != None:
                return votes[0]
            else:
                return votes[len(votes)-1]
        return self.vote(counter+1)
    def plot(self):
        """Render the DAG with matplotlib/networkx (requires plot=True at
        construction) and return the pyplot module for further use."""
        global transactionCounter
        global failedAttacks
        global succesfulAttacks
        transactionCounter = 0
        if hasattr(self, 'graph'):
            pos = nx.get_node_attributes(self.graph, 'pos')
            node_colors = ['#F7A81D', '#27ECEC','#8E8E8E', '#379716', '#7E27EC', '#ECE927', '#E413A8', '#2775EC']
            # malNodesLabels = dict(zip([int(node.traId) for node in self.maliciousNodes], [node.nodeId for node in self.maliciousNodes]))
            # honNodeLabels = dict(zip([int(node.traId) for node in self.honestNodes], [node.nodeId for node in self.honestNodes]))
            maliciousUsers = [u.id for u in self.users if u.malicious == True]
            maliciousNodes = []
            if len(maliciousUsers) > 0:
                maliciousNodes = [n for n in self.nodes if n.user.id in maliciousUsers]
            # Edge color encodes origin: red = malicious, gray = discarded.
            edge_colors = []
            for e in self.graph.edges():
                if e[0] in [n.traId for n in maliciousNodes]:
                    edge_colors.append('red')
                elif e[0] in [n.traId for n in self.unvalidatedNodes]:
                    edge_colors.append('gray')
                else:
                    edge_colors.append('black')
            # nodeLabels = dict(zip([int(node.traId) for node in self.nodes], [node.nodeId for node in self.nodes]))
            nodeLabels = dict(zip([int(node.traId) for node in self.nodes], [node.traId for node in self.nodes]))
            allNodes = []
            addedUserNodes = []
            discardedUserNodes = []
            allDiscardedNodes = [node for node in self.nodes if (node not in self.addedNodes)]
            for userId in [user.id for user in self.users]:
                addedUserNodes.append([int(node.traId) for node in self.addedNodes if node.user.id == userId])
                discardedUserNodes.append([int(node.traId) for node in allDiscardedNodes if node.user.id == userId])
            # Attached nodes drawn opaque, discarded ones faded.
            for idx, node in enumerate(addedUserNodes):
                nx.draw_networkx_nodes(self.graph, pos, nodelist=node, node_color=node_colors[idx % 8], node_size=600, alpha=0.65)
            for idx, node in enumerate(discardedUserNodes):
                nx.draw_networkx_nodes(self.graph, pos, nodelist=node, node_color=node_colors[idx % 8], node_size=500, alpha=0.25)
            nx.draw_networkx_labels(self.graph, pos, nodeLabels, font_size=20)
            nx.draw_networkx_edges(self.graph, pos, edgelist=self.graph.edges(), arrows=True, edge_color=edge_colors)
            largestTime = 0
            for node in self.nodes:
                if node.time > largestTime:
                    largestTime = node.time
            print("Result: simTime:" + str(largestTime) + " runTime:" + str(timeee.time() - self.realtime) + " discardedNodes:" + str(len(allDiscardedNodes)) + " addedNodes:" + str(len(self.addedNodes)))
            if self.numMalUsers > 0:
                print("Attacks: succesful:" + str(succesfulAttacks) + " failed:" + str(failedAttacks))
            plt.xlabel('Time')
            plt.yticks([])
            return plt
| [
"random.sample",
"random.uniform",
"threading.Lock",
"matplotlib.pyplot.xlabel",
"networkx.draw_networkx_nodes",
"networkx.OrderedDiGraph",
"networkx.draw_networkx_labels",
"networkx.get_node_attributes",
"matplotlib.pyplot.yticks",
"numpy.random.uniform",
"time.time"
] | [((193, 199), 'threading.Lock', 'Lock', ([], {}), '()\n', (197, 199), False, 'from threading import Lock, Thread\n'), ((3086, 3099), 'time.time', 'timeee.time', ([], {}), '()\n', (3097, 3099), True, 'import time as timeee\n'), ((3311, 3330), 'networkx.OrderedDiGraph', 'nx.OrderedDiGraph', ([], {}), '()\n', (3328, 3330), True, 'import networkx as nx\n'), ((4716, 4744), 'random.sample', 'random.sample', (['tipNodes'], {'k': '(2)'}), '(tipNodes, k=2)\n', (4729, 4744), False, 'import random\n'), ((12169, 12197), 'random.sample', 'random.sample', (['tipNodes'], {'k': '(2)'}), '(tipNodes, k=2)\n', (12182, 12197), False, 'import random\n'), ((14809, 14850), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['self.graph', '"""pos"""'], {}), "(self.graph, 'pos')\n", (14831, 14850), True, 'import networkx as nx\n'), ((16984, 17050), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['self.graph', 'pos', 'nodeLabels'], {'font_size': '(20)'}), '(self.graph, pos, nodeLabels, font_size=20)\n', (17007, 17050), True, 'import networkx as nx\n'), ((17681, 17699), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (17691, 17699), True, 'import matplotlib.pyplot as plt\n'), ((17712, 17726), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (17722, 17726), True, 'import matplotlib.pyplot as plt\n'), ((4554, 4574), 'random.uniform', 'random.uniform', (['(1)', '(3)'], {}), '(1, 3)\n', (4568, 4574), False, 'import random\n'), ((16664, 16783), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['self.graph', 'pos'], {'nodelist': 'node', 'node_color': 'node_colors[idx % 8]', 'node_size': '(600)', 'alpha': '(0.65)'}), '(self.graph, pos, nodelist=node, node_color=\n node_colors[idx % 8], node_size=600, alpha=0.65)\n', (16686, 16783), True, 'import networkx as nx\n'), ((16856, 16975), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['self.graph', 'pos'], {'nodelist': 'node', 'node_color': 'node_colors[idx % 
8]', 'node_size': '(500)', 'alpha': '(0.25)'}), '(self.graph, pos, nodelist=node, node_color=\n node_colors[idx % 8], node_size=500, alpha=0.25)\n', (16878, 16975), True, 'import networkx as nx\n'), ((1031, 1055), 'numpy.random.uniform', 'np.random.uniform', (['(-2)', '(0)'], {}), '(-2, 0)\n', (1048, 1055), True, 'import numpy as np\n'), ((1144, 1168), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (1161, 1168), True, 'import numpy as np\n'), ((8833, 8857), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (8850, 8857), True, 'import numpy as np\n'), ((9276, 9300), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (9293, 9300), True, 'import numpy as np\n'), ((17400, 17413), 'time.time', 'timeee.time', ([], {}), '()\n', (17411, 17413), True, 'import time as timeee\n')] |
import scipy.io as sio
from pathlib import Path
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui
# Boolean validity mask over the 32x32 sensor grid: True marks positions that
# carry real data (the script later zeroes the False columns via
# ``pressure[:, ~mask] = 0.0``).  Rebuilt from the three recurring row
# patterns instead of the original hand-written 32-entry literal, and using
# the builtin ``bool`` dtype: the ``np.bool`` alias used by the original
# ``.astype(np.bool)`` was removed in NumPy 1.24, so the old code crashes on
# any current NumPy release.
def _mask_row(start, count):
    """Return a 32-wide boolean row with ``count`` True entries from ``start``."""
    row = np.zeros(32, dtype=bool)
    row[start:start + count] = True
    return row

_row_full = np.ones(32, dtype=bool)  # fully populated row
_row_tail = _mask_row(14, 18)          # last 18 positions populated
_row_mid = _mask_row(25, 4)            # 4 populated positions near the end

# Row sequence reproduces the original literal exactly (top to bottom).
mask = np.stack(
    [_row_full] * 3 + [_row_tail] * 3
    + [_row_full] * 3 + [_row_tail]
    + [_row_full] * 3 + [_row_tail] * 2
    + [_row_full] * 3 + [_row_mid] * 14
)
mask = mask.reshape((1024,))
def normalize(pressure):
    """
    Scale each frame of ``pressure`` to the range [0, 1] independently.

    Each tactile frame is shifted by its own minimum and divided by its own
    range.  Frames with zero range (all values equal) are mapped to all
    zeros instead of propagating NaNs from the 0/0 division the original
    implementation performed.

    :param pressure: array of frames; the first axis indexes frames
    :return: a new array of the same shape; the input is left untouched
    """
    normalized_p = np.copy(pressure)
    for i, press in enumerate(pressure):
        min_p = np.min(press)
        span = np.max(press) - min_p
        if span > 0:
            normalized_p[i] = (press - min_p) / span
        else:
            normalized_p[i] = 0.0  # constant frame: avoid 0/0 -> NaN
    return normalized_p
def boost(pressure):
    """
    Amplify above-average readings in every frame, in place.

    For each frame, the mean is taken over the entries selected by the
    module-level ``mask`` (presumably the valid taxels -- confirm against the
    sensor layout).  Every entry above that mean is replaced by four times
    its distance from the mean, so strong tactile features stand out.

    :param pressure: array of flattened frames, modified in place
    :return: the same array object
    """
    for press in pressure:
        mean_p = np.mean(press[mask])
        above = press > mean_p
        press[above] = 4 * (press[above] - mean_p)
    return pressure
#filename = Path('C:/Users/fabia/Documents/Schweiz/ETH/Master/4_Semester_Spring_2020/Master_thesis/Data_Collection/3kOhm_FB/data_MT_FabianGeiger_5sess.mat')
filename = Path('../../Data_Collection/3kOhm_FB/data_MT_FabianGeiger_5sess.mat')
# Load the recorded tactile sessions; squeeze_me drops singleton dimensions.
# The .mat file is expected to contain 'tactile_data', 'object_id' and
# 'session_id' arrays (see the accesses below).
data = sio.loadmat(filename, squeeze_me=True)
pressure = data['tactile_data']
# Scale data to the range [0, 1]
# 1500/2700 are presumably the raw sensor count range -- TODO confirm
# against the recording setup.
pressure = np.clip((pressure.astype(np.float32)-1500)/(2700-1500), 0.0, 1.0)
#pressure = normalize(pressure.astype(np.float32))
#pressure = np.exp2(pressure)
#pressure = np.clip((pressure-1), 0.0, 1.0)
# Emphasise above-average readings, then clamp back to [0, 1].
pressure = boost(pressure)
pressure = np.clip(pressure, 0.0, 1.0)
object_id = data['object_id']
# Zero out the columns marked False in the module-level validity mask.
pressure[:, ~mask] = 0.0
num_sessions = len(np.unique(data['session_id']))
# Group frames (x) and object labels (y) per recording session.
x = []
y = []
sessions = data['session_id']
for i in range(num_sessions):
    session_mask = sessions == i
    x.append(pressure[session_mask])
    y.append(object_id[session_mask])
app = QtGui.QApplication([])
# Create a top-level widget to hold everything
w = QtGui.QWidget()
# Create a bunch of imageview widgets (one per session; assumes 5 sessions)
imv1 = pg.ImageView()
imv2 = pg.ImageView()
imv3 = pg.ImageView()
imv4 = pg.ImageView()
imv5 = pg.ImageView()
# Create a grid layout to manage the widgets size and position
layout = QtGui.QGridLayout()
w.setLayout(layout)
# Add imageview widgets to layout
layout.addWidget(imv1, 0, 0)
layout.addWidget(imv2, 0, 1)
layout.addWidget(imv3, 1, 0)
layout.addWidget(imv4, 1, 1)
layout.addWidget(imv5, 0, 2)
# Show top-level widget
w.show()
# Get data from a specific class
obj_id = 9
data = []
for i in range(num_sessions):
    obj_mask = y[i] == obj_id
    # Frames of the chosen object, reshaped back to the 32x32 sensor grid.
    data.append(x[i][obj_mask].reshape((-1, 32, 32)))
# Display the data and assign each frame a time value (100 frames/second)
imv1.setImage(data[0], xvals=np.linspace(0., data[0].shape[0]/100, data[0].shape[0]), levels=(0, 1))
imv2.setImage(data[1], xvals=np.linspace(0., data[1].shape[0]/100, data[1].shape[0]), levels=(0, 1))
imv3.setImage(data[2], xvals=np.linspace(0., data[2].shape[0]/100, data[2].shape[0]), levels=(0, 1))
imv4.setImage(data[3], xvals=np.linspace(0., data[3].shape[0]/100, data[3].shape[0]), levels=(0, 1))
imv5.setImage(data[4], xvals=np.linspace(0., data[4].shape[0]/100, data[4].shape[0]), levels=(0, 1))
if __name__ == "__main__":
QtGui.QApplication.instance().exec_() | [
"numpy.clip",
"numpy.copy",
"numpy.mean",
"pyqtgraph.Qt.QtGui.QApplication.instance",
"numpy.unique",
"pyqtgraph.Qt.QtGui.QWidget",
"pathlib.Path",
"numpy.ones",
"scipy.io.loadmat",
"pyqtgraph.Qt.QtGui.QGridLayout",
"numpy.max",
"pyqtgraph.Qt.QtGui.QApplication",
"numpy.linspace",
"numpy.z... | [((2765, 2834), 'pathlib.Path', 'Path', (['"""../../Data_Collection/3kOhm_FB/data_MT_FabianGeiger_5sess.mat"""'], {}), "('../../Data_Collection/3kOhm_FB/data_MT_FabianGeiger_5sess.mat')\n", (2769, 2834), False, 'from pathlib import Path\n'), ((2845, 2883), 'scipy.io.loadmat', 'sio.loadmat', (['filename'], {'squeeze_me': '(True)'}), '(filename, squeeze_me=True)\n', (2856, 2883), True, 'import scipy.io as sio\n'), ((3197, 3224), 'numpy.clip', 'np.clip', (['pressure', '(0.0)', '(1.0)'], {}), '(pressure, 0.0, 1.0)\n', (3204, 3224), True, 'import numpy as np\n'), ((3539, 3561), 'pyqtgraph.Qt.QtGui.QApplication', 'QtGui.QApplication', (['[]'], {}), '([])\n', (3557, 3561), False, 'from pyqtgraph.Qt import QtGui\n'), ((3615, 3630), 'pyqtgraph.Qt.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (3628, 3630), False, 'from pyqtgraph.Qt import QtGui\n'), ((3678, 3692), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {}), '()\n', (3690, 3692), True, 'import pyqtgraph as pg\n'), ((3701, 3715), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {}), '()\n', (3713, 3715), True, 'import pyqtgraph as pg\n'), ((3724, 3738), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {}), '()\n', (3736, 3738), True, 'import pyqtgraph as pg\n'), ((3747, 3761), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {}), '()\n', (3759, 3761), True, 'import pyqtgraph as pg\n'), ((3770, 3784), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {}), '()\n', (3782, 3784), True, 'import pyqtgraph as pg\n'), ((3859, 3878), 'pyqtgraph.Qt.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (3876, 3878), False, 'from pyqtgraph.Qt import QtGui\n'), ((2006, 2023), 'numpy.copy', 'np.copy', (['pressure'], {}), '(pressure)\n', (2013, 2023), True, 'import numpy as np\n'), ((3306, 3335), 'numpy.unique', 'np.unique', (["data['session_id']"], {}), "(data['session_id'])\n", (3315, 3335), True, 'import numpy as np\n'), ((2083, 2096), 'numpy.min', 'np.min', (['press'], {}), '(press)\n', (2089, 2096), True, 'import numpy as 
np\n'), ((2420, 2440), 'numpy.mean', 'np.mean', (['press[mask]'], {}), '(press[mask])\n', (2427, 2440), True, 'import numpy as np\n'), ((4387, 4445), 'numpy.linspace', 'np.linspace', (['(0.0)', '(data[0].shape[0] / 100)', 'data[0].shape[0]'], {}), '(0.0, data[0].shape[0] / 100, data[0].shape[0])\n', (4398, 4445), True, 'import numpy as np\n'), ((4489, 4547), 'numpy.linspace', 'np.linspace', (['(0.0)', '(data[1].shape[0] / 100)', 'data[1].shape[0]'], {}), '(0.0, data[1].shape[0] / 100, data[1].shape[0])\n', (4500, 4547), True, 'import numpy as np\n'), ((4591, 4649), 'numpy.linspace', 'np.linspace', (['(0.0)', '(data[2].shape[0] / 100)', 'data[2].shape[0]'], {}), '(0.0, data[2].shape[0] / 100, data[2].shape[0])\n', (4602, 4649), True, 'import numpy as np\n'), ((4693, 4751), 'numpy.linspace', 'np.linspace', (['(0.0)', '(data[3].shape[0] / 100)', 'data[3].shape[0]'], {}), '(0.0, data[3].shape[0] / 100, data[3].shape[0])\n', (4704, 4751), True, 'import numpy as np\n'), ((4795, 4853), 'numpy.linspace', 'np.linspace', (['(0.0)', '(data[4].shape[0] / 100)', 'data[4].shape[0]'], {}), '(0.0, data[4].shape[0] / 100, data[4].shape[0])\n', (4806, 4853), True, 'import numpy as np\n'), ((2142, 2163), 'numpy.max', 'np.max', (['(press - min_p)'], {}), '(press - min_p)\n', (2148, 2163), True, 'import numpy as np\n'), ((4902, 4931), 'pyqtgraph.Qt.QtGui.QApplication.instance', 'QtGui.QApplication.instance', ([], {}), '()\n', (4929, 4931), False, 'from pyqtgraph.Qt import QtGui\n'), ((147, 158), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (154, 158), True, 'import numpy as np\n'), ((160, 171), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (167, 171), True, 'import numpy as np\n'), ((173, 184), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (180, 184), True, 'import numpy as np\n'), ((374, 385), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (381, 385), True, 'import numpy as np\n'), ((387, 398), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (394, 398), True, 
'import numpy as np\n'), ((400, 411), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (407, 411), True, 'import numpy as np\n'), ((494, 505), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (501, 505), True, 'import numpy as np\n'), ((507, 518), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (514, 518), True, 'import numpy as np\n'), ((520, 531), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (527, 531), True, 'import numpy as np\n'), ((677, 688), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (684, 688), True, 'import numpy as np\n'), ((690, 701), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (697, 701), True, 'import numpy as np\n'), ((703, 714), 'numpy.ones', 'np.ones', (['(32)'], {}), '(32)\n', (710, 714), True, 'import numpy as np\n'), ((201, 213), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (209, 213), True, 'import numpy as np\n'), ((215, 226), 'numpy.ones', 'np.ones', (['(18)'], {}), '(18)\n', (222, 226), True, 'import numpy as np\n'), ((264, 276), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (272, 276), True, 'import numpy as np\n'), ((278, 289), 'numpy.ones', 'np.ones', (['(18)'], {}), '(18)\n', (285, 289), True, 'import numpy as np\n'), ((327, 339), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (335, 339), True, 'import numpy as np\n'), ((341, 352), 'numpy.ones', 'np.ones', (['(18)'], {}), '(18)\n', (348, 352), True, 'import numpy as np\n'), ((447, 459), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (455, 459), True, 'import numpy as np\n'), ((461, 472), 'numpy.ones', 'np.ones', (['(18)'], {}), '(18)\n', (468, 472), True, 'import numpy as np\n'), ((567, 579), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (575, 579), True, 'import numpy as np\n'), ((581, 592), 'numpy.ones', 'np.ones', (['(18)'], {}), '(18)\n', (588, 592), True, 'import numpy as np\n'), ((630, 642), 'numpy.zeros', 'np.zeros', (['(14)'], {}), '(14)\n', (638, 642), True, 'import numpy as np\n'), ((644, 655), 'numpy.ones', 
'np.ones', (['(18)'], {}), '(18)\n', (651, 655), True, 'import numpy as np\n'), ((750, 762), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (758, 762), True, 'import numpy as np\n'), ((764, 774), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (771, 774), True, 'import numpy as np\n'), ((776, 787), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (784, 787), True, 'import numpy as np\n'), ((825, 837), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (833, 837), True, 'import numpy as np\n'), ((839, 849), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (846, 849), True, 'import numpy as np\n'), ((851, 862), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (859, 862), True, 'import numpy as np\n'), ((900, 912), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (908, 912), True, 'import numpy as np\n'), ((914, 924), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (921, 924), True, 'import numpy as np\n'), ((926, 937), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (934, 937), True, 'import numpy as np\n'), ((975, 987), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (983, 987), True, 'import numpy as np\n'), ((989, 999), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (996, 999), True, 'import numpy as np\n'), ((1001, 1012), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1009, 1012), True, 'import numpy as np\n'), ((1050, 1062), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (1058, 1062), True, 'import numpy as np\n'), ((1064, 1074), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1071, 1074), True, 'import numpy as np\n'), ((1076, 1087), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1084, 1087), True, 'import numpy as np\n'), ((1125, 1137), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (1133, 1137), True, 'import numpy as np\n'), ((1139, 1149), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1146, 1149), True, 'import numpy as np\n'), ((1151, 1162), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1159, 
1162), True, 'import numpy as np\n'), ((1200, 1212), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (1208, 1212), True, 'import numpy as np\n'), ((1214, 1224), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1221, 1224), True, 'import numpy as np\n'), ((1226, 1237), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1234, 1237), True, 'import numpy as np\n'), ((1275, 1287), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (1283, 1287), True, 'import numpy as np\n'), ((1289, 1299), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1296, 1299), True, 'import numpy as np\n'), ((1301, 1312), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1309, 1312), True, 'import numpy as np\n'), ((1350, 1362), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (1358, 1362), True, 'import numpy as np\n'), ((1364, 1374), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1371, 1374), True, 'import numpy as np\n'), ((1376, 1387), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1384, 1387), True, 'import numpy as np\n'), ((1425, 1437), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (1433, 1437), True, 'import numpy as np\n'), ((1439, 1449), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1446, 1449), True, 'import numpy as np\n'), ((1451, 1462), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1459, 1462), True, 'import numpy as np\n'), ((1500, 1512), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (1508, 1512), True, 'import numpy as np\n'), ((1514, 1524), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1521, 1524), True, 'import numpy as np\n'), ((1526, 1537), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1534, 1537), True, 'import numpy as np\n'), ((1575, 1587), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (1583, 1587), True, 'import numpy as np\n'), ((1589, 1599), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1596, 1599), True, 'import numpy as np\n'), ((1601, 1612), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1609, 
1612), True, 'import numpy as np\n'), ((1650, 1662), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (1658, 1662), True, 'import numpy as np\n'), ((1664, 1674), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1671, 1674), True, 'import numpy as np\n'), ((1676, 1687), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1684, 1687), True, 'import numpy as np\n'), ((1725, 1737), 'numpy.zeros', 'np.zeros', (['(25)'], {}), '(25)\n', (1733, 1737), True, 'import numpy as np\n'), ((1739, 1749), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (1746, 1749), True, 'import numpy as np\n'), ((1751, 1762), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1759, 1762), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
def classify_prices(discount):
    """Map each discount fraction to a price category name.

    Boundaries match the original implementation exactly: a discount of 0 or
    less is 'no_discount', up to and including 0.1 is 'discounted', up to and
    including 0.2 is 'good_deal', and anything larger is 'buy_now'.

    :param discount: iterable of discount fractions (numbers or numeric
        strings, as produced by ``calculate_discount``)
    :return: list of category names, one per input value
    """
    price_classification = []
    for d in discount:
        value = float(d)  # convert once instead of on every comparison
        if value <= 0:
            category = 'no_discount'
        elif value <= 0.1:
            category = 'discounted'
        elif value <= 0.2:
            category = 'good_deal'
        else:
            category = 'buy_now'
        price_classification.append(category)
    return price_classification
def calculate_discount(current, reference):
    """Return the fractional discount of each current price.

    Each entry is ``(reference - current) / reference``, i.e. the relative
    saving with respect to the reference price.

    :param current: sequence of current prices
    :param reference: sequence of reference prices, same length
    :return: list of discount fractions
    """
    return [
        (reference[i] - current[i]) / reference[i]
        for i in range(len(current))
    ]
def read_files(current_price_filename, reference_price_filename):
    """Load current and reference prices from two comma-delimited text files.

    Uses the builtin ``int`` for the cast: the original passed
    ``dtype=np.int``, an alias that was removed in NumPy 1.24 and crashes on
    current NumPy releases.

    :param current_price_filename: path to the current-prices file
    :param reference_price_filename: path to the reference-prices file
    :return: (current, reference) pair of 1-D integer NumPy arrays
    """
    with open(current_price_filename, encoding='utf-8') as f:
        current = np.loadtxt(f, delimiter=',').astype(int)
    with open(reference_price_filename, encoding='utf-8') as f:
        reference = np.loadtxt(f, delimiter=',').astype(int)
    return current, reference
def check_output(current, reference, discount, price_classification):
    """Pretty-print every price with its discount and category.

    Debug/checking aid only; the original marks it as not to be modified and
    its output format is preserved exactly.
    """
    print('----------------------------------------------')
    print('P', 'current', 'ref', 'discount', 'classification', sep='\t')
    print('----------------------------------------------')
    for idx in range(len(discount)):
        pct = str(np.round(discount[idx], 2)) + '%'
        print(idx, current[idx], reference[idx], pct,
              price_classification[idx], sep='\t')
if __name__ == '__main__':
    # Input files: one price per line (see read_files for the expected format).
    current_price_filename = 'data/current_prices_example.csv' # You can change this value for testing
    reference_price_filename = 'data/reference_prices_example.csv' # You can change this value for testing
    # The lines below are provided to run your code in a similar order as
    # will be done during marking and to help you check your answer.
    # Pipeline: read prices -> compute discounts -> classify each discount.
    current, reference = read_files(current_price_filename, reference_price_filename)
    discount = calculate_discount(current, reference)
    price_classification = classify_prices(discount)
    # You can use the function below to check your answer only
    # Please comment it for your submission
    check_output(current, reference, discount, price_classification)
| [
"numpy.array",
"numpy.loadtxt",
"numpy.round"
] | [((1051, 1079), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.int'}), '(data, dtype=np.int)\n', (1059, 1079), True, 'import numpy as np\n'), ((1119, 1148), 'numpy.array', 'np.array', (['data1'], {'dtype': 'np.int'}), '(data1, dtype=np.int)\n', (1127, 1148), True, 'import numpy as np\n'), ((911, 939), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (921, 939), True, 'import numpy as np\n'), ((1010, 1038), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (1020, 1038), True, 'import numpy as np\n'), ((1681, 1705), 'numpy.round', 'np.round', (['discount[i]', '(2)'], {}), '(discount[i], 2)\n', (1689, 1705), True, 'import numpy as np\n')] |
import os
from os.path import expanduser
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import numpy as np
import tensorflow as tf
from copy import deepcopy
from sklearn.utils import shuffle
HOME_DIR = os.getcwd()  # data/output paths below are resolved relative to the CWD
seed = 1731
# Seed both NumPy and TensorFlow so permutations and shuffles are reproducible.
np.random.seed(seed)
tf.random.set_random_seed(seed)
# True -> labels are shifted into task-specific slots (see next_task below).
continual = True
class PermutedMnistGenerator():
    """Produce a sequence of pixel-permuted MNIST tasks.

    MNIST is downloaded (if necessary) into ``HOME_DIR/data/MNIST`` at
    construction time.  Each call to ``next_task`` applies a deterministic
    pixel permutation -- seeded with the internal task counter -- to both the
    train and test images.
    """

    def __init__(self, max_iter=5):
        data_dir = "{}/data/MNIST".format(HOME_DIR)
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        train_part, test_part = tf.keras.datasets.mnist.load_data(data_dir + '/mnist.npz')
        # Scale pixel values to [0, 1] and flatten each image to a vector.
        images_tr = train_part[0] / 255.0
        images_te = test_part[0] / 255.0
        self.Y_train = train_part[1]
        self.Y_test = test_part[1]
        self.X_train = np.reshape(images_tr, [images_tr.shape[0], -1])
        self.X_test = np.reshape(images_te, [images_te.shape[0], -1])
        self.max_iter = max_iter
        self.cur_iter = 0
        self.num_classes = 10 * max_iter

    def get_dims(self):
        """Return (input dimensionality, number of classes per task)."""
        return self.X_train.shape[1], 10

    def next_task(self, task_id):
        """Return (x_train, y_train, x_test, y_test) for the next permuted task."""
        if self.cur_iter >= self.max_iter:
            raise Exception('Number of tasks exceeded!')
        # Deterministic permutation: the task counter seeds the RNG.
        np.random.seed(self.cur_iter)
        permutation = np.arange(self.X_train.shape[1])
        np.random.shuffle(permutation)
        x_tr = deepcopy(self.X_train)[:, permutation]
        x_te = deepcopy(self.X_test)[:, permutation]
        if continual:
            # Single-head continual setup: shift labels into task-specific
            # slots of a (10 * max_iter)-wide one-hot encoding.
            y_tr = np.eye(self.num_classes)[self.Y_train + (task_id * 10)]
            y_te = np.eye(self.num_classes)[self.Y_test + (task_id * 10)]
        else:
            y_tr = np.eye(10)[self.Y_train]
            y_te = np.eye(10)[self.Y_test]
        self.cur_iter += 1
        return x_tr, y_tr, x_te, y_te
num_tasks = 10
data_gen = PermutedMnistGenerator(num_tasks)
task_dir = "{}/data/permuted_mnist_10/".format(HOME_DIR)
if not os.path.exists(task_dir):
    os.makedirs(task_dir)
# Materialise every permuted task to disk: shuffle each task's training set
# with a task-dependent seed, then store the four splits in one .npz archive.
for task in np.arange(num_tasks):
    x_train, y_train, x_test, y_test = data_gen.next_task(task)
    x_train, y_train = shuffle(x_train, y_train, random_state=seed+task)
    print (x_train.shape, y_train.shape, x_test.shape, y_test.shape)
    print (task_dir)
    print (np.argmax(y_train, axis=1))
    np.savez('{}continual_task_{}.npz'.format(task_dir, task), X_train = x_train, Y_train = y_train, X_test = x_test, Y_test = y_test)
| [
"os.path.exists",
"numpy.eye",
"numpy.reshape",
"os.makedirs",
"tensorflow.keras.datasets.mnist.load_data",
"sklearn.utils.shuffle",
"tensorflow.random.set_random_seed",
"numpy.argmax",
"os.getcwd",
"numpy.random.seed",
"copy.deepcopy",
"numpy.arange",
"numpy.random.shuffle"
] | [((270, 281), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (279, 281), False, 'import os\n'), ((298, 318), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (312, 318), True, 'import numpy as np\n'), ((320, 351), 'tensorflow.random.set_random_seed', 'tf.random.set_random_seed', (['seed'], {}), '(seed)\n', (345, 351), True, 'import tensorflow as tf\n'), ((2285, 2305), 'numpy.arange', 'np.arange', (['num_tasks'], {}), '(num_tasks)\n', (2294, 2305), True, 'import numpy as np\n'), ((2220, 2244), 'os.path.exists', 'os.path.exists', (['task_dir'], {}), '(task_dir)\n', (2234, 2244), False, 'import os\n'), ((2248, 2269), 'os.makedirs', 'os.makedirs', (['task_dir'], {}), '(task_dir)\n', (2259, 2269), False, 'import os\n'), ((2390, 2441), 'sklearn.utils.shuffle', 'shuffle', (['x_train', 'y_train'], {'random_state': '(seed + task)'}), '(x_train, y_train, random_state=seed + task)\n', (2397, 2441), False, 'from sklearn.utils import shuffle\n'), ((564, 622), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', (["(data_dir + '/mnist.npz')"], {}), "(data_dir + '/mnist.npz')\n", (597, 622), True, 'import tensorflow as tf\n'), ((818, 871), 'numpy.reshape', 'np.reshape', (['self.X_train', '[self.X_train.shape[0], -1]'], {}), '(self.X_train, [self.X_train.shape[0], -1])\n', (828, 871), True, 'import numpy as np\n'), ((889, 940), 'numpy.reshape', 'np.reshape', (['self.X_test', '[self.X_test.shape[0], -1]'], {}), '(self.X_test, [self.X_test.shape[0], -1])\n', (899, 940), True, 'import numpy as np\n'), ((2535, 2561), 'numpy.argmax', 'np.argmax', (['y_train'], {'axis': '(1)'}), '(y_train, axis=1)\n', (2544, 2561), True, 'import numpy as np\n'), ((501, 525), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (515, 525), False, 'import os\n'), ((531, 552), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (542, 552), False, 'import os\n'), ((1268, 1297), 'numpy.random.seed', 'np.random.seed', 
(['self.cur_iter'], {}), '(self.cur_iter)\n', (1282, 1297), True, 'import numpy as np\n'), ((1314, 1346), 'numpy.arange', 'np.arange', (['self.X_train.shape[1]'], {}), '(self.X_train.shape[1])\n', (1323, 1346), True, 'import numpy as np\n'), ((1351, 1379), 'numpy.random.shuffle', 'np.random.shuffle', (['perm_inds'], {}), '(perm_inds)\n', (1368, 1379), True, 'import numpy as np\n'), ((1456, 1478), 'copy.deepcopy', 'deepcopy', (['self.X_train'], {}), '(self.X_train)\n', (1464, 1478), False, 'from copy import deepcopy\n'), ((1618, 1639), 'copy.deepcopy', 'deepcopy', (['self.X_test'], {}), '(self.X_test)\n', (1626, 1639), False, 'from copy import deepcopy\n'), ((1773, 1797), 'numpy.eye', 'np.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (1779, 1797), True, 'import numpy as np\n'), ((1846, 1870), 'numpy.eye', 'np.eye', (['self.num_classes'], {}), '(self.num_classes)\n', (1852, 1870), True, 'import numpy as np\n'), ((1929, 1939), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (1935, 1939), True, 'import numpy as np\n'), ((1973, 1983), 'numpy.eye', 'np.eye', (['(10)'], {}), '(10)\n', (1979, 1983), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# etips
#
# Copyright (c) Siemens AG, 2020
# Authors:
# <NAME> <<EMAIL>>
# License-Identifier: MIT
from pathlib import Path
from joblib import dump
import numpy as np
from sklearn.model_selection import KFold
from sklearn.dummy import DummyClassifier
from utils import fix_random_seed, load_counting_data, load_mnist_data
if __name__ == '__main__':
    # Reproducibility: fix all RNG seeds before any data handling.
    fix_random_seed(0)
    data_fp = Path('../data/')
    exp_name = 'RD1' # or RD2
    cv_index = 0 # 0-4
    exp_fp = Path(f'./Exps/{exp_name}/CV{cv_index}/')
    exp_fp.mkdir(parents=True, exist_ok=True)
    x, y = load_counting_data(fp=data_fp, fn='Dataset_10k.pickle')
    # x, y = load_mnist_data()
    # Collapse the second axis of y (one-hot labels or per-class scores,
    # judging by the argmax -- confirm against load_counting_data) to ids.
    y = np.argmax(y, axis=1)
    # Hold out the first 10% of samples as the test split (data presumably
    # pre-shuffled upstream -- TODO confirm).
    test_size = int(0.1 * x.shape[0])
    x_tr, x_te = x[test_size:, :, :], x[:test_size, :, :]
    y_tr, y_te = y[test_size:], y[:test_size]
    print(f'shape of x_tr, x_te: {x_tr.shape}, {x_te.shape}')
    print(f'shape of y_tr, y_te: {y_tr.shape}, {y_te.shape}')
    # 5-fold split of the training portion; keep the fold given by cv_index.
    kf = KFold(n_splits=5, shuffle=False, random_state=None)
    tr_idx, val_idx = list(kf.split(x_tr))[cv_index]
    # Order: train x/y, validation x/y, test x/y.
    data_list = [x_tr[tr_idx, :, :], y_tr[tr_idx],
                 x_tr[val_idx, :, :], y_tr[val_idx],
                 x_te, y_te]
    # Uniform-random baseline: its test accuracy estimates the chance level.
    rc = DummyClassifier(strategy='uniform')
    rc.fit(data_list[0], data_list[1])
    acc = rc.score(data_list[-2], data_list[-1])
    print(acc)
    dump(rc, exp_fp / 'model_trial_random.joblib')
    print('The model is saved.')
| [
"pathlib.Path",
"numpy.argmax",
"utils.fix_random_seed",
"sklearn.dummy.DummyClassifier",
"utils.load_counting_data",
"sklearn.model_selection.KFold",
"joblib.dump"
] | [((405, 423), 'utils.fix_random_seed', 'fix_random_seed', (['(0)'], {}), '(0)\n', (420, 423), False, 'from utils import fix_random_seed, load_counting_data, load_mnist_data\n'), ((439, 455), 'pathlib.Path', 'Path', (['"""../data/"""'], {}), "('../data/')\n", (443, 455), False, 'from pathlib import Path\n'), ((526, 566), 'pathlib.Path', 'Path', (['f"""./Exps/{exp_name}/CV{cv_index}/"""'], {}), "(f'./Exps/{exp_name}/CV{cv_index}/')\n", (530, 566), False, 'from pathlib import Path\n'), ((625, 680), 'utils.load_counting_data', 'load_counting_data', ([], {'fp': 'data_fp', 'fn': '"""Dataset_10k.pickle"""'}), "(fp=data_fp, fn='Dataset_10k.pickle')\n", (643, 680), False, 'from utils import fix_random_seed, load_counting_data, load_mnist_data\n'), ((720, 740), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (729, 740), True, 'import numpy as np\n'), ((1019, 1070), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(False)', 'random_state': 'None'}), '(n_splits=5, shuffle=False, random_state=None)\n', (1024, 1070), False, 'from sklearn.model_selection import KFold\n'), ((1269, 1304), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""uniform"""'}), "(strategy='uniform')\n", (1284, 1304), False, 'from sklearn.dummy import DummyClassifier\n'), ((1412, 1458), 'joblib.dump', 'dump', (['rc', "(exp_fp / 'model_trial_random.joblib')"], {}), "(rc, exp_fp / 'model_trial_random.joblib')\n", (1416, 1458), False, 'from joblib import dump\n')] |
#!/usr/bin/env python
import numpy as np
import tensorflow as tf
# Synthetic data: y = 2*x + 10 plus Gaussian noise, so the fitted weight
# should converge near 2 and the bias near 10.
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.33 + 10

# TF1-style graph: placeholders are fed one (x, y) pair per SGD step.
# (This script targets the TF1 graph API; it will not run under TF2.)
X = tf.placeholder("float")
Y = tf.placeholder("float")

w = tf.Variable(0.0, name="weight")
b = tf.Variable(0.0, name="bias")

# tf.mul was removed in TensorFlow 1.0; tf.multiply is the supported name.
cost_op = tf.square(Y - tf.multiply(X, w) - b)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost_op)

with tf.Session() as sess:
    # tf.initialize_all_variables was deprecated in 2017 and later removed;
    # tf.global_variables_initializer is its replacement.
    sess.run(tf.global_variables_initializer())
    for i in range(10):
        for (x, y) in zip(train_X, train_Y):
            sess.run(train_op, feed_dict={X: x, Y: y})

    # Accept the fit if the parameters landed near the generating values.
    if 1 < sess.run(w) < 3 and 9 < sess.run(b) < 11:
        print("Success")
    else:
        print("Fail")
| [
"tensorflow.initialize_all_variables",
"tensorflow.Variable",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.train.GradientDescentOptimizer",
"numpy.linspace",
"numpy.random.randn",
"tensorflow.mul"
] | [((77, 100), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(100)'], {}), '(-1, 1, 100)\n', (88, 100), True, 'import numpy as np\n'), ((174, 197), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {}), "('float')\n", (188, 197), True, 'import tensorflow as tf\n'), ((202, 225), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {}), "('float')\n", (216, 225), True, 'import tensorflow as tf\n'), ((230, 261), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'name': '"""weight"""'}), "(0.0, name='weight')\n", (241, 261), True, 'import tensorflow as tf\n'), ((266, 295), 'tensorflow.Variable', 'tf.Variable', (['(0.0)'], {'name': '"""bias"""'}), "(0.0, name='bias')\n", (277, 295), True, 'import tensorflow as tf\n'), ((414, 426), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (424, 426), True, 'import tensorflow as tf\n'), ((350, 389), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.01)'], {}), '(0.01)\n', (383, 389), True, 'import tensorflow as tf\n'), ((449, 478), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (476, 478), True, 'import tensorflow as tf\n'), ((125, 156), 'numpy.random.randn', 'np.random.randn', (['*train_X.shape'], {}), '(*train_X.shape)\n', (140, 156), True, 'import numpy as np\n'), ((321, 333), 'tensorflow.mul', 'tf.mul', (['X', 'w'], {}), '(X, w)\n', (327, 333), True, 'import tensorflow as tf\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import pytest
import torch
from mmcv import Config
from numpy.testing import assert_almost_equal
from mmpose.datasets import DATASETS
def test_NVGesture_dataset():
    """Smoke-test NVGestureDataset construction and its AP evaluation."""
    dataset_info = Config.fromfile(
        'configs/_base_/datasets/nvgesture.py').dataset_info
    dataset_class = DATASETS.get('NVGestureDataset')

    data_cfg = dict(
        video_size=[320, 240],
        modality=['rgb', 'depth'],
        bbox_file='tests/data/nvgesture/bboxes.json',
    )

    # Both instantiations share the same (deep-copied) config object, as the
    # original test did.
    cfg_for_build = copy.deepcopy(data_cfg)

    def build(test_mode):
        return dataset_class(
            ann_file='tests/data/nvgesture/test_nvgesture.lst',
            vid_prefix='tests/data/nvgesture/',
            data_cfg=cfg_for_build,
            pipeline=[],
            dataset_info=dataset_info,
            test_mode=test_mode)

    _ = build(True)  # construction in test mode must not raise
    custom_dataset = build(False)

    assert custom_dataset.dataset_name == 'nvgesture'
    assert custom_dataset.test_mode is False
    assert len(custom_dataset) == 1

    sample = custom_dataset[0]
    label = sample['label']
    # Pseudo predictions: rgb scores the true class, depth a wrong one, so
    # the expected APs are 1.0 (rgb), 0.0 (depth) and 0.5 (mean).
    logits = {}
    for modal in sample['modality']:
        logits[modal] = torch.zeros(1, 25, 1)
    sample['logits'] = logits
    sample['logits']['rgb'][:, label] = 1
    sample['logits']['depth'][:, (label + 1) % 25] = 1
    sample['label'] = torch.tensor([label]).long()

    infos = custom_dataset.evaluate([sample], metric=['AP'])
    assert_almost_equal(infos['AP_rgb'], 1.0)
    assert_almost_equal(infos['AP_depth'], 0.0)
    assert_almost_equal(infos['AP_mean'], 0.5)

    with pytest.raises(KeyError):
        infos = custom_dataset.evaluate([sample], metric='mAP')
| [
"mmpose.datasets.DATASETS.get",
"numpy.testing.assert_almost_equal",
"torch.tensor",
"pytest.raises",
"copy.deepcopy",
"mmcv.Config.fromfile",
"torch.zeros"
] | [((380, 401), 'mmpose.datasets.DATASETS.get', 'DATASETS.get', (['dataset'], {}), '(dataset)\n', (392, 401), False, 'from mmpose.datasets import DATASETS\n'), ((582, 605), 'copy.deepcopy', 'copy.deepcopy', (['data_cfg'], {}), '(data_cfg)\n', (595, 605), False, 'import copy\n'), ((1662, 1703), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["infos['AP_rgb']", '(1.0)'], {}), "(infos['AP_rgb'], 1.0)\n", (1681, 1703), False, 'from numpy.testing import assert_almost_equal\n'), ((1708, 1751), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["infos['AP_depth']", '(0.0)'], {}), "(infos['AP_depth'], 0.0)\n", (1727, 1751), False, 'from numpy.testing import assert_almost_equal\n'), ((1756, 1798), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (["infos['AP_mean']", '(0.5)'], {}), "(infos['AP_mean'], 0.5)\n", (1775, 1798), False, 'from numpy.testing import assert_almost_equal\n'), ((281, 336), 'mmcv.Config.fromfile', 'Config.fromfile', (['"""configs/_base_/datasets/nvgesture.py"""'], {}), "('configs/_base_/datasets/nvgesture.py')\n", (296, 336), False, 'from mmcv import Config\n'), ((1351, 1372), 'torch.zeros', 'torch.zeros', (['(1)', '(25)', '(1)'], {}), '(1, 25, 1)\n', (1362, 1372), False, 'import torch\n'), ((1809, 1832), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (1822, 1832), False, 'import pytest\n'), ((1558, 1589), 'torch.tensor', 'torch.tensor', (["[sample['label']]"], {}), "([sample['label']])\n", (1570, 1589), False, 'import torch\n')] |
import operator
import threading
import functools
import itertools
import contextlib
import collections
import numpy as np
from ..autoray import (
get_lib_fn,
infer_backend,
get_dtype_name,
register_function,
astype,
)
# shared immutable default for ``kwargs`` - avoids allocating a dict per node
_EMPTY_DICT = {}
class LazyArray:
    """A lazy array representing a shaped node in a computational graph.
    """
    # fixed slots keep per-node memory low - graphs can contain many nodes
    __slots__ = (
        "_backend",
        "_fn",
        "_args",
        "_kwargs",
        "_shape",
        "_dtype",
        "_data",
        "_deps",
    )
    def __init__(
        self, backend, fn, args, kwargs, shape, dtype, deps=None,
    ):
        """Create a node directly from its computation and metadata.

        Parameters
        ----------
        backend : str
            Name of the backend library that will perform the computation.
        fn : callable
            Function producing this node's data when called.
        args, kwargs
            Arguments for ``fn``; may contain other ``LazyArray`` instances.
        shape : tuple of int
            Shape the resulting array will have.
        dtype : str
            Dtype name the resulting array will have.
        deps : tuple of LazyArray, optional
            Child nodes this node depends on; discovered automatically from
            ``args`` and ``kwargs`` when not supplied.
        """
        # info required to perform the computation
        self._backend = backend
        self._fn = fn
        self._args = args
        if kwargs is None:
            self._kwargs = _EMPTY_DICT
        else:
            self._kwargs = kwargs
        # resulting array information
        self._shape = shape
        self._dtype = dtype
        self._data = None
        # lazy arrays this ``LazyArray`` depends on
        if deps is None:
            # automatically find them
            self._deps = (*find_lazy(self._args), *find_lazy(self._kwargs))
        else:
            # manually specified (more efficient)
            self._deps = deps
    @classmethod
    def from_data(cls, data):
        """Create a new ``LazyArray`` directly from a concrete array.
        """
        obj = cls.__new__(cls)
        obj._backend = infer_backend(data)
        # leaf node: there is nothing to compute
        obj._fn = obj._args = obj._kwargs = None
        obj._shape = tuple(map(int, data.shape))
        obj._dtype = get_dtype_name(data)
        obj._data = data
        obj._deps = ()
        return obj
    @classmethod
    def from_shape(cls, shape, backend='numpy', dtype=None):
        """Create a new ``LazyArray`` with a given shape.
        """
        obj = cls.__new__(cls)
        obj._backend = backend
        obj._fn = obj._args = obj._kwargs = None
        obj._shape = tuple(map(int, shape))
        obj._dtype = dtype
        # sentinel marking this node as an input placeholder with no data yet
        obj._data = '__PLACEHOLDER__'
        obj._deps = ()
        return obj
    def to(
        self,
        fn,
        args,
        kwargs=None,
        backend=None,
        shape=None,
        dtype=None,
        deps=None,
    ):
        """Create a new ``LazyArray``, by default propagating backend, shape,
        dtype and deps from the current LazyArray.
        """
        return LazyArray(
            fn=fn,
            args=args,
            kwargs=kwargs,
            backend=backend if backend is not None else self._backend,
            shape=shape if shape is not None else self.shape,
            dtype=dtype if dtype is not None else self.dtype,
            deps=deps if deps is not None else (self,),
        )
    def _materialize(self):
        """Recursively compute all required args and kwargs for this node
        before computing itself and dereferencing dependencies. Note using this
        to materialize a large computation from scratch should be avoided due
        to the recursion limit, use ``x.compute()`` instead.
        """
        if self._data is None:
            # materialize any actual array args
            args = (maybe_materialize(x) for x in self._args)
            kwargs = {k: maybe_materialize(v) for k, v in self._kwargs.items()}
            self._data = self._fn(*args, **kwargs)
            # free any references to deps
            self._fn = self._args = self._kwargs = None
            self._deps = ()
        return self._data
    def __iter__(self):
        """Generate each unique computational node. Use ``ascend`` if you need
        to visit children before parents.
        """
        seen = set()
        queue = [self]
        # bind methods locally - these are hot inner-loop lookups
        queue_pop = queue.pop
        queue_extend = queue.extend
        seen_add = seen.add
        while queue:
            node = queue_pop()
            nid = id(node)
            if nid not in seen:
                yield node
                queue_extend(node._deps)
                seen_add(nid)
    def ascend(self):
        """Generate each unique computational node, from leaves to root.
        """
        seen = set()
        ready = set()
        queue = [self]
        queue_extend = queue.extend
        queue_pop = queue.pop
        ready_add = ready.add
        seen_add = seen.add
        # iterative post-order traversal: a node is only emitted once all of
        # its children have been marked ready (avoids recursion limits)
        while queue:
            node = queue[-1]
            need_to_visit = [c for c in node._deps if id(c) not in ready]
            if need_to_visit:
                queue_extend(need_to_visit)
            else:
                node = queue_pop()
                nid = id(node)
                ready_add(nid)
                if nid not in seen:
                    yield node
                    seen_add(nid)
    def compute(self):
        """Compute the value of this lazy array.

        Unlike ``self._materialize()`` this avoids deep recursion.
        """
        # children are materialized before parents, so each _materialize
        # call only performs shallow work
        for node in self.ascend():
            node._materialize()
        return self._data
    def compute_constants(self, variables):
        """Fold constant arrays - everything not dependent on ``variables`` -
        into the graph.
        """
        if isinstance(variables, LazyArray):
            variables = (variables,)
        variables = set(variables)
        # must ascend
        for node in self.ascend():
            if not any(c in variables for c in node._deps):
                # can fold
                node._materialize()
            else:
                # mark as variable
                variables.add(node)
    def as_string(self, params):
        """Create a string which evaluates to the lazy array creation.
        """
        # name function and store in locals
        fn_name = f"{getattr(self._fn, '__name__', 'fn')}{id(self._fn)}"
        params.setdefault(fn_name, self._fn)
        # string of args and kwargs
        # NOTE(review): kwargs are rendered as ``k: v`` which is not valid
        # call syntax (``k=v`` expected) - verify against actual kwarg use
        str_call = ", ".join(
            itertools.chain(
                (stringify(x, params) for x in self._args),
                (
                    f"{k}: {stringify(v, params)}"
                    for k, v in self._kwargs.items()
                ),
            )
        )
        # assign function call to new variable
        return f"x{id(self)} = {fn_name}({str_call})"
    def get_source(self, params=None):
        """Write the source code of an unravelled version of the computational
        graph, injecting required runtime objects into ``params``.
        """
        if params is None:
            # locals space mapping LazyArray names to values
            params = {}
        delete_checked = set()
        s = []  # source code lines
        for node in reversed(tuple(self.ascend())):
            # when *descending*, the first encounter of a node is the
            # *last* time it is referenced in forward pass -> delete,
            # need to do this for GC since running in single big function
            for c in node._deps:
                if c not in delete_checked:
                    if c._deps:
                        # is an intermediate - safe to delete
                        s.append(f"del x{id(c)}")
                    delete_checked.add(c)
            if node._data is None:
                # create the array via computation
                s.append(node.as_string(params))
            else:
                # inject the already computed data as constant
                params[f"x{id(node)}"] = node._data
        # reverse (ascend) into source code
        return "\n".join(reversed(s))
    def get_compiled(self, optimize=1):
        """Compile the function into a code object using ``compile``,
        returning a wrapper that executes it using ``exec`` and the 'locals'
        dict specifiying inputs which can be modified. It should be called
        like:

            fn, params = x.get_compiled()
            # modify params e.g. inject new arrays here before call
            ...
            fn(params)
        """
        # write source and populate locals mapping that function will run under
        params = {}
        source = self.get_source(params)
        # compile source
        code = compile(source, f"code{id(self)}", "exec", optimize=optimize)
        compiled = functools.partial(
            _code_exec_fn, code=code, out_name=f"x{id(self)}"
        )
        # need both function and locals mapping to run it with / modify args
        return compiled, params
    def get_function(self, variables, fold_constants=True):
        """Get a compiled function that computes ``fn(arrays)``, with ``fn``
        describing the computational graph of this ``LazyArray`` and ``arrays``
        corresponding to the downstream ``LazyArray`` nodes ``variables``.

        Parameters
        ----------
        variables : sequence of LazyArray
            Input nodes whose data can change between calls.
        fold_constants : bool, optional
            Compute all intermediates which do not depend on ``variables``
            prior to compilation.

        Returns
        -------
        fn : callable
            Function with signature ``fn(arrays)``.
        """
        if fold_constants:
            self.compute_constants(variables=variables)
        var_names = tuple(f"x{id(v)}" for v in variables)
        fn, params = self.get_compiled()
        return functools.partial(
            _array_fn, var_names=var_names, params=params, fn=fn
        )
    def history_max_size(self):
        """Get the largest single tensor size appearing in this computation.
        """
        return max(node.size for node in self)
    def history_size_footprint(self):
        """Get the combined size of intermediates at each step of the
        computation. Note this assumes that intermediates are immediately
        garbage collected when they are no longer required.
        """
        delete_checked = set()
        sizes = []
        for node in reversed(tuple(self.ascend())):
            for c in node._deps:
                if c not in delete_checked:
                    # last time a dependency is seen, subtract the size
                    if c._deps:
                        sizes.append(-c.size)
                    delete_checked.add(c)
            if node._data is None:
                # this is a new intermediate, add the size
                sizes.append(+node.size)
        # events were gathered in descending order - flip then running-sum
        sizes.reverse()
        return list(itertools.accumulate(sizes))
    def history_peak_size(self):
        """Get the peak combined intermediate size of this computation.
        """
        return max(self.history_size_footprint())
    def history_total_size(self):
        """The the total size of all unique arrays in the computational graph,
        possibly relevant e.g. for back-propagation algorithms.
        """
        return sum(node.size for node in self)
    def plot_history_size_footprint(
        self,
        log=None,
        figsize=(8, 2),
        color='purple',
        alpha=0.5,
        ax=None,
        return_fig=False,
    ):
        """Plot the memory footprint throughout this computation.

        Parameters
        ----------
        log : None or int, optional
            If not None, display the sizes in base ``log``.
        figsize : tuple, optional
            Size of the figure.
        color : str, optional
            Color of the line.
        alpha : float, optional
            Alpha of the line.
        ax : matplotlib.axes.Axes, optional
            Axes to plot on, will be created if not provided.
        return_fig : bool, optional
            If True, return the figure object, else just show and close it.
        """
        # deferred import: plotting is optional functionality
        import matplotlib.pyplot as plt
        y = np.array(self.history_size_footprint())
        if log:
            y = np.log2(y) / np.log2(log)
            ylabel = f'$\\log_{log}[SIZE]$'
        else:
            ylabel = 'SIZE'
        x = np.arange(y.size)
        if ax is None:
            fig, ax = plt.subplots(figsize=figsize)
        else:
            # caller supplied the axes - don't own (or restyle) a figure
            fig = None
        ax.fill_between(x, 0, y, alpha=alpha, color=color)
        if fig is not None:
            ax.grid(True, c=(0.95, 0.95, 0.95), which='both')
            ax.set_axisbelow(True)
            ax.set_xlim(0, np.max(x))
            ax.set_ylim(0, np.max(y))
            ax.set_ylabel(ylabel)
        if return_fig or fig is None:
            return fig
        else:
            plt.show()
            plt.close(fig)
    def to_nx_digraph(
        self,
        variables=None,
        var_color=(0, 0.5, 0.25),
        const_color=(0, 0.5, 1.0),
        root_color=(1, 0, 0.5),
        node_scale=5,
    ):
        """Convert this ``LazyArray`` into a ``networkx.DiGraph``, injecting
        various plotting information as properties.
        """
        import networkx as nx
        if variables is not None:
            if isinstance(variables, LazyArray):
                variables = (variables,)
            variables = set(variables)
            def is_variable(node):
                return node in variables
        else:
            def is_variable(_):
                return False
        def extract_props(node, **kwargs):
            # per-node plotting attributes (size scales with log2 of elements)
            v = is_variable(node)
            d = {
                "variable": v,
                "fn": getattr(node._fn, "__name__", "CONST"),
                "size": node_scale * np.log2(node.size) + node_scale,
                "color": var_color if v else const_color,
            }
            d.update(kwargs)
            if not node._deps:
                # leaf nodes get a lightened color
                d["color"] = tuple(x ** 0.2 for x in d["color"])
            return d
        G = nx.DiGraph()
        for node in self.ascend():
            # variable-ness propagates upwards from children to parents
            if any(is_variable(child) for child in node._deps):
                variables.add(node)
            G.add_node(node, **extract_props(node))
            for x in node._deps:
                G.add_edge(x, node)
        G.nodes[self]["color"] = root_color
        return G
    def plot(
        self,
        variables=None,
        initial_layout="spiral",
        iterations=0,
        k=None,
        connectionstyle="arc3,rad=0.2",
        arrowsize=5,
        edge_color=None,
        var_color=(0, 0.5, 0.25),
        const_color=(0, 0.5, 1.0),
        root_color=(1, 0, 0.5),
        node_scale=5,
        node_alpha=1.0,
        show_labels=True,
        label_alpha=0.2,
        label_color=None,
        font_size=8,
        figsize=(6, 6),
        ax=None,
        return_fig=False,
        **layout_opts,
    ):
        """Plot the computational graph of this ``LazyArray``.
        """
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        from matplotlib.colors import to_rgb
        import networkx as nx
        # choose a draw color that contrasts with the current figure theme
        isdark = sum(to_rgb(mpl.rcParams["figure.facecolor"])) / 3 < 0.5
        if isdark:
            draw_color = (0.75, 0.77, 0.80, 1.0)
        else:
            draw_color = (0.45, 0.47, 0.50, 1.0)
        # NOTE(review): ``edge_color`` is resolved here but ``draw_color`` is
        # what is passed to ``draw_networkx_edges`` below - verify intended
        if edge_color is None:
            edge_color = draw_color
        if label_color is None:
            label_color = mpl.rcParams["axes.labelcolor"]
        created_fig = ax is None
        if created_fig:
            fig, ax = plt.subplots(figsize=figsize, constrained_layout=True)
            ax.axis("off")
            ax.set_aspect("equal")
        G = self.to_nx_digraph(
            variables=variables,
            var_color=var_color,
            const_color=const_color,
            root_color=root_color,
            node_scale=node_scale,
        )
        if initial_layout == "spiral":
            layout_opts.setdefault("equidistant", True)
        pos = getattr(nx, initial_layout + "_layout")(G, **layout_opts)
        if iterations:
            # optionally relax the initial layout with a spring model
            pos = nx.layout.spring_layout(
                G, pos=pos, k=k, iterations=iterations
            )
        nx.draw_networkx_edges(
            G,
            pos=pos,
            ax=ax,
            edge_color=draw_color,
            connectionstyle=connectionstyle,
            arrowsize=arrowsize,
            arrows=True,
        )
        nx.draw_networkx_nodes(
            G,
            pos=pos,
            ax=ax,
            node_color=[G.nodes[x]["color"] for x in G.nodes],
            node_size=[G.nodes[x]["size"] for x in G.nodes],
            alpha=node_alpha,
        )
        if show_labels:
            nx.draw_networkx_labels(
                G,
                pos=pos,
                ax=ax,
                labels={x: G.nodes[x]["fn"] for x in G.nodes},
                font_color=label_color,
                font_size=font_size,
                alpha=label_alpha,
                bbox={
                    "color": to_rgb(mpl.rcParams["figure.facecolor"]),
                    "alpha": label_alpha,
                },
            )
        if not created_fig:
            return
        if return_fig:
            return fig
        else:
            plt.show()
            plt.close(fig)
    # ------------------------------ properties ----------------------------- #
    @property
    def fn(self):
        # the function that will produce (or produced) this node's data
        return self._fn
    @property
    def fn_name(self):
        return getattr(self._fn, "__name__", "None")
    @property
    def args(self):
        return self._args
    @property
    def kwargs(self):
        return self._kwargs
    @property
    def shape(self):
        return self._shape
    @property
    def ndim(self):
        return len(self._shape)
    @property
    def size(self):
        # total number of elements
        return functools.reduce(operator.mul, self.shape, 1)
    @property
    def dtype(self):
        return self._dtype
    @property
    def backend(self):
        return self._backend
    @property
    def deps(self):
        return self._deps
    def __getitem__(self, key):
        return getitem(self, key)
    # this makes numpy operations delegate to __rmatmul__ etc.
    __array_ufunc__ = None
    # --------------------------- operator overloads ------------------------ #
    def __mul__(self, other):
        return multiply(self, other)
    def __rmul__(self, other):
        # multiplication is commutative so operand order is irrelevant
        return multiply(self, other)
    def __add__(self, other):
        return add(self, other)
    def __radd__(self, other):
        return add(self, other)
    def __sub__(self, other):
        return sub(self, other)
    def __rsub__(self, other):
        return sub(other, self)
    def __floordiv__(self, other):
        return floordivide(self, other)
    def __rfloordiv__(self, other):
        return floordivide(other, self)
    def __truediv__(self, other):
        return truedivide(self, other)
    def __rtruediv__(self, other):
        return truedivide(other, self)
    def __pow__(self, other):
        return pow_(self, other)
    def __rpow__(self, other):
        return pow_(other, self)
    def __matmul__(self, other):
        return matmul(self, other)
    def __rmatmul__(self, other):
        return matmul(other, self)
    def __abs__(self):
        return abs_(self)
    @property
    def T(self):
        # lazy transpose
        return transpose(self)
    @property
    def H(self):
        # lazy conjugate transpose
        return conj(transpose(self))
    @property
    def real(self):
        return real(self)
    @property
    def imag(self):
        return imag(self)
    def __repr__(self):
        return (
            f"<{self.__class__.__name__}("
            f"fn={self.fn_name}, "
            f"shape={self.shape}, "
            f"dtype={self.dtype}, "
            f"backend='{self.backend}')>"
        )
def ensure_lazy(array):
    # wrap a concrete array as a leaf LazyArray; pass LazyArrays through
    if not isinstance(array, LazyArray):
        return LazyArray.from_data(array)
    return array
def find_lazy(x):
    """Recursively search for ``LazyArray`` instances in pytrees.
    """
    if isinstance(x, LazyArray):
        yield x
        return
    if isinstance(x, (tuple, list)):
        for subx in x:
            yield from find_lazy(subx)
        return
    if isinstance(x, dict):
        # only the values of a dict can contain lazy arrays
        for subx in x.values():
            yield from find_lazy(subx)
        return
# --------------------- recusively evaluating 'pytrees' --------------------- #
def materialize_larray(x):
    # compute (and cache) the concrete data of a lazy node
    return x._materialize()
def materialize_tuple(x):
    return tuple(map(maybe_materialize, x))
def materialize_list(x):
    return list(map(maybe_materialize, x))
def materialize_dict(x):
    return {k: maybe_materialize(v) for k, v in x.items()}
def materialize_identity(x):
    # anything else is already concrete
    return x
# dispatch on exact type; unknown types fall through to identity
_materialize_dispatch = collections.defaultdict(lambda: materialize_identity, {
    LazyArray: materialize_larray,
    tuple: materialize_tuple,
    list: materialize_list,
    dict: materialize_dict,
})
def maybe_materialize(x):
    """Recursively evaluate LazyArray instances in tuples, lists and dicts.
    """
    return _materialize_dispatch[x.__class__](x)
# -------------------- recusively stringifying 'pytrees' -------------------- #
def stringify_larray(x, params):
    # lazy nodes are referenced by name; pre-computed data is injected into
    # the ``params`` namespace under that name
    name = f"x{id(x)}"
    if x._data is not None:
        params.setdefault(name, x._data)
    return name
def stringify_tuple(x, params):
    if not x:
        return "()"
    # trailing comma keeps 1-tuples valid source
    return f"({', '.join(stringify(xi, params) for xi in x)},)"
def stringify_list(x, params):
    return f"[{', '.join(stringify(xi, params) for xi in x)}]"
def stringify_dict(x, params):
    # NOTE(review): keys are interpolated bare - string keys will not
    # round-trip as valid python source; verify against actual usage
    entries = (f"{k}: {stringify(v, params)}" for k, v in x.items())
    return f"{{{', '.join(entries)}}}"
def stringify_identity(x, params):
    # simple literals can be embedded directly in the generated source
    if isinstance(x, (int, float, complex, bool, slice, range)):
        return f"{x}"
    if isinstance(x, str):
        return f"'{x}'"
    # anything else is passed by reference via the params namespace
    name = f"c{id(x)}"
    params.setdefault(name, x)
    return name
# dispatch on exact type; unknown types fall through to identity
_stringify_dispatch = collections.defaultdict(lambda: stringify_identity, {
    LazyArray: stringify_larray,
    tuple: stringify_tuple,
    list: stringify_list,
    dict: stringify_dict,
})
def stringify(x, params):
    """Recursively stringify LazyArray instances in tuples, lists and dicts.
    """
    return _stringify_dispatch[x.__class__](x, params)
def _code_exec_fn(params, code, out_name):
    # run the pre-compiled code object with ``params`` as its locals, then
    # return the variable holding the root node's result
    exec(code, None, params)
    return params[out_name]
def _array_fn(arrays, var_names, fn, params):
# inject the new arrays
for name, array in zip(var_names, arrays):
params[name] = array
# run the byte-compiled function with the new locals
return fn(params)
# --------------------------------- caching --------------------------------- #
# per-thread stacks of active sharing caches, keyed by thread id
_SHARING_STACK = collections.defaultdict(list)
def currently_sharing():
    """Check if we are currently sharing a cache -- thread specific.
    """
    return threading.get_ident() in _SHARING_STACK
def get_sharing_cache():
    """Return the most recent sharing cache -- thread specific.
    """
    return _SHARING_STACK[threading.get_ident()][-1]
def _add_sharing_cache(cache):
    # push a cache onto this thread's stack
    _SHARING_STACK[threading.get_ident()].append(cache)
def _remove_sharing_cache():
    # pop this thread's innermost cache, dropping the whole entry once no
    # caches remain (so ``currently_sharing`` becomes False again)
    tid = threading.get_ident()
    _SHARING_STACK[tid].pop()
    if not _SHARING_STACK[tid]:
        del _SHARING_STACK[tid]
@contextlib.contextmanager
def shared_intermediates(cache=None):
    """Context in which contract intermediate results are shared.

    Note that intermediate computations will not be garbage collected until
    1. this context exits, and
    2. the yielded cache is garbage collected (if it was captured).

    Parameters
    ----------
    cache : dict
        If specified, a user-stored dict in which intermediate results will
        be stored. This can be used to interleave sharing contexts.

    Returns
    -------
    cache : dict
        A dictionary in which sharing results are stored. If ignored,
        sharing results will be garbage collected when this context is
        exited. This dict can be passed to another context to resume
        sharing.
    """
    if cache is None:
        cache = {}
    _add_sharing_cache(cache)
    try:
        yield cache
    finally:
        # always pop the cache, even if the managed block raised
        _remove_sharing_cache()
def maybe_id(x):
    """Key-ify ``x``: array-likes (anything exposing a ``shape``) are keyed
    by identity, everything else by value.
    """
    return id(x) if hasattr(x, "shape") else x

def hash_args_kwargs(fn_name, *args, **kwargs):
    """Build a deterministic cache-key string for a call to ``fn_name``
    with ``args`` and ``kwargs``, using ``maybe_id`` so arrays hash by
    identity rather than content.
    """
    hargs = tuple(maybe_id(a) for a in args)
    hkwargs = (
        tuple(sorted((k, maybe_id(v)) for k, v in kwargs.items()))
        if kwargs
        else None
    )
    return f"{fn_name}-{hash((hargs, hkwargs))}"
def lazy_cache(fn_name, hasher=None):
    """Decorator that memoizes a lazy function into the currently active
    sharing cache (when inside a ``shared_intermediates`` context), keyed
    by ``hasher`` (defaults to ``hash_args_kwargs``).
    """
    if hasher is None:
        hasher = hash_args_kwargs
    def wrapper(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            if not currently_sharing():
                # no active sharing context -> no caching at all
                return fn(*args, **kwargs)
            cache = get_sharing_cache()
            key = hasher(fn_name, *args, **kwargs)
            if key not in cache:
                cache[key] = fn(*args, **kwargs)
            return cache[key]
        return wrapped
    return wrapper
_DTYPES_REAL_EQUIV = {"complex128": "float64", "complex64": "float32"}
# the complex table is exactly the inverse mapping of the real table
_DTYPES_COMPLEX_EQUIV = {v: k for k, v in _DTYPES_REAL_EQUIV.items()}

@functools.lru_cache(None)
def dtype_real_equiv(dtype_name):
    """Map a complex dtype name to its real counterpart; other dtype names
    pass through unchanged.
    """
    return _DTYPES_REAL_EQUIV.get(dtype_name, dtype_name)

@functools.lru_cache(None)
def dtype_complex_equiv(dtype_name):
    """Map a real dtype name to its complex counterpart; other dtype names
    pass through unchanged.
    """
    return _DTYPES_COMPLEX_EQUIV.get(dtype_name, dtype_name)
@functools.lru_cache(None)
def _find_common_dtype(array_types, scalar_types):
return np.find_common_type(array_types, scalar_types).name
def find_common_dtype(*xs):
    # promote the dtypes of all the arguments to a single common dtype name
    return _find_common_dtype(tuple(map(get_dtype_name, xs)), ())
def find_common_backend(*xs):
    """Choose the backend for an operation combining ``xs``.

    Preference order: any argument whose ``backend`` is ``'autoray.lazy'``
    (a LazyArray itself backed by LazyArrays), then the first argument
    exposing a ``backend`` attribute, then the inferred backend of the
    first array-like (``shape``-carrying) argument. Returns ``None`` when
    nothing matches.
    """
    fallback = None
    for x in xs:
        candidate = getattr(x, "backend", None)
        if candidate == "autoray.lazy":
            # a lazy-of-lazy argument wins immediately
            return candidate
        if (fallback is None) and (candidate is not None):
            fallback = candidate
    if fallback is not None:
        return fallback
    # no LazyArray arguments at all - infer from the first raw array
    for x in xs:
        if hasattr(x, "shape"):
            return infer_backend(x)
    return None
@functools.lru_cache(1024)
def find_broadcast_shape(xshape, yshape):
    """Return the shape produced by broadcasting ``xshape`` with ``yshape``.

    The shorter shape is left-padded with singleton dimensions and then
    dimensions are combined pairwise with ``max`` (numpy's broadcasting
    rule for compatible shapes).

    Parameters
    ----------
    xshape, yshape : tuple of int
        The (hashable) shapes to broadcast together.

    Returns
    -------
    tuple of int
    """
    xndim = len(xshape)
    yndim = len(yshape)
    # left-pad the shorter shape, *keeping* its original dimensions -
    # previously the padding replaced the shape entirely, dropping its
    # trailing dims and truncating the zip below
    if xndim < yndim:
        xshape = (1,) * (yndim - xndim) + xshape
    elif yndim < xndim:
        yshape = (1,) * (xndim - yndim) + yshape
    return tuple(max(d1, d2) for d1, d2 in zip(xshape, yshape))
# -------------------------------- interface -------------------------------- #
def Variable(shape, backend=None, dtype=None):
    """Create a ``LazyArray`` from a shape only, representing a leaf node
    in the computational graph. It can only act as a placeholder for data.
    """
    # the node's data stays a placeholder until supplied at execution time
    return LazyArray.from_shape(shape, backend=backend, dtype=dtype)
@lazy_cache("array")
def array(x):
    """Create a ``LazyArray`` from an input array, representing a leaf node
    in the computational graph.
    """
    # cached so the same input maps to the same node inside sharing contexts
    return LazyArray.from_data(x)
@lazy_cache("transpose")
def transpose(a, axes=None):
    """Lazily permute the axes of ``a`` (all axes reversed when ``axes`` is
    None), fusing directly chained transpositions into a single one.
    """
    a = ensure_lazy(a)
    fn_transpose = get_lib_fn(a.backend, "transpose")
    if axes is None:
        # default: reverse all the axes
        axes = range(a.ndim)[::-1]
    newshape = tuple(a.shape[i] for i in axes)
    # check for chaining transpositions
    if a._fn is fn_transpose:
        b = a._args[0]
        if isinstance(b, LazyArray):
            # compose the two permutations and transpose ``b`` directly
            axes_prev = a._args[1]
            axes_chained = tuple(axes_prev[k] for k in axes)
            return b.to(fn_transpose, (b, axes_chained), shape=newshape)
    return a.to(fn_transpose, (a, axes), shape=newshape)
@lazy_cache("reshape")
def _reshape_tuple(a, newshape):
    """Lazy reshape of ``a`` to the fully resolved tuple ``newshape``,
    collapsing directly chained reshapes into a single one.
    """
    a = ensure_lazy(a)
    fn_reshape = get_lib_fn(a.backend, "reshape")
    # check for redundant reshapes
    if a._fn is fn_reshape:
        b = a._args[0]
        if isinstance(b, LazyArray):
            # reshape ``b`` directly, skipping the intermediate
            a = b
    return a.to(fn_reshape, (a, newshape), shape=newshape)
@functools.lru_cache(1024)
def find_full_reshape(newshape, size):
    """Resolve a single ``-1`` entry in ``newshape`` so that the total
    number of elements equals ``size``; shapes without ``-1`` pass through
    unchanged.
    """
    try:
        i = newshape.index(-1)
    except ValueError:
        # no wildcard dimension present
        return newshape
    rest = newshape[:i] + newshape[i + 1:]
    known = functools.reduce(operator.mul, rest, 1)
    return newshape[:i] + (size // known,) + newshape[i + 1:]
def reshape(a, newshape):
    # lazy reshape - first resolve any single ``-1`` entry against ``a.size``
    newshape = find_full_reshape(tuple(newshape), a.size)
    return _reshape_tuple(a, newshape)
def getitem_hasher(_, a, key):
    """Cache-key hasher for ``getitem``: slices are keyed by their string
    form, array-likes by identity, everything else by value.
    """
    if not isinstance(key, tuple):
        key = (key,)
    parts = []
    for k in key:
        if isinstance(k, slice):
            parts.append(str(k))
        elif hasattr(k, "shape"):
            parts.append(id(k))
        else:
            parts.append(k)
    return f"getitem-{hash((id(a), tuple(parts)))}"
@lazy_cache("getitem", hasher=getitem_hasher)
def getitem(a, key):
    """Lazy indexing/slicing of ``a`` with ``key``, computing the resulting
    shape eagerly. Supports ellipsis expansion, slices, scalar indices and
    lazy (LazyArray) integer-array indices.
    """
    a = ensure_lazy(a)
    deps = (a,)
    if not isinstance(key, tuple):
        key = (key,)
    try:
        # expand ellipsis
        expand = key.index(...)
        ndiff = a.ndim - len(key) + 1
        key = key[:expand] + (slice(None),) * ndiff + key[expand + 1:]
    except ValueError:
        # else pad trailing slices if necessary
        ndiff = a.ndim - len(key)
        if ndiff:
            key = key + (slice(None),) * ndiff
    newshape = []
    for k, d in zip(key, a.shape):
        if isinstance(k, LazyArray):
            # lazy integer-array indexing - the index is a dependency too
            newshape.append(k.size)
            deps += (k,)
        elif isinstance(k, slice):
            # size of the slice applied to a dimension of length ``d``
            newshape.append(len(range(d)[k]))
        else:
            try:
                newshape.append(len(k))
            except TypeError:
                # scalar index -> this dimension is dropped
                pass
    # TODO: np.newaxis == None
    newshape = tuple(newshape)
    return a.to(operator.getitem, (a, key), shape=newshape, deps=deps)
@lazy_cache("tensordot")
def tensordot(a, b, axes=2):
    """Lazy tensor contraction of ``a`` with ``b``.

    Parameters
    ----------
    a, b : array_like or LazyArray
        The arrays to contract.
    axes : int or (tuple, tuple), optional
        An integer ``N`` contracts the last ``N`` axes of ``a`` with the
        *first* ``N`` axes of ``b`` (matching ``numpy.tensordot``);
        otherwise an explicit pair of axis tuples.

    Returns
    -------
    LazyArray
    """
    if isinstance(axes, int):
        # fix: the second entry used to be ``range(b.ndim)``, which
        # contracted over *all* of b's axes whenever ``axes != b.ndim``
        axes = (tuple(range(a.ndim - axes, a.ndim)), tuple(range(axes)))
    # output keeps the uncontracted dims of ``a`` followed by those of ``b``
    newshape = tuple(
        d for i, d in enumerate(a.shape) if i not in axes[0]
    ) + tuple(d for i, d in enumerate(b.shape) if i not in axes[1])
    newdtype = find_common_dtype(a, b)
    backend = find_common_backend(a, b)
    fn_tensordot = get_lib_fn(backend, "tensordot")
    return LazyArray(
        backend=backend,
        fn=fn_tensordot,
        args=(a, b, axes),
        kwargs=None,
        shape=newshape,
        dtype=newdtype,
        deps=tuple(x for x in (a, b) if isinstance(x, LazyArray)),
    )
@lazy_cache("einsum")
def einsum(*operands):
    """Lazy version of ``einsum``, parsing the operands with ``opt_einsum``
    and computing the output shape from the index sizes.
    """
    from opt_einsum.parser import parse_einsum_input
    # note: ``deps`` here is the comma separated *input subscripts* string
    deps, output, larrays = parse_einsum_input(operands)
    # infer the size of each index label from the operand shapes
    size_dict = {}
    for term, op in zip(deps.split(","), larrays):
        for i, char in enumerate(term):
            size_dict[char] = max(size_dict.get(char, 1), op.shape[i])
    eq = deps + "->" + output
    newshape = tuple(size_dict[char] for char in output)
    backend = find_common_backend(*larrays)
    newdtype = find_common_dtype(*larrays)
    fn_einsum = get_lib_fn(backend, "einsum")
    return LazyArray(
        backend=backend,
        fn=fn_einsum,
        args=(eq, *larrays),
        kwargs=None,
        shape=newshape,
        dtype=newdtype,
        deps=tuple(x for x in larrays if isinstance(x, LazyArray)),
    )
@lazy_cache("trace")
def trace(a):
    # lazy trace - the result is a scalar (shape ())
    a = ensure_lazy(a)
    return a.to(fn=get_lib_fn(a.backend, "trace"), args=(a,), shape=(),)
@lazy_cache("matmul")
def matmul(x1, x2):
    """Lazy matrix multiply ``x1 @ x2``.
    """
    backend = find_common_backend(x1, x2)
    newdtype = find_common_dtype(x1, x2)
    # NOTE(review): shape rule drops x1's last and x2's first dimension -
    # correct for 2D operands; broadcast/batched matmul shapes are not
    # handled here - verify against callers
    newshape = (*x1.shape[:-1], *x2.shape[1:])
    return LazyArray(
        backend=backend,
        fn=operator.matmul,
        args=(x1, x2),
        kwargs=None,
        shape=newshape,
        dtype=newdtype,
        deps=tuple(x for x in (x1, x2) if isinstance(x, LazyArray)),
    )
@lazy_cache("clip")
def clip(a, a_min, a_max):
    # lazily clip the values of ``a`` into the range [a_min, a_max]
    a = ensure_lazy(a)
    fn_clip = get_lib_fn(a.backend, "clip")
    return a.to(fn_clip, (a, a_min, a_max))
@lazy_cache("flip")
def flip(a, axis=None):
    # lazily reverse ``a`` along ``axis``
    a = ensure_lazy(a)
    fn_flip = get_lib_fn(a.backend, "flip")
    return a.to(fn_flip, (a, axis))
@lazy_cache("sort")
def sort(a, axis=-1):
    # lazily sort ``a`` along ``axis``
    a = ensure_lazy(a)
    return a.to(get_lib_fn(a.backend, "sort"), (a, axis))
@lazy_cache("argsort")
def argsort(a, axis=-1):
    # lazily compute the indices that would sort ``a`` along ``axis``;
    # the result dtype is integral regardless of the input dtype
    a = ensure_lazy(a)
    return a.to(
        fn=get_lib_fn(a.backend, "argsort"), args=(a, axis), dtype="int",
    )
@lazy_cache("stack")
def stack(arrays, axis=0):
    """Lazily join ``arrays`` along a new axis.

    Parameters
    ----------
    arrays : sequence of array_like or LazyArray
        The arrays to stack.
    axis : int, optional
        Position of the new axis in the result; negative values count from
        the end, matching ``numpy.stack`` (``axis=-1`` appends it last).

    Returns
    -------
    LazyArray
    """
    arrays = tuple(arrays)
    newdtype = find_common_dtype(*arrays)
    newshape = list(arrays[0].shape)
    # normalize a negative axis against the *result* ndim (len + 1): the
    # previous ``axis + 1`` trick mapped axis=-1 to ``insert(0, ...)``,
    # putting the new axis at the front instead of the end
    newshape.insert(
        axis if axis >= 0 else axis + len(newshape) + 1, len(arrays)
    )
    backend = find_common_backend(*arrays)
    fn = get_lib_fn(backend, "stack")
    return LazyArray(
        backend=backend,
        fn=fn,
        args=(arrays, axis),
        kwargs=None,
        shape=tuple(newshape),
        dtype=newdtype,
        deps=tuple(x for x in arrays if isinstance(x, LazyArray)),
    )
def make_binary_func(name, fn):
    """Build a lazy, cache-aware elementwise binary function ``name``
    wrapping ``fn``, broadcasting the operand shapes and promoting dtypes.
    """
    @lazy_cache(name)
    def binary_func(x1, x2):
        newdtype = find_common_dtype(x1, x2)
        # scalar operands (no ``shape`` attribute) broadcast as shape ()
        x1shape = getattr(x1, "shape", ())
        x2shape = getattr(x2, "shape", ())
        newshape = find_broadcast_shape(x1shape, x2shape)
        return LazyArray(
            backend=find_common_backend(x1, x2),
            fn=fn,
            args=(x1, x2),
            kwargs=None,
            shape=newshape,
            dtype=newdtype,
            deps=tuple(x for x in (x1, x2) if isinstance(x, LazyArray)),
        )
    return binary_func
# lazy elementwise binary operations (also used by the operator overloads)
multiply = make_binary_func("multiply", operator.mul)
add = make_binary_func("add", operator.add)
sub = make_binary_func("sub", operator.sub)
floordivide = make_binary_func("floordivide", operator.floordiv)
truedivide = make_binary_func("truedivide", operator.truediv)
pow_ = make_binary_func("pow", operator.pow)
def make_unary_func(name, to_real=False):
    """Build a lazy elementwise unary function ``name``.

    When ``to_real`` is True the output dtype is the real equivalent of
    the input dtype (e.g. ``abs`` of a complex array is real valued).
    """
    if to_real:
        def get_newdtype(x):
            return dtype_real_equiv(x.dtype)
    else:
        def get_newdtype(x):
            # None -> propagate the input dtype unchanged
            return None
    @lazy_cache(name)
    def unary_func(x):
        x = ensure_lazy(x)
        newdtype = get_newdtype(x)
        return x.to(fn=get_lib_fn(x.backend, name), args=(x,), dtype=newdtype,)
    return unary_func
# lazy elementwise unary functions; those marked ``to_real`` always
# produce a real-valued result
sin = make_unary_func("sin")
cos = make_unary_func("cos")
tan = make_unary_func("tan")
arcsin = make_unary_func("arcsin")
arccos = make_unary_func("arccos")
arctan = make_unary_func("arctan")
sinh = make_unary_func("sinh")
cosh = make_unary_func("cosh")
tanh = make_unary_func("tanh")
arcsinh = make_unary_func("arcsinh")
arccosh = make_unary_func("arccosh")
arctanh = make_unary_func("arctanh")
exp = make_unary_func("exp")
log = make_unary_func("log")
log2 = make_unary_func("log2")
log10 = make_unary_func("log10")
conj = make_unary_func("conj")
sign = make_unary_func("sign")
abs_ = make_unary_func("abs", to_real=True)
angle = make_unary_func("angle", to_real=True)
real = make_unary_func("real", to_real=True)
imag = make_unary_func("imag", to_real=True)
def make_reduction_func(name):
    """Build a lazy reduction function ``name`` (e.g. ``'sum'``).

    The returned function reduces ``a`` over ``axis``: ``None`` reduces
    over all axes (scalar result), otherwise a single int or a sequence of
    ints (negative values count from the end).
    """
    @lazy_cache(name)
    def reduction_func(a, axis=None):
        a = ensure_lazy(a)
        fn = get_lib_fn(a.backend, name)
        nd = a.ndim
        if axis is None:
            # full reduction -> scalar
            return a.to(fn=fn, args=(a,), shape=(),)
        elif not hasattr(axis, "__len__"):
            axis = (axis,)
        # normalize negative axes: -1 means the last axis, i.e. nd - 1
        # (fix: the previous ``nd - i`` mapped e.g. -1 to the
        # out-of-range index nd + 1)
        axis = tuple(nd + i if i < 0 else i for i in axis)
        newshape = tuple(d for i, d in enumerate(a.shape) if i not in axis)
        return a.to(fn=fn, args=(a, axis), shape=newshape)
    return reduction_func
# lazy reductions over one, several or all axes
sum_ = make_reduction_func("sum")
prod = make_reduction_func("prod")
min_ = make_reduction_func("min")
max_ = make_reduction_func("max")
# # XXX: still missing
# allclose, complex, diag
# dot, vdot, kron, inner, outer
# pad, eye
# squeeze, expand_dims
# to_numpy
# ---------------------------- autoray specials ----------------------------- #
def lazy_get_dtype_name(x):
    # the dtype name of a LazyArray is tracked symbolically
    return x.dtype
@lazy_cache("astype")
def lazy_astype(x, dtype_name):
    # lazily cast ``x`` to ``dtype_name``
    x = ensure_lazy(x)
    return x.to(fn=astype, args=(x, dtype_name), dtype=dtype_name,)
# register the overrides so autoray dispatches correctly for the
# 'autoray.lazy' backend
register_function("autoray.lazy", "get_dtype_name", lazy_get_dtype_name)
register_function("autoray.lazy", "astype", lazy_astype)
| [
"itertools.chain",
"networkx.draw_networkx_nodes",
"matplotlib.colors.to_rgb",
"numpy.arange",
"networkx.DiGraph",
"functools.wraps",
"numpy.max",
"matplotlib.pyplot.close",
"threading.get_ident",
"opt_einsum.parser.parse_einsum_input",
"functools.reduce",
"numpy.log2",
"matplotlib.pyplot.sh... | [((20196, 20365), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : materialize_identity)', '{LazyArray: materialize_larray, tuple: materialize_tuple, list:\n materialize_list, dict: materialize_dict}'], {}), '(lambda : materialize_identity, {LazyArray:\n materialize_larray, tuple: materialize_tuple, list: materialize_list,\n dict: materialize_dict})\n', (20219, 20365), False, 'import collections\n'), ((21400, 21559), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : stringify_identity)', '{LazyArray: stringify_larray, tuple: stringify_tuple, list: stringify_list,\n dict: stringify_dict}'], {}), '(lambda : stringify_identity, {LazyArray:\n stringify_larray, tuple: stringify_tuple, list: stringify_list, dict:\n stringify_dict})\n', (21423, 21559), False, 'import collections\n'), ((22172, 22201), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (22195, 22201), False, 'import collections\n'), ((24693, 24718), 'functools.lru_cache', 'functools.lru_cache', (['None'], {}), '(None)\n', (24712, 24718), False, 'import functools\n'), ((24814, 24839), 'functools.lru_cache', 'functools.lru_cache', (['None'], {}), '(None)\n', (24833, 24839), False, 'import functools\n'), ((24941, 24966), 'functools.lru_cache', 'functools.lru_cache', (['None'], {}), '(None)\n', (24960, 24966), False, 'import functools\n'), ((25771, 25796), 'functools.lru_cache', 'functools.lru_cache', (['(1024)'], {}), '(1024)\n', (25790, 25796), False, 'import functools\n'), ((27551, 27576), 'functools.lru_cache', 'functools.lru_cache', (['(1024)'], {}), '(1024)\n', (27570, 27576), False, 'import functools\n'), ((22639, 22660), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (22658, 22660), False, 'import threading\n'), ((30128, 30156), 'opt_einsum.parser.parse_einsum_input', 'parse_einsum_input', (['operands'], {}), '(operands)\n', (30146, 30156), False, 'from opt_einsum.parser import 
parse_einsum_input\n'), ((9296, 9367), 'functools.partial', 'functools.partial', (['_array_fn'], {'var_names': 'var_names', 'params': 'params', 'fn': 'fn'}), '(_array_fn, var_names=var_names, params=params, fn=fn)\n', (9313, 9367), False, 'import functools\n'), ((11858, 11875), 'numpy.arange', 'np.arange', (['y.size'], {}), '(y.size)\n', (11867, 11875), True, 'import numpy as np\n'), ((13582, 13594), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (13592, 13594), True, 'import networkx as nx\n'), ((15778, 15913), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G'], {'pos': 'pos', 'ax': 'ax', 'edge_color': 'draw_color', 'connectionstyle': 'connectionstyle', 'arrowsize': 'arrowsize', 'arrows': '(True)'}), '(G, pos=pos, ax=ax, edge_color=draw_color,\n connectionstyle=connectionstyle, arrowsize=arrowsize, arrows=True)\n', (15800, 15913), True, 'import networkx as nx\n'), ((16013, 16181), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G'], {'pos': 'pos', 'ax': 'ax', 'node_color': "[G.nodes[x]['color'] for x in G.nodes]", 'node_size': "[G.nodes[x]['size'] for x in G.nodes]", 'alpha': 'node_alpha'}), "(G, pos=pos, ax=ax, node_color=[G.nodes[x]['color'] for\n x in G.nodes], node_size=[G.nodes[x]['size'] for x in G.nodes], alpha=\n node_alpha)\n", (16035, 16181), True, 'import networkx as nx\n'), ((17341, 17386), 'functools.reduce', 'functools.reduce', (['operator.mul', 'self.shape', '(1)'], {}), '(operator.mul, self.shape, 1)\n', (17357, 17386), False, 'import functools\n'), ((22317, 22338), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (22336, 22338), False, 'import threading\n'), ((24151, 24170), 'functools.wraps', 'functools.wraps', (['fn'], {}), '(fn)\n', (24166, 24170), False, 'import functools\n'), ((25029, 25075), 'numpy.find_common_type', 'np.find_common_type', (['array_types', 'scalar_types'], {}), '(array_types, scalar_types)\n', (25048, 25075), True, 'import numpy as np\n'), ((10366, 10393), 
'itertools.accumulate', 'itertools.accumulate', (['sizes'], {}), '(sizes)\n', (10386, 10393), False, 'import itertools\n'), ((11922, 11951), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (11934, 11951), True, 'import matplotlib.pyplot as plt\n'), ((12373, 12383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12381, 12383), True, 'import matplotlib.pyplot as plt\n'), ((12396, 12410), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (12405, 12410), True, 'import matplotlib.pyplot as plt\n'), ((15132, 15186), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'constrained_layout': '(True)'}), '(figsize=figsize, constrained_layout=True)\n', (15144, 15186), True, 'import matplotlib.pyplot as plt\n'), ((15675, 15738), 'networkx.layout.spring_layout', 'nx.layout.spring_layout', (['G'], {'pos': 'pos', 'k': 'k', 'iterations': 'iterations'}), '(G, pos=pos, k=k, iterations=iterations)\n', (15698, 15738), True, 'import networkx as nx\n'), ((16849, 16859), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16857, 16859), True, 'import matplotlib.pyplot as plt\n'), ((16872, 16886), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (16881, 16886), True, 'import matplotlib.pyplot as plt\n'), ((22482, 22503), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (22501, 22503), False, 'import threading\n'), ((11733, 11743), 'numpy.log2', 'np.log2', (['y'], {}), '(y)\n', (11740, 11743), True, 'import numpy as np\n'), ((11746, 11758), 'numpy.log2', 'np.log2', (['log'], {}), '(log)\n', (11753, 11758), True, 'import numpy as np\n'), ((12202, 12211), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (12208, 12211), True, 'import numpy as np\n'), ((12240, 12249), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (12246, 12249), True, 'import numpy as np\n'), ((22561, 22582), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (22580, 22582), False, 
'import threading\n'), ((27798, 27828), 'itertools.chain', 'itertools.chain', (['before', 'after'], {}), '(before, after)\n', (27813, 27828), False, 'import itertools\n'), ((14710, 14750), 'matplotlib.colors.to_rgb', 'to_rgb', (["mpl.rcParams['figure.facecolor']"], {}), "(mpl.rcParams['figure.facecolor'])\n", (14716, 14750), False, 'from matplotlib.colors import to_rgb\n'), ((13318, 13336), 'numpy.log2', 'np.log2', (['node.size'], {}), '(node.size)\n', (13325, 13336), True, 'import numpy as np\n'), ((16611, 16651), 'matplotlib.colors.to_rgb', 'to_rgb', (["mpl.rcParams['figure.facecolor']"], {}), "(mpl.rcParams['figure.facecolor'])\n", (16617, 16651), False, 'from matplotlib.colors import to_rgb\n')] |
# Contour-detection demo: derive an edge map from a photo and draw every
# detected contour onto a blank canvas with OpenCV.
import cv2 as cv
import numpy as np
# Load the sample image (OpenCV reads it in BGR channel order).
img = cv.imread('/home/praveen/Desktop/Python/Deep Learning/Open CV/Resources/Photos/cats.jpg')
cv.imshow('cats',img)
# Black canvas with the same shape as the image; contours are drawn on it below.
blank=np.zeros(img.shape,dtype='uint8')
cv.imshow('blank',blank)
# Convert to grayscale — edge/contour detection works on single-channel images.
gray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)
cv.imshow('gray',gray)
# Mild 5x5 Gaussian blur suppresses noise so Canny finds fewer spurious edges.
blur = cv.GaussianBlur(gray,(5,5),cv.BORDER_DEFAULT)
cv.imshow("blur",blur)
# Canny edge map with hysteresis thresholds 125 (weak) / 175 (strong).
canny =cv.Canny(blur,125,175)
cv.imshow("canny edge",canny)
# Alternative edge source: plain binary threshold of the grayscale image at 125.
ret,thresh=cv.threshold(gray,125,255,cv.THRESH_BINARY)
cv.imshow("thresh",thresh)
# RETR_LIST retrieves all contours without hierarchy; CHAIN_APPROX_SIMPLE
# compresses straight segments to their endpoints.
contours,hierchiess=cv.findContours(canny,cv.RETR_LIST,cv.CHAIN_APPROX_SIMPLE) #canny or thresh
print(len(contours))
# Draw all contours (-1) in red (BGR 0,0,255), thickness 2, on the blank canvas.
cv.drawContours(blank,contours,-1,(0,0,255),2)
cv.imshow("contours drawn",blank)
cv.waitKey(0)
"cv2.drawContours",
"cv2.threshold",
"cv2.Canny",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.findContours",
"cv2.GaussianBlur",
"cv2.imread"
] | [((43, 142), 'cv2.imread', 'cv.imread', (['"""/home/praveen/Desktop/Python/Deep Learning/Open CV/Resources/Photos/cats.jpg"""'], {}), "(\n '/home/praveen/Desktop/Python/Deep Learning/Open CV/Resources/Photos/cats.jpg'\n )\n", (52, 142), True, 'import cv2 as cv\n'), ((133, 155), 'cv2.imshow', 'cv.imshow', (['"""cats"""', 'img'], {}), "('cats', img)\n", (142, 155), True, 'import cv2 as cv\n'), ((162, 196), 'numpy.zeros', 'np.zeros', (['img.shape'], {'dtype': '"""uint8"""'}), "(img.shape, dtype='uint8')\n", (170, 196), True, 'import numpy as np\n'), ((196, 221), 'cv2.imshow', 'cv.imshow', (['"""blank"""', 'blank'], {}), "('blank', blank)\n", (205, 221), True, 'import cv2 as cv\n'), ((227, 262), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (238, 262), True, 'import cv2 as cv\n'), ((262, 285), 'cv2.imshow', 'cv.imshow', (['"""gray"""', 'gray'], {}), "('gray', gray)\n", (271, 285), True, 'import cv2 as cv\n'), ((293, 341), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(5, 5)', 'cv.BORDER_DEFAULT'], {}), '(gray, (5, 5), cv.BORDER_DEFAULT)\n', (308, 341), True, 'import cv2 as cv\n'), ((339, 362), 'cv2.imshow', 'cv.imshow', (['"""blur"""', 'blur'], {}), "('blur', blur)\n", (348, 362), True, 'import cv2 as cv\n'), ((370, 394), 'cv2.Canny', 'cv.Canny', (['blur', '(125)', '(175)'], {}), '(blur, 125, 175)\n', (378, 394), True, 'import cv2 as cv\n'), ((393, 423), 'cv2.imshow', 'cv.imshow', (['"""canny edge"""', 'canny'], {}), "('canny edge', canny)\n", (402, 423), True, 'import cv2 as cv\n'), ((435, 481), 'cv2.threshold', 'cv.threshold', (['gray', '(125)', '(255)', 'cv.THRESH_BINARY'], {}), '(gray, 125, 255, cv.THRESH_BINARY)\n', (447, 481), True, 'import cv2 as cv\n'), ((479, 506), 'cv2.imshow', 'cv.imshow', (['"""thresh"""', 'thresh'], {}), "('thresh', thresh)\n", (488, 506), True, 'import cv2 as cv\n'), ((527, 587), 'cv2.findContours', 'cv.findContours', (['canny', 'cv.RETR_LIST', 'cv.CHAIN_APPROX_SIMPLE'], {}), 
'(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n', (542, 587), True, 'import cv2 as cv\n'), ((625, 677), 'cv2.drawContours', 'cv.drawContours', (['blank', 'contours', '(-1)', '(0, 0, 255)', '(2)'], {}), '(blank, contours, -1, (0, 0, 255), 2)\n', (640, 677), True, 'import cv2 as cv\n'), ((672, 706), 'cv2.imshow', 'cv.imshow', (['"""contours drawn"""', 'blank'], {}), "('contours drawn', blank)\n", (681, 706), True, 'import cv2 as cv\n'), ((708, 721), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (718, 721), True, 'import cv2 as cv\n')] |
# ---
# jupyter:
# jupytext:
# cell_markers: region,endregion
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <center>Data Mining Project 1 Spring semester 2018-2019</center>
# ## <center><NAME></center>
# ## <center><NAME></center>
# ___
# ### Do all the necessary imports for this notebook
# region
# for wordclouds
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
import pandas as pd
from wordcloud import WordCloud
from IPython.display import Image
from PIL import Image as imgWordcloud
import numpy as np
# for preprocessing
import re
from string import punctuation
from nltk.stem import StemmerI, RegexpStemmer, LancasterStemmer, ISRIStemmer, PorterStemmer, SnowballStemmer, RSLPStemmer
from nltk import word_tokenize
from nltk.corpus import stopwords as nltkStopwords
# for classification
from sklearn import svm, preprocessing
from sklearn.metrics import classification_report, accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import gensim
from gensim.models import Word2Vec
import random
from operator import add
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
# endregion
# ## __Data Analysis__
# - ### *Wordclouds*
# region
# read train data (TSV columns: two ids, sentiment label, tweet text)
trainData = pd.read_csv('../twitter_data/train2017.tsv', sep='\t+', escapechar="\\",
                        engine='python', names=['ID_1', 'ID_2', 'Label', 'Text'])
# make stop words — start from scikit-learn's built-in English list
stopWords = ENGLISH_STOP_WORDS
# endregion
# - #### Wordcloud for all tweets
# region
# make a text of all tweets — concatenate every tweet into one big string
wholeText = ''
for tweetText in trainData['Text']:
    wholeText = wholeText + ' ' + tweetText
# Word cloud over the raw (not yet preprocessed) corpus.
wc = WordCloud(width=600, height=600, background_color='white', stopwords=stopWords)
wc.generate(wholeText)
wc.to_file('wholeTextWordcloud.png')
Image('wholeTextWordcloud.png')
# endregion
# As we see in the above wordcloud there are many useless words like "http" so let's do some preprocessing.
# region
def replaceEmojis(text):
    """Replace emoticons in *text* with the tags ``smilePositive`` /
    ``smileNegative`` so the tokenizer treats sentiment as a word.

    Longer emoticons are substituted before the shorter ones they contain:
    the original replace order ran ``':)'`` before ``'>:)'``, so the listed
    negative emoticon ``'>:)'`` could never match and leaked a stray ``>``
    into the text.
    """
    # (emoticon, sentiment tag) pairs; overlapping longer patterns come first.
    replacements = [
        ('0:-)', 'smilePositive'),
        ('>:(', 'smileNegative'),
        ('>:)', 'smileNegative'),
        ('>:/', 'smileNegative'),
        ('d:<', 'smileNegative'),
        (';(', 'smileNegative'),
        (':(', 'smileNegative'),
        (':|', 'smileNegative'),
        (':)', 'smilePositive'),
        (':D', 'smilePositive'),
        (':*', 'smilePositive'),
        (':o', 'smilePositive'),
        (':p', 'smilePositive'),
        (';)', 'smilePositive'),
    ]
    processedText = text
    for emoticon, tag in replacements:
        processedText = processedText.replace(emoticon, tag)
    return processedText
def preprocessText(initText):
    """Clean a raw tweet: normalise escapes, lowercase, strip URLs, hashtags,
    mentions and punctuation, tag emoticons, and drop stop words.

    Returns the cleaned tweet as a single space-joined string ('' when
    everything is filtered out).

    NOTE(review): lowercasing happens before replaceEmojis, so uppercase
    emoticons such as ':D' can no longer match here — confirm this is intended.
    """
    # Replace escape sequences left over in the raw data.
    processedText = initText.replace("\\u002c", ',')
    processedText = processedText.replace("\\u2019", '\'')
    # Make everything lower case.
    processedText = processedText.lower()
    # Remove urls.
    processedText = re.sub(r'(http:\/\/www\.|https:\/\/www\.|http:\/\/|https:\/\/)?[a-z0-9]+([\-\.]{1}[a-z0-9]+)'
                           r'*\.[a-z]{2,5}(:[0-9]{1,5})?(\/.*)?', ' ', processedText)
    # Remove hashtags and @-mentions.
    processedText = re.sub(r"\B#\w\w+", ' ', processedText)
    processedText = re.sub(r"\B@\w\w+", ' ', processedText)
    # Replace emoticons with sentiment tags.
    processedText = replaceEmojis(processedText)
    # Collapse laughter noise.
    processedText = re.sub('hahaha+', ' ', processedText)
    processedText = re.sub('haha+', ' ', processedText)
    # Map every punctuation character to a space in one pass (was a per-char
    # replace loop over the whole string).
    processedText = processedText.translate(
        str.maketrans(punctuation, ' ' * len(punctuation)))
    # Collapse consecutive spaces.
    processedText = re.sub(r" {2,}", ' ', processedText)
    # Tokenize and drop stop words.
    tokens = word_tokenize(processedText)
    filtered = [w for w in tokens if w not in stopWords]
    # join() yields '' for an empty list, matching the old explicit branch.
    return ' '.join(filtered)
def stemmingPreprocess(initText):
    """Stem every token of *initText* with the Porter stemmer and return the
    stems re-joined by single spaces ('' when the input has no tokens)."""
    tokens = word_tokenize(initText)
    stemmer = PorterStemmer()
    stems = [stemmer.stem(token) for token in tokens]
    # join() handles the empty-token case (returns ''), so the old manual
    # quadratic concatenation loop is unnecessary.
    return ' '.join(stems)
# endregion
# Below we conclude on what our stopwords are and then preprocess the text.
# In the first wordcloud we saw many words that occur multiple times and don't seem to add anything regarding the sentiment so we will remove them.
# region
# In the first wordcloud we see that there are many words that are not included in stop words
# So let's add our stop words
myAdditionalStopWords = ['tomorrow', 'today', 'day', 'tonight', 'sunday',
                         'monday', 'tuesday', 'wednesday', 'thursday', 'friday',
                         'saturday', 'week', 'just', 'going', 'time','say','said']
# Final stop-word set = sklearn's list + our additions + NLTK's English list.
stopWords = (stopWords.union(myAdditionalStopWords)).union(nltkStopwords.words('english'))
# Clean every training tweet in place.
for index, row in trainData.iterrows():
    initialText = row["Text"]
    trainData.loc[index, "Text"] = preprocessText(initialText)
# endregion
# Let's make again a wordcloud for the text of all tweets
# region
# make a text of all tweets (this time over the cleaned corpus)
wholeText = ''
for tweetText in trainData['Text']:
    wholeText = wholeText + ' ' + tweetText
# The mask image constrains the word cloud to a custom silhouette.
generalMask = np.array(imgWordcloud.open("generalMask.png"))
wc = WordCloud(background_color="white", mask=generalMask, max_words=100,
               stopwords=stopWords, contour_width=3, contour_color='steelblue')
# generate word cloud
wc.generate(wholeText)
# store to file
wc.to_file('wholeTextCleanWordcloud.png')
Image('wholeTextCleanWordcloud.png')
# endregion
# #### Make content for each category of all tweets
# For each category we make a dictionary that maps the category to a string that contains all the tweets for the respective category as one.
# region
tweetCategories = list(set(trainData['Label']))
# make a dictionary of form {category:contentString}
contentDict = {category: '' for category in tweetCategories}
# fill the content of each category
for (content, category) in zip(trainData['Text'], trainData['Label']):
    contentDict[category] = contentDict[category] + ' ' + content
# endregion
# - #### Wordcloud for positive tweets
# region
positiveMask = np.array(imgWordcloud.open("positiveMask.png"))
wc = WordCloud(background_color="white", mask=positiveMask, max_words=100,
               stopwords=stopWords, contour_width=3, contour_color='steelblue')
# generate word cloud
wc.generate(contentDict['positive'])
# store to file
wc.to_file('positiveWordcloud.png')
Image('positiveWordcloud.png')
# endregion
# - #### Wordcloud for negative tweets
# region
negativeMask = np.array(imgWordcloud.open("negativeMask.png"))
wc = WordCloud(background_color="white", mask=negativeMask, max_words=100,
               stopwords=stopWords, contour_width=3, contour_color='steelblue')
# generate word cloud
wc.generate(contentDict['negative'])
# store to file
wc.to_file('negativeWordcloud.png')
Image('negativeWordcloud.png')
# endregion
# - #### Wordcloud for neutral tweets
# region
neutralMask = np.array(imgWordcloud.open("neutralMask.png"))
wc = WordCloud(background_color="white", mask=neutralMask, max_words=100,
               stopwords=stopWords, contour_width=3, contour_color='steelblue')
# generate word cloud
wc.generate(contentDict['neutral'])
# store to file
wc.to_file('neutralWordcloud.png')
Image('neutralWordcloud.png')
# endregion
# ___
# ## __Classification__
# Below we will proceed to the classification of the text. For that purpose we will use 2 classifiers, kNN and SVM, and 3 different ways of vectorization, bag-of-words, TF-IDF and word embeddings. We provide two different ways in order to produce the word embeddings, a model we trained ourselves and the use of pretrained word embeddings. Here we use only the pretrained embeddings as after experimentation we concluded that these provide better results
# - #### Classification using SVM classifier
def SvmClassification(trainX, trainY, testX, testY, labelEncoder):
    """Fit a linear SVM on the training set, print a per-class report for the
    test set, and return the overall test accuracy."""
    classifier = svm.SVC(kernel='linear', C=1, probability=True)
    classifier.fit(trainX, trainY)
    predictions = classifier.predict(testX)
    category_names = list(labelEncoder.classes_)
    print(classification_report(testY, predictions, target_names=category_names))
    return accuracy_score(testY, predictions)
# - #### Classification using KNN classifier
def KnnClassification(trainX, trainY, testX, testY, labelEncoder):
    """Fit a 5-nearest-neighbour classifier on the training set, print a
    per-class report for the test set, and return the test accuracy."""
    neighbours = KNeighborsClassifier(n_neighbors=5)
    neighbours.fit(trainX, trainY)
    predictions = neighbours.predict(testX)
    category_names = list(labelEncoder.classes_)
    print(classification_report(testY, predictions, target_names=category_names))
    return accuracy_score(testY, predictions)
# Prepare train and test data that we will need below
# region
# read test data
testData = pd.read_csv('../twitter_data/test2017.tsv', sep='\t', names=['ID_1', 'ID_2', 'Label', 'Text'])
# preprocess test data
for index, row in testData.iterrows():
    initialText = row["Text"]
    testData.loc[index, "Text"] = preprocessText(initialText)
# read test results (gold labels)
testResults = pd.read_csv('../twitter_data/SemEval2017_task4_subtaskA_test_english_gold.txt',
                          sep='\t', names=['ID', 'Label'])
# Build label encoder for categories
le = preprocessing.LabelEncoder()
le.fit(trainData["Label"])
# Transform categories into numbers
trainY = le.transform(trainData["Label"])
testY = le.transform(testResults["Label"])
accuracyDict = dict()
# BUG FIX: these were plain aliases (trainNotStemmed = trainData), so the
# in-place stemming below also mutated the "NotStemmed" frames and the word
# embedding vectorizers silently received stemmed text. Take real copies.
trainNotStemmed = trainData.copy()
testNotStemmed = testData.copy()
# Let's do stemming (in place, for the BOW / tf-idf pipelines)
for index, row in trainData.iterrows():
    initialText = row["Text"]
    trainData.loc[index, "Text"] = stemmingPreprocess(initialText)
for index, row in testData.iterrows():
    initialText = row["Text"]
    testData.loc[index, "Text"] = stemmingPreprocess(initialText)
# endregion
# ## __Vectorization__
# Let's do classification using 3 different ways of vectorization
# - #### Bag-of-words vectorization
# region
# Bag-of-words features, vocabulary capped at the 3000 most frequent terms.
bowVectorizer = CountVectorizer(max_features=3000)
# Fit the vocabulary on train only, then reuse it to transform test.
trainX = bowVectorizer.fit_transform(trainData['Text'])
testX = bowVectorizer.transform(testData['Text'])
print('-------------SVM Classification Report with BOW Vectorization-------------')
accuracyDict["BOW-SVM"] = SvmClassification(trainX, trainY, testX, testY, le)
print('-------------KNN Classification Report with BOW Vectorization-------------')
accuracyDict["BOW-KNN"] = KnnClassification(trainX, trainY, testX, testY, le)
# endregion
# - #### Tf-idf vectorization
# region
# Same setup with tf-idf weighting instead of raw counts.
tfIdfVectorizer = TfidfVectorizer(max_features=3000)
trainX = tfIdfVectorizer.fit_transform(trainData['Text'])
testX = tfIdfVectorizer.transform(testData['Text'])
print('-------------SVM Classification Report with TfIdf Vectorization-------------')
accuracyDict["TfIdf-SVM"] = SvmClassification(trainX, trainY, testX, testY, le)
print('-------------KNN Classification Report with TfIdf Vectorization-------------')
accuracyDict["TfIdf-KNN"] = KnnClassification(trainX, trainY, testX, testY, le)
# endregion
# - #### Word embeddings vectorization
# Train the word embeddings model and save it. If the model is already trained and saved then you only have to load
# it as shown in the next cell
# region
# Word embeddings
# NOTE(review): `tokenize` is assigned but never used — tokenization below
# goes through nltk's word_tokenize instead.
tokenize = lambda x: x.split()
tokens = [word_tokenize(row["Text"]) for index, row in trainData.iterrows()] # tokenizing
vec_size = 200
# Train a skip-gram word2vec model on the (stemmed) training tweets.
model_w2v = gensim.models.Word2Vec(
    tokens,
    size=200, # desired no. of features/independent variables
    window=5, # context window size
    min_count=2,
    sg=1, # 1 for skip-gram model
    hs=0,
    negative=10, # for negative sampling
    workers=2, # no.of cores
    seed=34)
model_w2v.train(tokens, total_examples=trainData.shape[0], epochs=20)
model_w2v.save("word2vec.model")
# endregion
# Load the word embeddings model (skip the training cell above on reruns)
model_w2v = Word2Vec.load("word2vec.model")
# Read pre-trained Word Embeddings
# region
# Load the pre-trained 300-d twitter embeddings into {word: float32 vector}.
# To switch dimensionality, swap in the matching file/vec_size pair below.
embeddings_dict = {}
# f = open("datastories.twitter.50d.txt", "r", encoding="utf-8")
# vec_size = 50
# f = open("datastories.twitter.200d.txt", "r", encoding="utf-8")
# vec_size = 200
vec_size = 300
# 'with' guarantees the file is closed; the original left the handle open,
# and the enumerate() index it used was never read.
with open("datastories.twitter.300d.txt", "r", encoding="utf-8") as f:
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_dict[word] = coefs
# endregion
# Let's make a tsne plot for pre-trained word embeddings
# region
# Project the first `maxWords` embedding vectors to 2-D with t-SNE and plot
# each word at its projected coordinates.
labels = []
tokens = []
maxWords = 200
counter = 0
for word, value in embeddings_dict.items():
    tokens.append(value)
    labels.append(word)
    counter += 1
    if(counter == maxWords):
        break
tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)
new_values = tsne_model.fit_transform(tokens)
# Split the 2-D projections into x / y coordinate lists for plotting.
x = []
y = []
for value in new_values:
    x.append(value[0])
    y.append(value[1])
plt.figure(figsize=(16, 16))
for i in range(len(x)):
    plt.scatter(x[i],y[i])
    # Annotate each point with its word, slightly offset from the marker.
    plt.annotate(labels[i],
                 xy=(x[i], y[i]),
                 xytext=(5, 2),
                 textcoords='offset points',
                 ha='right',
                 va='bottom')
plt.show()
# endregion
# Use the following function to vectorize the data using the word embeddings vectorizer
# region
def sample_floats(low=-1.0, high=1.0, k=1):
    """
    Return a k-length list of distinct random floats with low <= x <= high.
    """
    drawn = set()
    samples = []
    while len(samples) < k:
        candidate = random.uniform(low, high)
        if candidate in drawn:
            # duplicate draw — try again
            continue
        drawn.add(candidate)
        samples.append(candidate)
    return samples
def wordEmbeddingsVectorizer(data):
    """Map each tweet in *data* to one fixed-size vector: the sum of the
    word2vec vectors of its tokens (random fillers for unknown words),
    divided by the tweet's character length.

    NOTE(review): the divisor is len(text) (characters), not the token
    count — preserved as-is from the original implementation.
    """
    def _lookup(token):
        # vocabulary hit -> trained vector; miss -> random stand-in
        if token in model_w2v.wv.vocab:
            return model_w2v.wv[token]
        return sample_floats(-5.0, 5.0, vec_size)

    text_vec = []
    for _, row in data.iterrows():
        text = row["Text"]
        text_len = len(text)
        if text_len == 0:
            # nothing to embed: use a purely random vector
            text_vec.append(sample_floats(-5.0, 5.0, vec_size))
            continue
        tokens = word_tokenize(text)
        summed = _lookup(tokens[0])
        for token in tokens[1:]:
            summed = [a + b for a, b in zip(summed, _lookup(token))]
        text_vec.append([component / text_len for component in summed])
    return np.array(text_vec)
def wordEmbeddingsPreTrainedVectorizer(data):
    """Map each tweet in *data* to one fixed-size vector: the sum of the
    pre-trained embedding vectors of its tokens (random fillers for
    out-of-vocabulary words), divided by the tweet's character length.

    NOTE(review): the divisor is len(text) (characters), not the token
    count — preserved as-is from the original implementation.
    """
    def _lookup(token):
        # dictionary hit -> pre-trained vector; miss -> random stand-in
        if token in embeddings_dict:
            return embeddings_dict[token]
        return sample_floats(-3.0, 3.0, vec_size)

    text_vec = []
    for _, row in data.iterrows():
        text = row["Text"]
        text_len = len(text)
        # If the text is empty make a random vector.
        if text_len == 0:
            text_vec.append(sample_floats(-3.0, 3.0, vec_size))
            continue
        tokens = word_tokenize(text)
        summed = _lookup(tokens[0])
        for token in tokens[1:]:
            summed = [a + b for a, b in zip(summed, _lookup(token))]
        text_vec.append([component / text_len for component in summed])
    return np.array(text_vec)
# endregion
# Read the lexica
def readDictionary(fileName):
    """Read a sentiment lexicon file into a {term: score} dict.

    Each line holds a (possibly multi-word) term followed by a float score,
    e.g. ``very sad\t-2.0``. Blank lines are skipped (the original raised
    IndexError on them), and the file handle is now closed deterministically.
    """
    dictionary = dict()
    with open(fileName, "r") as dictFile:
        for line in dictFile:
            words = line.split()
            if not words:  # tolerate blank / whitespace-only lines
                continue
            text = ' '.join(words[:-1])
            dictionary[text] = float(words[-1])
    return dictionary
# For every tweet calculate the values of each dictionary and append them as extra features in the feature vector
def getDictValues(data, vector):
    """Append five lexicon features to each row of *vector*: the mean AFINN,
    emotweet, generic, NRC and NRC-tag scores of the tweet's tokens.

    Rows whose text is missing, empty, or tokenizes to nothing get all-zero
    features. Returns a new array of shape (n_rows, width + 5); *vector*
    itself is not modified.

    BUG FIX: the original appended an undefined variable ``l`` on the
    empty-token branch (NameError / stale values from a previous row).
    """
    extra_feats = []
    for index, row in data.iterrows():
        text = row["Text"]
        # Order matches the lexica: affin, emotweet, generic, nrc, nrctag.
        scores = [0.0, 0.0, 0.0, 0.0, 0.0]
        # Empty rows are not considered strings if read from a csv.
        if not isinstance(text, str) or len(text) == 0:
            extra_feats.append(scores)
            continue
        tokens = word_tokenize(text)
        if not tokens:
            extra_feats.append(scores)
            continue
        text_len = len(tokens)
        dictionaries = [affinDict, emotweetDict, genericDict, nrcDict, nrctagDict]
        # Sum each lexicon's score over the tokens it knows.
        for token in tokens:
            for pos, dictionary in enumerate(dictionaries):
                if token in dictionary:
                    scores[pos] += dictionary[token]
        # Average over the number of tokens.
        extra_feats.append([score / text_len for score in scores])
    return np.append(vector, np.array(extra_feats), axis=1)
# Read the dictionary files and store them in python dictionaries
# (each maps a term to its float sentiment score).
affinDict = readDictionary("../lexica/affin/affin.txt")
emotweetDict = readDictionary("../lexica/emotweet/valence_tweet.txt")
genericDict = readDictionary("../lexica/generic/generic.txt")
nrcDict = readDictionary("../lexica/nrc/val.txt")
nrctagDict = readDictionary("../lexica/nrctag/val.txt")
# Vectorize the content using word embeddings vectorizer. Then add some extra features using the dictionary files
# region
# these are for our trained word embeddings
# trainX = wordEmbeddingsVectorizer(trainData)
# testX = wordEmbeddingsVectorizer(testData)
# these are for pre-trained wordEmbeddings
# NOTE(review): trainNotStemmed/testNotStemmed are plain aliases of the frames
# that were stemmed in place above — verify they still hold unstemmed text.
trainX = wordEmbeddingsPreTrainedVectorizer(trainNotStemmed)
testX = wordEmbeddingsPreTrainedVectorizer(testNotStemmed)
print('-------------SVM Classification Report with Word Embeddings Vectorization without lexica-------------')
accuracyDict["WordEmbed-SVM-without-lexica"] = SvmClassification(trainX, trainY, testX, testY, le)
print('-------------KNN Classification Report with Word Embeddings Vectorization without lexica-------------')
accuracyDict["WordEmbed-KNN-without-lexica"] = KnnClassification(trainX, trainY, testX, testY, le)
# endregion
# Let's try word embeddings with lexica
# region
# Append the five lexicon-score features to the embedding vectors.
trainX = getDictValues(trainNotStemmed, trainX)
testX = getDictValues(testNotStemmed, testX)
print('-------------SVM Classification Report with Word Embeddings Vectorization with lexica-------------')
accuracyDict["WordEmbed-SVM-with-lexica"] = SvmClassification(trainX, trainY, testX, testY, le)
print('-------------KNN Classification Report with Word Embeddings Vectorization with lexica-------------')
accuracyDict["WordEmbed-KNN-with-lexica"] = KnnClassification(trainX, trainY, testX, testY, le)
# endregion
# ## __Final Results__
# region
# Summary table: one row per vectorizer, one column per classifier.
resultsData = {r'Vectorizer \ Classifier': ['BOW', 'Tfidf', 'Word Embeddings without lexica',
                                           'Word Embeddings with lexica'],
               'KNN': [accuracyDict["BOW-KNN"], accuracyDict["TfIdf-KNN"],
                       accuracyDict["WordEmbed-KNN-without-lexica"],
                       accuracyDict["WordEmbed-KNN-with-lexica"]],
               # BUG FIX: the last SVM cell previously reused the KNN
               # "with lexica" accuracy instead of the SVM one.
               'SVM': [accuracyDict["BOW-SVM"], accuracyDict["TfIdf-SVM"],
                       accuracyDict["WordEmbed-SVM-without-lexica"],
                       accuracyDict["WordEmbed-SVM-with-lexica"]]}
resultsDataFrame = pd.DataFrame(data=resultsData)
resultsDataFrame
# endregion
# **Comments and remarks**
# - After various experimentations regarding the max_features parameter of bow and tf-idf we concluded that for the value 3000, the results are similar or better than other values we tried or the default value, both for the SVM and kNN classifiers.
# - We observe that the SVM classifier performs better than kNN
# - Moreover, after experimenting with preprocessing we noticed that stemming our data gives better accuracy for both BOW and TF-IDF, as opposed to not stemming
# - In vectorization with word embeddings we decided not to use stemming because we used the lexica, in which the words are in their normal form
# - We tried building our own word embeddings model as well as using a pretrained one. Eventually we saw that the pretrained model gives better results, so we used it.
# - We observed that the minimum value in the pretrained word embeddings model with 200 dimensions is -3.224762 and the maximum is 5.65153, so we decided to create our random vectors with values between -3 and 3
# - In order to rerun the notebook from the beginning you will have to change the file paths on your own if you plan to use different files
# - Keep in mind that when rerunning from the beginning it might take some time for the vectorizations to finish
#
| [
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"sklearn.neighbors.KNeighborsClassifier",
"matplotlib.pyplot.annotate",
"numpy.array",
"nltk.corpus.stopwords.words",
"sklearn.feature_extraction.text.CountVectorizer",
"gensim.models.Word2Vec.load",
"IPython.display.Image",
"numpy.asarray",... | [((1526, 1660), 'pandas.read_csv', 'pd.read_csv', (['"""../twitter_data/train2017.tsv"""'], {'sep': '"""\t+"""', 'escapechar': '"""\\\\"""', 'engine': '"""python"""', 'names': "['ID_1', 'ID_2', 'Label', 'Text']"}), "('../twitter_data/train2017.tsv', sep='\\t+', escapechar='\\\\',\n engine='python', names=['ID_1', 'ID_2', 'Label', 'Text'])\n", (1537, 1660), True, 'import pandas as pd\n'), ((1919, 1998), 'wordcloud.WordCloud', 'WordCloud', ([], {'width': '(600)', 'height': '(600)', 'background_color': '"""white"""', 'stopwords': 'stopWords'}), "(width=600, height=600, background_color='white', stopwords=stopWords)\n", (1928, 1998), False, 'from wordcloud import WordCloud\n'), ((2061, 2092), 'IPython.display.Image', 'Image', (['"""wholeTextWordcloud.png"""'], {}), "('wholeTextWordcloud.png')\n", (2066, 2092), False, 'from IPython.display import Image\n'), ((6467, 6604), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""white"""', 'mask': 'generalMask', 'max_words': '(100)', 'stopwords': 'stopWords', 'contour_width': '(3)', 'contour_color': '"""steelblue"""'}), "(background_color='white', mask=generalMask, max_words=100,\n stopwords=stopWords, contour_width=3, contour_color='steelblue')\n", (6476, 6604), False, 'from wordcloud import WordCloud\n'), ((6722, 6758), 'IPython.display.Image', 'Image', (['"""wholeTextCleanWordcloud.png"""'], {}), "('wholeTextCleanWordcloud.png')\n", (6727, 6758), False, 'from IPython.display import Image\n'), ((7446, 7584), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""white"""', 'mask': 'positiveMask', 'max_words': '(100)', 'stopwords': 'stopWords', 'contour_width': '(3)', 'contour_color': '"""steelblue"""'}), "(background_color='white', mask=positiveMask, max_words=100,\n stopwords=stopWords, contour_width=3, contour_color='steelblue')\n", (7455, 7584), False, 'from wordcloud import WordCloud\n'), ((7710, 7740), 'IPython.display.Image', 'Image', (['"""positiveWordcloud.png"""'], 
{}), "('positiveWordcloud.png')\n", (7715, 7740), False, 'from IPython.display import Image\n'), ((7874, 8012), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""white"""', 'mask': 'negativeMask', 'max_words': '(100)', 'stopwords': 'stopWords', 'contour_width': '(3)', 'contour_color': '"""steelblue"""'}), "(background_color='white', mask=negativeMask, max_words=100,\n stopwords=stopWords, contour_width=3, contour_color='steelblue')\n", (7883, 8012), False, 'from wordcloud import WordCloud\n'), ((8138, 8168), 'IPython.display.Image', 'Image', (['"""negativeWordcloud.png"""'], {}), "('negativeWordcloud.png')\n", (8143, 8168), False, 'from IPython.display import Image\n'), ((8299, 8436), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""white"""', 'mask': 'neutralMask', 'max_words': '(100)', 'stopwords': 'stopWords', 'contour_width': '(3)', 'contour_color': '"""steelblue"""'}), "(background_color='white', mask=neutralMask, max_words=100,\n stopwords=stopWords, contour_width=3, contour_color='steelblue')\n", (8308, 8436), False, 'from wordcloud import WordCloud\n'), ((8560, 8589), 'IPython.display.Image', 'Image', (['"""neutralWordcloud.png"""'], {}), "('neutralWordcloud.png')\n", (8565, 8589), False, 'from IPython.display import Image\n'), ((10223, 10321), 'pandas.read_csv', 'pd.read_csv', (['"""../twitter_data/test2017.tsv"""'], {'sep': '"""\t"""', 'names': "['ID_1', 'ID_2', 'Label', 'Text']"}), "('../twitter_data/test2017.tsv', sep='\\t', names=['ID_1', 'ID_2',\n 'Label', 'Text'])\n", (10234, 10321), True, 'import pandas as pd\n'), ((10508, 10624), 'pandas.read_csv', 'pd.read_csv', (['"""../twitter_data/SemEval2017_task4_subtaskA_test_english_gold.txt"""'], {'sep': '"""\t"""', 'names': "['ID', 'Label']"}), "('../twitter_data/SemEval2017_task4_subtaskA_test_english_gold.txt',\n sep='\\t', names=['ID', 'Label'])\n", (10519, 10624), True, 'import pandas as pd\n'), ((10690, 10718), 'sklearn.preprocessing.LabelEncoder', 
'preprocessing.LabelEncoder', ([], {}), '()\n', (10716, 10718), False, 'from sklearn import svm, preprocessing\n'), ((11408, 11442), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'max_features': '(3000)'}), '(max_features=3000)\n', (11423, 11442), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((11949, 11983), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'max_features': '(3000)'}), '(max_features=3000)\n', (11964, 11983), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((12809, 12921), 'gensim.models.Word2Vec', 'gensim.models.Word2Vec', (['tokens'], {'size': '(200)', 'window': '(5)', 'min_count': '(2)', 'sg': '(1)', 'hs': '(0)', 'negative': '(10)', 'workers': '(2)', 'seed': '(34)'}), '(tokens, size=200, window=5, min_count=2, sg=1, hs=0,\n negative=10, workers=2, seed=34)\n', (12831, 12921), False, 'import gensim\n'), ((13328, 13359), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""word2vec.model"""'], {}), "('word2vec.model')\n", (13341, 13359), False, 'from gensim.models import Word2Vec\n'), ((14137, 14214), 'sklearn.manifold.TSNE', 'TSNE', ([], {'perplexity': '(40)', 'n_components': '(2)', 'init': '"""pca"""', 'n_iter': '(2500)', 'random_state': '(23)'}), "(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)\n", (14141, 14214), False, 'from sklearn.manifold import TSNE\n'), ((14349, 14377), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 16))\n', (14359, 14377), True, 'import matplotlib.pyplot as plt\n'), ((14629, 14639), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14637, 14639), True, 'import matplotlib.pyplot as plt\n'), ((21929, 21959), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'resultsData'}), '(data=resultsData)\n', (21941, 21959), True, 'import pandas as pd\n'), ((3659, 3830), 're.sub', 're.sub', 
(['"""(http:\\\\/\\\\/www\\\\.|https:\\\\/\\\\/www\\\\.|http:\\\\/\\\\/|https:\\\\/\\\\/)?[a-z0-9]+([\\\\-\\\\.]{1}[a-z0-9]+)*\\\\.[a-z]{2,5}(:[0-9]{1,5})?(\\\\/.*)?"""', '""" """', 'processedText'], {}), "(\n '(http:\\\\/\\\\/www\\\\.|https:\\\\/\\\\/www\\\\.|http:\\\\/\\\\/|https:\\\\/\\\\/)?[a-z0-9]+([\\\\-\\\\.]{1}[a-z0-9]+)*\\\\.[a-z]{2,5}(:[0-9]{1,5})?(\\\\/.*)?'\n , ' ', processedText)\n", (3665, 3830), False, 'import re\n'), ((3882, 3923), 're.sub', 're.sub', (['"""\\\\B#\\\\w\\\\w+"""', '""" """', 'processedText'], {}), "('\\\\B#\\\\w\\\\w+', ' ', processedText)\n", (3888, 3923), False, 'import re\n'), ((3967, 4008), 're.sub', 're.sub', (['"""\\\\B@\\\\w\\\\w+"""', '""" """', 'processedText'], {}), "('\\\\B@\\\\w\\\\w+', ' ', processedText)\n", (3973, 4008), False, 'import re\n'), ((4129, 4166), 're.sub', 're.sub', (['"""hahaha+"""', '""" """', 'processedText'], {}), "('hahaha+', ' ', processedText)\n", (4135, 4166), False, 'import re\n'), ((4187, 4222), 're.sub', 're.sub', (['"""haha+"""', '""" """', 'processedText'], {}), "('haha+', ' ', processedText)\n", (4193, 4222), False, 'import re\n'), ((4400, 4435), 're.sub', 're.sub', (['""" {2,}"""', '""" """', 'processedText'], {}), "(' {2,}', ' ', processedText)\n", (4406, 4435), False, 'import re\n'), ((4480, 4508), 'nltk.word_tokenize', 'word_tokenize', (['processedText'], {}), '(processedText)\n', (4493, 4508), False, 'from nltk import word_tokenize\n'), ((4943, 4966), 'nltk.word_tokenize', 'word_tokenize', (['initText'], {}), '(initText)\n', (4956, 4966), False, 'from nltk import word_tokenize\n'), ((5008, 5023), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (5021, 5023), False, 'from nltk.stem import StemmerI, RegexpStemmer, LancasterStemmer, ISRIStemmer, PorterStemmer, SnowballStemmer, RSLPStemmer\n'), ((6029, 6059), 'nltk.corpus.stopwords.words', 'nltkStopwords.words', (['"""english"""'], {}), "('english')\n", (6048, 6059), True, 'from nltk.corpus import stopwords as nltkStopwords\n'), 
((6423, 6459), 'PIL.Image.open', 'imgWordcloud.open', (['"""generalMask.png"""'], {}), "('generalMask.png')\n", (6440, 6459), True, 'from PIL import Image as imgWordcloud\n'), ((7401, 7438), 'PIL.Image.open', 'imgWordcloud.open', (['"""positiveMask.png"""'], {}), "('positiveMask.png')\n", (7418, 7438), True, 'from PIL import Image as imgWordcloud\n'), ((7829, 7866), 'PIL.Image.open', 'imgWordcloud.open', (['"""negativeMask.png"""'], {}), "('negativeMask.png')\n", (7846, 7866), True, 'from PIL import Image as imgWordcloud\n'), ((8255, 8291), 'PIL.Image.open', 'imgWordcloud.open', (['"""neutralMask.png"""'], {}), "('neutralMask.png')\n", (8272, 8291), True, 'from PIL import Image as imgWordcloud\n'), ((9305, 9352), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'C': '(1)', 'probability': '(True)'}), "(kernel='linear', C=1, probability=True)\n", (9312, 9352), False, 'from sklearn import svm, preprocessing\n'), ((9591, 9619), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['testY', 'predY'], {}), '(testY, predY)\n', (9605, 9619), False, 'from sklearn.metrics import classification_report, accuracy_score\n'), ((9830, 9865), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(5)'}), '(n_neighbors=5)\n', (9850, 9865), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((10100, 10128), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['testY', 'predY'], {}), '(testY, predY)\n', (10114, 10128), False, 'from sklearn.metrics import classification_report, accuracy_score\n'), ((12700, 12726), 'nltk.word_tokenize', 'word_tokenize', (["row['Text']"], {}), "(row['Text'])\n", (12713, 12726), False, 'from nltk import word_tokenize\n'), ((13762, 13801), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (13772, 13801), True, 'import numpy as np\n'), ((14406, 14429), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[i]', 'y[i]'], {}), '(x[i], y[i])\n', 
(14417, 14429), True, 'import matplotlib.pyplot as plt\n'), ((14433, 14546), 'matplotlib.pyplot.annotate', 'plt.annotate', (['labels[i]'], {'xy': '(x[i], y[i])', 'xytext': '(5, 2)', 'textcoords': '"""offset points"""', 'ha': '"""right"""', 'va': '"""bottom"""'}), "(labels[i], xy=(x[i], y[i]), xytext=(5, 2), textcoords=\n 'offset points', ha='right', va='bottom')\n", (14445, 14546), True, 'import matplotlib.pyplot as plt\n'), ((16071, 16089), 'numpy.array', 'np.array', (['text_vec'], {}), '(text_vec)\n', (16079, 16089), True, 'import numpy as np\n'), ((17240, 17258), 'numpy.array', 'np.array', (['text_vec'], {}), '(text_vec)\n', (17248, 17258), True, 'import numpy as np\n'), ((14964, 14989), 'random.uniform', 'random.uniform', (['low', 'high'], {}), '(low, high)\n', (14978, 14989), False, 'import random\n'), ((15513, 15532), 'nltk.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (15526, 15532), False, 'from nltk import word_tokenize\n'), ((16555, 16574), 'nltk.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (16568, 16574), False, 'from nltk import word_tokenize\n'), ((18466, 18485), 'nltk.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (18479, 18485), False, 'from nltk import word_tokenize\n'), ((19511, 19532), 'numpy.array', 'np.array', (['extra_feats'], {}), '(extra_feats)\n', (19519, 19532), True, 'import numpy as np\n'), ((15031, 15056), 'random.uniform', 'random.uniform', (['low', 'high'], {}), '(low, high)\n', (15045, 15056), False, 'import random\n')] |
import numpy as np
import scipy.fftpack as fftpack
import audio_dspy as adsp
def tf2minphase(h, normalize=True):
    """Compute the minimum-phase counterpart of a transfer function.

    The magnitude response is preserved; the phase is replaced by the
    negated Hilbert transform of the log-magnitude, which is the phase
    of the minimum-phase system with that magnitude.

    Parameters
    ----------
    h : ndarray
        Impulse response of the original transfer function.
    normalize : bool, optional
        When True, normalize the result with ``adsp.normalize``.

    Returns
    -------
    ndarray
        Minimum-phase impulse response.
    """
    magnitude = np.abs(np.fft.fft(h))
    min_phase = -fftpack.hilbert(np.log(magnitude))
    spectrum = magnitude * np.exp(-1j * min_phase)
    result = np.real(np.fft.ifft(spectrum))
    return adsp.normalize(result) if normalize else result
def tf2linphase(h, normalize=True):
    """Compute the linear-phase counterpart of a transfer function.

    Keeps the magnitude response and imposes a pure delay of N/2 samples
    (linear phase), then removes the DC offset of the result.

    Parameters
    ----------
    h : ndarray
        Impulse response of the original transfer function.
    normalize : bool, optional
        When True, normalize the result with ``adsp.normalize``.

    Returns
    -------
    ndarray
        Linear-phase impulse response.
    """
    num_samples = len(h)
    magnitude = np.abs(np.fft.fft(h))
    freqs = np.linspace(0, 2 * np.pi, num_samples)
    # phase of an N/2-sample delay at each bin frequency
    delay = np.exp(-1j * (num_samples / 2) * freqs)
    out = np.real(np.fft.ifft(delay * magnitude))
    out = out - np.mean(out)  # remove DC bias
    if normalize:
        out = adsp.normalize(out)
    return out
| [
"numpy.mean",
"numpy.abs",
"audio_dspy.normalize",
"numpy.fft.fft",
"numpy.log",
"numpy.exp",
"numpy.linspace",
"numpy.fft.ifft"
] | [((964, 977), 'numpy.fft.fft', 'np.fft.fft', (['h'], {}), '(h)\n', (974, 977), True, 'import numpy as np\n'), ((986, 1014), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'N'], {}), '(0, 2 * np.pi, N)\n', (997, 1014), True, 'import numpy as np\n'), ((1033, 1060), 'numpy.exp', 'np.exp', (['(-1.0j * (N / 2) * w)'], {}), '(-1.0j * (N / 2) * w)\n', (1039, 1060), True, 'import numpy as np\n'), ((412, 425), 'numpy.fft.fft', 'np.fft.fft', (['h'], {}), '(h)\n', (422, 425), True, 'import numpy as np\n'), ((485, 506), 'numpy.exp', 'np.exp', (['(-1.0j * arg_H)'], {}), '(-1.0j * arg_H)\n', (491, 506), True, 'import numpy as np\n'), ((523, 541), 'numpy.fft.ifft', 'np.fft.ifft', (['H_min'], {}), '(H_min)\n', (534, 541), True, 'import numpy as np\n'), ((578, 599), 'audio_dspy.normalize', 'adsp.normalize', (['h_min'], {}), '(h_min)\n', (592, 599), True, 'import audio_dspy as adsp\n'), ((1133, 1147), 'numpy.mean', 'np.mean', (['h_lin'], {}), '(h_lin)\n', (1140, 1147), True, 'import numpy as np\n'), ((1200, 1221), 'audio_dspy.normalize', 'adsp.normalize', (['h_lin'], {}), '(h_lin)\n', (1214, 1221), True, 'import audio_dspy as adsp\n'), ((458, 467), 'numpy.log', 'np.log', (['H'], {}), '(H)\n', (464, 467), True, 'import numpy as np\n'), ((1101, 1110), 'numpy.abs', 'np.abs', (['H'], {}), '(H)\n', (1107, 1110), True, 'import numpy as np\n')] |
import sys
sys.path.append("..")
from .Mesure import *
from Ordonnancement import *
import numpy as np
from scipy import stats
class EvalIRModel:
    """Evaluate a retrieval (matching) model with an evaluation measure.

    Parameters:
        - model : retrieval model exposing ``getRanking(text)``.
        - mesure : evaluation measure exposing ``evalQuery(ranking, relevants)``.
    """

    def __init__(self, model, mesure):
        self.model = model
        self.mesure = mesure

    def eval(self, queryParser, verbose=False):
        """Evaluate the model over every query held by ``queryParser``.

        Args:
            - queryParser : object storing the queries (``get_queries``).
            - verbose : when True, print per-query details.

        Returns:
            (scores, mean, std); queries with no relevant documents are
            excluded from the statistics.
        """
        all_queries = queryParser.get_queries()
        scores = []
        counted = len(all_queries.items())
        for qid, q in all_queries.items():
            docs = np.array(self.model.getRanking(q.get_text()))[:, 0]
            rel_docs = q.get_relevants()
            q_score = self.mesure.evalQuery(docs, rel_docs)
            # queries without any relevant document are not counted
            if q_score is None:
                counted -= 1
            else:
                scores.append(q_score)
            if verbose:
                print(" Query {} : {}".format(qid, q.get_text()))
                print("\t 10 first documents returned by our model : {}".format(docs[:10]))
                print("\t Relevant documents for this query : {}".format(rel_docs))
                if q_score is None:
                    print("\t Cette requete ne contient pas de documents pertinents !")
                else:
                    print("\t Score : {}".format(q_score))
                print("************************************************")
        scores = np.array(scores)
        avg = np.sum(scores) / counted * 1.0
        spread = np.sqrt(np.sum(np.square(scores - avg)) / counted)
        return scores, avg, spread
# Bonus TME3 : paired t-test
def paired_ttest(scores1, scores2, threshold=0.05):
    """
    Perform a paired t-test between two sets of per-query scores.

    Args:
        - scores1 : scores obtained with the first model.
        - scores2 : scores obtained with the second model (same queries,
          same order and length as ``scores1``).
        - threshold : significance level (default 0.05).

    Returns:
        (pvalue, tstat)
    """
    # Bug fix: the samples are paired (the same queries scored by two
    # models), so the paired test ``ttest_rel`` is required; the original
    # ``ttest_ind`` wrongly treated them as independent samples.
    tstat, pvalue = stats.ttest_rel(scores1, scores2)
    if pvalue < threshold:
        print("les performances des deux modèles sont significativement différentes à 95% de confiance : t = {}".format(tstat))
    else:
        print("les performances des deux modèles ne sont pas significativement différentes à 95% de confiance : t = {}".format(tstat))
    return pvalue, tstat
| [
"numpy.square",
"numpy.array",
"numpy.sum",
"scipy.stats.ttest_ind",
"sys.path.append"
] | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((2481, 2514), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['scores1', 'scores2'], {}), '(scores1, scores2)\n', (2496, 2514), False, 'from scipy import stats\n'), ((1952, 1967), 'numpy.array', 'np.array', (['evals'], {}), '(evals)\n', (1960, 1967), True, 'import numpy as np\n'), ((1985, 1998), 'numpy.sum', 'np.sum', (['evals'], {}), '(evals)\n', (1991, 1998), True, 'import numpy as np\n'), ((2046, 2070), 'numpy.square', 'np.square', (['(evals - mean_)'], {}), '(evals - mean_)\n', (2055, 2070), True, 'import numpy as np\n')] |
import numpy as np
from scipy.spatial.distance import squareform
from random import randint
# there are more efficient algorithms for this
# https://people.csail.mit.edu/virgi/6.890/papers/APBP.pdf
def max_min(A, B):
    '''Max-min product of two square matrices.

    Entry (i, j) of the result is max_k min(A[i, k], B[k, j]).

    params:
        A, B: NxN numpy arrays '''
    assert A.shape == B.shape
    pairwise_min = np.minimum(A[:, :, None], B[None, :, :])
    return pairwise_min.max(axis=1)
def mat_gromov_prod(dists, base):
    '''Gromov products of an N-point metric space relative to a basepoint.

    (x|y)_base = 0.5 * (d(base, y) + d(x, base) - d(x, y))

    Args:
        dists (ndarray): NxN matrix of pairwise distances
        base (int): index of the basepoint in 0...N-1 '''
    n = dists.shape[0]
    assert n == dists.shape[1] and 0 <= base < n
    from_base = dists[base, :][None, :]
    to_base = dists[:, base][:, None]
    return (from_base + to_base - dists) * 0.5
def delta_rel(dists, base=None):
    ''' Measure the delta-hyperbolicity constant of data
    with respect to a basepoint, normalized by the diameter (max dist).

    Args:
        dists (ndarray): NxN matrix of pairwise distances
        base (int): index of basepoint in 0...N-1 (default = random)

    Returns:
        float: delta / diameter
    '''
    if base is None:
        # Bug fix: the original read ``dist.shape`` (undefined name), so
        # every call without an explicit basepoint raised NameError.
        base = randint(0, dists.shape[0] - 1)
    assert is_metric(dists) and 0 <= base < dists.shape[0]
    G = mat_gromov_prod(dists, base)
    # largest violation of the Gromov four-point condition w.r.t. base
    delta = np.max(max_min(G, G) - G)
    diam = np.max(dists)
    return delta / diam
def delta_sample(X, **kwargs):
    '''Estimate delta-hyperbolicity by sampling random batches of points.

    Args:
        X (ndarray): N x d array of points.
    Keyword Args:
        bs (int): batch size per trial (default: all N points).
        tries (int): number of random batches to draw (default 10).
        dist (callable): optional custom distance ``dist(batch, batch)``
            returning a bs x bs matrix; defaults to Euclidean distances.

    Returns:
        list of relative delta values, one per sampled batch.
    '''
    bs = kwargs.get("bs", X.shape[0])
    tries = kwargs.get("tries", 10)
    dist = kwargs.get("dist", None)
    deltas = []
    for _ in range(tries):
        idx = np.random.choice(X.shape[0], bs)
        batch = X[idx]
        if dist is None:
            # Pairwise Euclidean distances. The original spelled the first
            # operand as ``batch[None:,]`` (a no-op slice typo); the
            # intended ``batch[None, :]`` broadcasts to the same result.
            dists = np.linalg.norm(batch[None, :] - batch[:, None], axis=-1)
        else:
            dists = dist(batch, batch)
        deltas.append(delta_rel(dists, randint(0, bs - 1)))
    return deltas
def is_metric(X, tol=1e-8):
    '''Check whether X looks like a distance matrix.

    Requires a 2-D input that is symmetric within ``tol``, has a
    (near-)zero diagonal and no negative entries. The triangle
    inequality is NOT verified.
    '''
    if len(X.shape) != 2:
        return False
    symmetric = np.all(np.abs(X - X.T) < tol)
    zero_diag = np.all(np.abs(np.diag(X)) < tol)
    nonnegative = np.all(X >= 0)
    return symmetric and zero_diag and nonnegative
def avg_distortion(metric1, metric2):
    ''' Average distortion between two metrics.

    Args:
        metric1, metric2 (ndarray): N x N distance matrices,
            or length N*(N-1)//2 condensed distance matrices
    Returns:
        average distortion (float): mean of |d1 - d2| / d2 over pairs
    '''
    assert metric1.shape == metric2.shape

    def _condensed(m):
        # Square matrices are validated and condensed; 1-D inputs are
        # assumed to already be condensed distance vectors.
        if len(m.shape) > 1:
            assert is_metric(m)
            return squareform(m)
        return m

    X = _condensed(metric1)
    Y = _condensed(metric2)
    return np.mean(np.abs(X - Y) / Y)
| [
"numpy.abs",
"scipy.spatial.distance.squareform",
"numpy.minimum",
"numpy.random.choice",
"numpy.max",
"numpy.diag",
"numpy.linalg.norm",
"numpy.all",
"random.randint"
] | [((1327, 1340), 'numpy.max', 'np.max', (['dists'], {}), '(dists)\n', (1333, 1340), True, 'import numpy as np\n'), ((360, 400), 'numpy.minimum', 'np.minimum', (['A[:, :, None]', 'B[None, :, :]'], {}), '(A[:, :, None], B[None, :, :])\n', (370, 400), True, 'import numpy as np\n'), ((1158, 1187), 'random.randint', 'randint', (['(0)', '(dist.shape[0] - 1)'], {}), '(0, dist.shape[0] - 1)\n', (1165, 1187), False, 'from random import randint\n'), ((1562, 1594), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]', 'bs'], {}), '(X.shape[0], bs)\n', (1578, 1594), True, 'import numpy as np\n'), ((2097, 2111), 'numpy.all', 'np.all', (['(X >= 0)'], {}), '(X >= 0)\n', (2103, 2111), True, 'import numpy as np\n'), ((2533, 2552), 'scipy.spatial.distance.squareform', 'squareform', (['metric1'], {}), '(metric1)\n', (2543, 2552), False, 'from scipy.spatial.distance import squareform\n'), ((2660, 2679), 'scipy.spatial.distance.squareform', 'squareform', (['metric2'], {}), '(metric2)\n', (2670, 2679), False, 'from scipy.spatial.distance import squareform\n'), ((1663, 1718), 'numpy.linalg.norm', 'np.linalg.norm', (['(batch[None:,] - batch[:, None])'], {'axis': '(-1)'}), '(batch[None:,] - batch[:, None], axis=-1)\n', (1677, 1718), True, 'import numpy as np\n'), ((2730, 2743), 'numpy.abs', 'np.abs', (['(X - Y)'], {}), '(X - Y)\n', (2736, 2743), True, 'import numpy as np\n'), ((1872, 1890), 'random.randint', 'randint', (['(0)', '(bs - 1)'], {}), '(0, bs - 1)\n', (1879, 1890), False, 'from random import randint\n'), ((2010, 2025), 'numpy.abs', 'np.abs', (['(X - X.T)'], {}), '(X - X.T)\n', (2016, 2025), True, 'import numpy as np\n'), ((2062, 2072), 'numpy.diag', 'np.diag', (['X'], {}), '(X)\n', (2069, 2072), True, 'import numpy as np\n')] |
import torch
import torchvision.models as models
import os,sys
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm
# Make the MatterSim build importable, then run from the DASA directory so
# the relative 'data/...' paths below resolve.
pwd = os.path.abspath('.')
MP3D_build_path = os.path.join(pwd, 'MP3D_Sim', 'build')
DASA_path = os.path.join(pwd, 'DASA')
sys.path.append(MP3D_build_path)
os.chdir(DASA_path)
import MatterSim  # Matterport3D simulator bindings (found via MP3D_build_path)
VIEWPOINT_SIZE = 36 # Number of discretized views from one viewpoint
FEATURE_SIZE = 2048  # ResNet-152 feature dimension (see .view(-1, 2048) below)
BATCH_SIZE = 9  # views per forward pass (36 views / 9 = 4 batches)
MODEL = 'data/resnet152.pth'  # checkpoint path, relative to DASA_path
OUTFILE = 'data/ResNet-152-imagenet-depth'  # np.save target (.npy appended)
# Simulator image parameters
WIDTH=640
HEIGHT=480
VFOV=60  # vertical field of view, degrees
GPU_ID = 1
vpids = np.load('data/viewpointIds.npy')  # (scanId, viewpointId) pairs to process
def normalizaiton(img):
    # Min-max scale to roughly [0, 1]; the epsilon keeps constant images
    # finite. (The misspelled name is kept because call sites use it.)
    lo, hi = np.min(img), np.max(img)
    return (img - lo) / (hi - lo + 1e-6)
def load_model():
    """Build a ResNet-152 feature extractor on GPU ``GPU_ID``.

    Loads the checkpoint from ``MODEL`` and replaces the final fully
    connected layer with the identity, so the forward pass returns the
    pooled 2048-d features instead of class logits.
    """
    net = models.resnet152(pretrained=False)
    net.load_state_dict(torch.load(MODEL))
    torch.cuda.set_device(GPU_ID)
    # drop the classifier head and substitute a pass-through
    del net.fc
    net.fc = lambda x: x
    return net.cuda()
# Configure a single-agent simulator that renders depth at each of the
# 36 discretized viewing angles per viewpoint.
sim = MatterSim.Simulator()
sim.setCameraResolution(WIDTH, HEIGHT)
sim.setCameraVFOV(np.radians(VFOV))
sim.setDepthEnabled(True)
sim.setDiscretizedViewingAngles(True)
sim.setBatchSize(1)
sim.initialize()
model = load_model()
feats = []
for vpid in tqdm(vpids):
    scanId = vpid[0]
    viewpointId = vpid[1]
    depth = []
    # Sweep all 36 discretized views; start at elevation -30 degrees and
    # step the heading each view, also raising elevation every 12th view.
    for ix in range(VIEWPOINT_SIZE):
        if ix == 0:
            sim.newEpisode([scanId], [viewpointId], [0], [np.radians(-30)])
        elif ix % 12 == 0:
            sim.makeAction([0], [1.0], [1.0])
        else:
            sim.makeAction([0], [1.0], [0])
        state = sim.getState()[0]
        assert state.viewIndex == ix
        depth.append(normalizaiton(state.depth))
    # Replicate the single depth channel to 3 channels and reorder to NCHW
    # for the ResNet input. Assumes state.depth is (H, W, 1) -- TODO confirm.
    input_depth = torch.from_numpy(np.array(depth)).float()
    input_depth = input_depth.repeat(1,1,1,3).permute(0,3,1,2).cuda()
    feat = []
    with torch.no_grad():
        # process the 36 views in BATCH_SIZE-view chunks to bound GPU memory
        for i in range(VIEWPOINT_SIZE//BATCH_SIZE):
            b_feat = model(input_depth[i*BATCH_SIZE:(i+1)*BATCH_SIZE])
            feat.append(b_feat.cpu())
    feat = torch.stack(feat,dim=0).view(-1,2048)
    feats.append(feat.numpy())
np.save(OUTFILE,np.array(feats)) | [
"numpy.radians",
"MatterSim.Simulator",
"torch.load",
"tqdm.tqdm",
"os.path.join",
"torch.stack",
"torch.cuda.set_device",
"os.chdir",
"numpy.array",
"numpy.max",
"torchvision.models.resnet152",
"numpy.min",
"os.path.abspath",
"torch.no_grad",
"numpy.load",
"sys.path.append"
] | [((156, 176), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (171, 176), False, 'import os, sys\n'), ((196, 234), 'os.path.join', 'os.path.join', (['pwd', '"""MP3D_Sim"""', '"""build"""'], {}), "(pwd, 'MP3D_Sim', 'build')\n", (208, 234), False, 'import os, sys\n'), ((248, 273), 'os.path.join', 'os.path.join', (['pwd', '"""DASA"""'], {}), "(pwd, 'DASA')\n", (260, 273), False, 'import os, sys\n'), ((275, 307), 'sys.path.append', 'sys.path.append', (['MP3D_build_path'], {}), '(MP3D_build_path)\n', (290, 307), False, 'import os, sys\n'), ((309, 328), 'os.chdir', 'os.chdir', (['DASA_path'], {}), '(DASA_path)\n', (317, 328), False, 'import os, sys\n'), ((617, 649), 'numpy.load', 'np.load', (['"""data/viewpointIds.npy"""'], {}), "('data/viewpointIds.npy')\n", (624, 649), True, 'import numpy as np\n'), ((1033, 1054), 'MatterSim.Simulator', 'MatterSim.Simulator', ([], {}), '()\n', (1052, 1054), False, 'import MatterSim\n'), ((1286, 1297), 'tqdm.tqdm', 'tqdm', (['vpids'], {}), '(vpids)\n', (1290, 1297), False, 'from tqdm import tqdm\n'), ((797, 831), 'torchvision.models.resnet152', 'models.resnet152', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (813, 831), True, 'import torchvision.models as models\n'), ((887, 916), 'torch.cuda.set_device', 'torch.cuda.set_device', (['GPU_ID'], {}), '(GPU_ID)\n', (908, 916), False, 'import torch\n'), ((1114, 1130), 'numpy.radians', 'np.radians', (['VFOV'], {}), '(VFOV)\n', (1124, 1130), True, 'import numpy as np\n'), ((2210, 2225), 'numpy.array', 'np.array', (['feats'], {}), '(feats)\n', (2218, 2225), True, 'import numpy as np\n'), ((691, 702), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (697, 702), True, 'import numpy as np\n'), ((703, 714), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (709, 714), True, 'import numpy as np\n'), ((863, 880), 'torch.load', 'torch.load', (['MODEL'], {}), '(MODEL)\n', (873, 880), False, 'import torch\n'), ((1921, 1936), 'torch.no_grad', 'torch.no_grad', ([], {}), 
'()\n', (1934, 1936), False, 'import torch\n'), ((732, 743), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (738, 743), True, 'import numpy as np\n'), ((1798, 1813), 'numpy.array', 'np.array', (['depth'], {}), '(depth)\n', (1806, 1813), True, 'import numpy as np\n'), ((2119, 2143), 'torch.stack', 'torch.stack', (['feat'], {'dim': '(0)'}), '(feat, dim=0)\n', (2130, 2143), False, 'import torch\n'), ((1482, 1497), 'numpy.radians', 'np.radians', (['(-30)'], {}), '(-30)\n', (1492, 1497), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import pyaudio
import os
import numpy as np
from scipy.interpolate import UnivariateSpline
from scipy.signal import butter, lfilter, filtfilt, resample
from scipy.optimize import curve_fit
import scipy as sp
import time
import pygame
from pygame.locals import *
from pygame import gfxdraw
from pygame import event, fastevent
import RPi.GPIO as GPIO
import matplotlib.pyplot as plt
from chirp3 import lchirp
from SC18IS602B import SC18IS602B
from MCP230XX import MCP230XX
from LTC1380 import LTC1380
# set up a bunch of constants
# RGB colour tuples used for drawing
BLUE = ( 0, 0, 255)
WHITE = (255, 255, 255)
DARKRED = (128, 0, 0)
DARKBLUE = ( 0, 0, 128)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
DARKGREEN = ( 0, 128, 0)
YELLOW = (255, 255, 0)
DARKYELLOW = (128, 128, 0)
BLACK = ( 0, 0, 0)
SCREEN_WIDTH = 320
SCREEN_HEIGHT = 240
SCREEN_SIZE = (SCREEN_WIDTH, SCREEN_HEIGHT)
# menu entry format: line -> [row, font size, x position, font, text]
splash_screen = {
    0: [(SCREEN_HEIGHT / 30) / 2, 25, 30, "couriernew", "PreAmp Tester"]
}
# chirp sweep table: index -> [f_start Hz, f_stop Hz, duration s] (see sweep_gen)
sweeps = {
    0: [18000, 34000, 0.004],
    1: [7000, 17000, 0.004],
    2: [12000, 24000, 0.004],
    3: [4000, 10000, 0.004],
    4: [48000, 78000, 0.004],
    5: [3000, 80000, 0.004],
}
T = 0.02  # capture window in seconds
RATE = 192000  # sample rate, Hz
CHUNK = int(RATE*T)  # samples per capture
FORMAT = pyaudio.paInt16
sweep = 5  # active entry of the sweeps table
blocksize = 1024 * 4  # pygame mixer buffer size
g_amplitude = 17750 # 17750 - 1.0V P2P
chirp_x = 0  # filled by sweep_gen(): chirp time axis
chirp_y = []  # filled by sweep_gen(): stereo int16 chirp samples
sound = []  # filled by sweep_gen(): pygame Sound object
buffer = []
f_vec = RATE * np.arange(CHUNK / 2) / CHUNK  # frequency (Hz) of each FFT bin
lowcut = 2000.0
highcut = 100000.0
def sweep_gen():
    """Build the stereo linear-chirp test signal for the selected sweep.

    Reads the [fmin, fmax, duration] entry ``sweeps[sweep]`` and fills the
    globals ``chirp_x`` (time axis, s), ``chirp_y`` (int16 stereo samples,
    right channel phase-inverted) and ``sound`` (pygame Sound object).
    """
    global chirp_x, chirp_y, g_amplitude, sound
    f_lo, f_hi, duration = sweeps[sweep]
    n_samples = int(RATE * duration)
    chirp_x = np.arange(0, n_samples) / RATE
    left = lchirp(n_samples, tmin=0, tmax=duration, fmin=f_lo, fmax=f_hi,
                  zero_phase_tmin=True, cos=False)
    right = left * -1  # 180-degree counterpart for the second channel
    stereo = np.column_stack((left, right)) * g_amplitude
    chirp_y = stereo.astype(np.int16)
    sound = pygame.sndarray.make_sound(chirp_y)
def normalize(data):
    """Scale ``data`` toward the 16-bit range and subtract its mean.

    Note: the gain is computed from the raw peak before the mean is
    removed, so outputs can slightly exceed +/-32767.
    """
    gain = 32767 / np.max(np.abs(data))
    offset = np.mean(data)
    return [(sample - offset) * gain for sample in data]
def dB(y):
    "Calculate the log ratio of y / max(y) in decibel."
    magnitude = np.abs(y)
    return 20 * np.log10(magnitude / magnitude.max())
# NOTE(review): these rebind the module-level lowcut/highcut defined above
# (2000.0 / 100000.0), narrowing the analysis band-pass to 3-80 kHz.
lowcut = 3000.0
highcut = 80000.0
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter.

    Cutoffs are given in Hz and normalized to the Nyquist frequency.
    Returns the (b, a) transfer-function coefficients.
    """
    nyquist = 0.5 * fs
    normalized_band = [lowcut / nyquist, highcut / nyquist]
    return butter(order, normalized_band, btype='band')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Band-pass ``data`` with the filter from ``butter_bandpass``."""
    coeff_b, coeff_a = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(coeff_b, coeff_a, data)
def gauss(x, a, mu, sig):
    """Gaussian bell curve: a * exp(-(x - mu)^2 / (2 * sig^2)).

    Bug fix: the original returned ``a**np.exp(...)`` (exponentiation),
    which is not a Gaussian; the amplitude must scale multiplicatively.
    """
    return a * np.exp(-(x - mu)**2 / (2. * sig**2))
def linear(x, a, b):
    """Straight line a*x + b (model function for curve_fit)."""
    return b + a * x
def pre_amp(i):
    """Map a detected center frequency (kHz) to a pre-amp band label."""
    band_by_khz = {
        11: '7/17', 12: '7/17', 13: '7/17',
        15: '7/34',
        18: '12/24', 19: '12/24', 20: '12/24',
        23: '18/34', 24: '18/34', 25: '18/34', 26: '18/34',
        46: '40/80', 47: '40/80',
        6: 'xxxxx',
    }
    return band_by_khz.get(i, "Invalid pre AMP")
# --Define a function to show a screen of text with button labels
# Note: text items in the screen dictionary can be changed before displaying
def show_text_menu(menuname, highlite, buttons): # buttons can be None
    """Render a text menu plus optional button labels onto the global screen.

    Args:
        menuname: dict line -> [row, font size, x, font name, text] for the
            menu body (rendered via pygame.font.SysFont), or None.
        highlite: index of the menu line to draw inverted (black on white).
        buttons: dict with the same layout for the side button labels
            (rendered via pygame.font.Font, i.e. a .ttf file path), or None.

    The screen blank and display update are commented out; the caller is
    expected to fill the screen and flip the display itself.
    """
    # screen.fill(BLACK) # blank the display
    # Build button labels first, so menu can overlap on leading blanks
    if buttons != None: # see if there are buttons to show
        line = 0 # reset our line count for the labels
        for line in buttons: # go through the button line values
            linedata = buttons[line]
            # myfont = pygame.font.SysFont(linedata[3], linedata[1]) # use the font selected
            myfont = pygame.font.Font(linedata[3], linedata[1])
            textsurface = myfont.render(linedata[4], False, WHITE, BLACK) # write the text
            # show the text; y position = font size * row value
            screen.blit(textsurface, (linedata[2], linedata[1] * linedata[0]))
            line = line + 1 # go to the next line
    # Build the rest of the menu
    if menuname != None:
        line = 0 # start showing at line zero
        for line in menuname: # go through the line values
            # get the value list from the menu dictionary
            linedata = menuname[line]
            myfont = pygame.font.SysFont(linedata[3], linedata[1]) # use the font selected
            # Build text and position & highlighting a line, if within range
            if line == highlite: # check if we should highlight this line
                textsurface = myfont.render(
                    linedata[4], False, BLACK, WHITE
                ) # highlight it
            else:
                textsurface = myfont.render(
                    linedata[4], False, WHITE, BLACK
                ) # no highlight
            # add the line to the screen
            screen.blit(textsurface, (linedata[2], linedata[1] * linedata[0]))
            line = line + 1
    # Show the new screen
    # pygame.display.update() # show it all
# Button label layout for show_text_menu: line -> [row, size, x, font, text].
# The text fields are rewritten at runtime by the set_* helpers below.
button_menu1 = {
    0: [2.5, 15, 268, 'freesansbold.ttf', "Start->"],
    1: [6.5, 15, 260, 'freesansbold.ttf', " -0dB->"],
    2: [10.5, 15, 260, 'freesansbold.ttf', "Gain->"],
    3: [14.8, 15, 268, 'freesansbold.ttf', "Main->"],
    4: [10, 20, 120, 'freesansbold.ttf', "PreAmp"],
}
# Gain steps: index -> [display label, SPI control byte sent to the
# amplifier via LTC69122.spiTransfer -- presumably an LTC6912 register
# value with both channel nibbles equal; confirm against the hardware].
gains = {
    # 0:['-120dB',0x00],
    0:[' 0dB->',0x11],
    1:[' 6dB->',0x22],
    2:[' 12dB->',0x33],
    3:['18.1dB->',0x44],
    4:['24.1dB->',0x55],
    5:['30.1dB->',0x66],
    # 6:['36.1dB->',0x77],
    # 8:['-12xdB',0x88],
}
gain = 0  # currently selected gains index
# Expected level (dB) per test step for each pre-amp band; the step index
# follows the gain sequence (plus extra low-gain/Lim steps for PreAmp),
# and the last element is the band's display label.
Bands = {
    7:[28,40,52,63,75,86,66,78,'7/17'],
    12:[26,38,50,62,74,85,66,77,'12/24'],
    18:[26,38,50,62,74,85,66,77,'18/34'],
    40:[25,36,48,60,72,83,66,75,'40/80'],
}
# Same table for USBL units (no low-gain/Lim steps).
BandsUSBL = {
    7:[28,40,52,63,75,86,'7/17'],
    12:[26,38,50,62,74,85,'12/24'],
    18:[26,38,50,62,74,85,'18/34'],
    40:[25,36,48,60,72,83,'40/80'],
}
def check_Level(max_level_dB,num,accuracy):
    """Return True when ``max_level_dB`` is within ``accuracy`` dB of ``num``.

    Bug fix: the original tested membership in
    ``np.arange(num-accuracy, num+accuracy)``, which only matches exact
    integer steps and silently rejects any non-integer level. An explicit
    half-open interval test is equivalent for the integer levels the
    callers pass (they call with ``int(max_level_dB)``) and robust
    otherwise.
    """
    return num - accuracy <= max_level_dB < num + accuracy
def check_Band(num):
    """Classify a center frequency (kHz) into a nominal band key (7/12/18/40).

    The valid windows depend on the module-level ``Typ`` (PreAmp vs USBL)
    and, for PreAmp, on ``Channel`` (Main vs Lim output). Returns None
    (falsy) when the frequency matches no window.
    """
    global Channel, Typ
    if Typ:  # USBL
        windows = ((11, 17, 7), (18, 20, 12), (22, 27, 18), (44, 61, 40))
    elif Channel:  # PreAmp, Lim output
        windows = ((11, 13, 7), (16, 18, 12), (22, 26, 18), (39, 67, 40))
    else:  # PreAmp, Main output
        windows = ((11, 13, 7), (18, 20, 12), (22, 26, 18), (44, 61, 40))
    for lo, hi, band_key in windows:
        # range membership mirrors the original integer-step semantics
        if num in range(lo, hi):
            return band_key
    return None
def gpiobut(channel):
    """GPIO falling-edge callback for the PiTFT buttons.

    Translates the BCM pin number into a pygame USEREVENT+2 carrying a
    ``button`` attribute (pins 17/22/23/27 -> buttons 1..4); any other
    pin is ignored.
    """
    button_of_pin = {17: 1, 22: 2, 23: 3, 27: 4}
    if channel in button_of_pin:
        fastevent.post(pygame.event.Event(pygame.USEREVENT + 2,
                                          button=button_of_pin[channel]))
def set_start():
    """Reflect the global Run flag in the Start/Stop button label."""
    global Run, button_menu1
    button_menu1[0][4] = " Stop->" if Run else "Start->"
def set_gain_low():
    """Reflect the global G_Low attenuator flag in its button label."""
    global G_Low, button_menu1
    button_menu1[1][4] = "-20dB->" if G_Low else " -0dB->"
def set_gain(gain):
    """Show the selected gain step's label on the Gain button."""
    global gains, button_menu1
    label, _reg = gains[gain]
    button_menu1[2][4] = label
def set_typ():
    """Show the device under test (USBL vs PreAmp) on the type label."""
    global Typ, button_menu1
    button_menu1[4][4] = "USBL" if Typ else "PreAmp"
def set_channel():
    """Show the monitored receiver output (Lim vs Main) on button 4."""
    global Channel, button_menu1
    button_menu1[3][4] = "Lim->" if Channel else "Main->"
def set_USBL_ch():
    """Show the selected USBL channel number on button 4."""
    global USBL_Ch, button_menu1
    button_menu1[3][4] = f"Ch{USBL_Ch}->"
# Open a mono 192 kHz capture stream for the analysis FFT.
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
                channels=1,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)
# MCP23008 pin assignments (power/attenuator control lines) and
# LTC1380 mux channels for the receiver outputs.
USBL_On = 1
PreAmp_On = 2
Gain_Low = 3
RX_LIM = 5
RX_MAIN = 6
ON = 1
OFF = 0
MCP = MCP230XX('MCP23008', i2cAddress=0x20)
MUX = LTC1380(i2cAddress=0x48)
MCP.set_mode(USBL_On, 'output')
MCP.set_mode(PreAmp_On, 'output')
MCP.set_mode(Gain_Low, 'output')
MUX.SetChannel(RX_MAIN)
# start with everything powered down / attenuator off
MCP.output(USBL_On, OFF)
MCP.output(PreAmp_On, OFF)
MCP.output(Gain_Low, OFF)
# I2C-to-SPI bridge used to program the amplifier gain bytes (see gains)
LTC69122 = SC18IS602B(i2cAddress=0x28, speed="CLK_1843_kHz", mode="MODE_0", order="MSB")
# LTC69122.spiTransfer(slaveNum=0, txData=[gains[gain][1]], rxLen=len([gains[gain][1]]))
print("*recording")
# Route SDL to the PiTFT framebuffer/touchscreen and ALSA audio output.
os.putenv("SDL_FBDEV", "/dev/fb1")
os.putenv("SDL_MOUSEDRV", "TSLIB")
# Mouse is PiTFT touchscreen
os.putenv("SDL_MOUSEDEV", "/dev/input/touchscreen")
os.putenv("SDL_AUDIODRIVER", "alsa")
pygame.mixer.pre_init(frequency=int(RATE), size=-16, channels=2, buffer=blocksize)
pygame.init()
pygame.mouse.set_visible(False)
screen = pygame.display.set_mode(SCREEN_SIZE)
fastevent.init()
pygame.event.set_blocked(pygame.MOUSEMOTION)
pygame.event.set_blocked(pygame.MOUSEBUTTONUP)
# pygame.font.init()
clock = pygame.time.Clock()
# USEREVENT+1 fires every Step_interval ms and drives the test sequencer.
Step_interval = 400 # ms 500
pygame.time.set_timer(USEREVENT + 1, Step_interval)
band_font = pygame.font.Font('freesansbold.ttf', round(0.07*SCREEN_HEIGHT))
level_font = pygame.font.Font('freesansbold.ttf', round(0.07*SCREEN_HEIGHT))
preamp_font = pygame.font.Font('freesansbold.ttf', round(0.07*SCREEN_HEIGHT))
button_font = pygame.font.Font('freesansbold.ttf', round(0.05*SCREEN_HEIGHT))
# Mutable test state shared with the helper functions above.
Accuracy = 3  # allowed level deviation (dB) for check_Level
band = [7000,17000]
M_Band = 0
bg_color = 60
terms = 30
Run = 0
Typ = 0
G_Low = 0
Channel = 0
USBL_Ch = 0
max_level = 0
max_level_dB = 0
fault = 1
# Initialize the button labels and show the splash screen.
set_start()
set_gain_low()
set_gain(gain)
set_typ()
set_channel()
show_text_menu(splash_screen, None, None)
GPIO.setmode(GPIO.BCM) # use BCM chip's numbering scheme vs. pin numbers
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP) # PiTFT button 1
GPIO.setup(22, GPIO.IN, pull_up_down=GPIO.PUD_UP) # PiTFT button 2
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP) # PiTFT button 3
GPIO.setup(27, GPIO.IN, pull_up_down=GPIO.PUD_UP) # PiTFT button 4
# Define GPIO button event handlers for the PiTFT 2423
GPIO.add_event_detect(17, GPIO.FALLING, callback=gpiobut, bouncetime=300)
GPIO.add_event_detect(22, GPIO.FALLING, callback=gpiobut, bouncetime=300)
GPIO.add_event_detect(23, GPIO.FALLING, callback=gpiobut, bouncetime=300)
GPIO.add_event_detect(27, GPIO.FALLING, callback=gpiobut, bouncetime=300)
# Generate the chirp and loop it quietly on the output.
sweep_gen()
sound.set_volume(0.01)
sound.play(-1)
step = 0
done = False
while not done:
screen.fill((0,0,0))
s = 0
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
MCP.output(PreAmp_On, OFF)
MCP.output(USBL_On, OFF)
break
elif event.type == pygame.USEREVENT + 1:
if not Typ:
if Run and M_Band:
# print(center_f)
# print(max_level_dB)
step_len = len(Bands[M_Band])-1
if step == step_len-1:
Channel = 0
G_Low = 0
set_channel()
MUX.SetChannel(RX_MAIN)
Run = False
set_start()
MCP.output(PreAmp_On, ON if Run else OFF)
if fault:
button_menu1[4][4] = "PreAmp OK"
else:
button_menu1[4][4] = "PreAmp Fault"
fault = fault and check_Level(int(max_level_dB),Bands[M_Band][step],Accuracy)
if not fault:
print('fault in step -',step,'max level dB ',max_level_dB,' soll level dB ',Bands[M_Band][step])
step += 1
step = step % step_len
if step in range(len(gains)):
LTC69122.spiTransfer(slaveNum=0, txData=[gains[step][1]], rxLen=len([gains[step][1]]))
set_gain(step)
time.sleep(0.02)
if step == step_len-2:
G_Low = 1
set_gain_low()
MCP.output(Gain_Low, 1)
time.sleep(0.02)
if step == step_len-1:
G_Low = 0
set_gain_low()
MCP.output(Gain_Low, 0)
time.sleep(0.02)
Channel = 1
set_channel()
MUX.SetChannel(RX_LIM)
time.sleep(0.02)
else:
if Run and M_Band:
step_len = len(BandsUSBL[M_Band])-1
if step == step_len-1:
USBL_Ch = 0
Run = False
set_USBL_ch()
set_start()
MCP.output(USBL_On, OFF)
if fault:
button_menu1[4][4] = "USBL OK"
else:
button_menu1[4][4] = "USBL Fault"
MUX.SetChannel(RX_MAIN)
break
fault = fault and check_Level(int(max_level_dB),BandsUSBL[M_Band][step],Accuracy)
if USBL_Ch == 4:
step += 1
step = step % step_len
if step in range(len(gains)):
LTC69122.spiTransfer(slaveNum=1, txData=[gains[step][1]], rxLen=len([gains[step][1]]))
set_gain(step)
time.sleep(0.02)
if not fault:
print('fault in Ch',USBL_Ch)
USBL_Ch += 1
USBL_Ch = USBL_Ch % 5
set_USBL_ch()
MUX.SetChannel(USBL_Ch)
time.sleep(0.01)
elif event.type == pygame.USEREVENT + 2:
if event.button == 1: # button 1 = GPIO 17
Run = not Run
fault = 1
step = 0
set_start()
set_typ()
if not Typ:
MCP.output(PreAmp_On, ON if Run else OFF)
time.sleep(0.02)
LTC69122.spiTransfer(slaveNum=0, txData=[gains[gain][1]], rxLen=len([gains[gain][1]]))
time.sleep(0.2)
else:
MCP.output(USBL_On, ON if Run else OFF)
time.sleep(0.02)
set_USBL_ch()
MUX.SetChannel(0 if Run else RX_MAIN)
time.sleep(0.02)
LTC69122.spiTransfer(slaveNum=1, txData=[gains[gain][1]], rxLen=len([gains[gain][1]]))
time.sleep(0.2)
if not Run:
gain = 0
set_gain(gain)
elif event.button == 2: # button 2 = GPIO 22
G_Low = not G_Low
set_gain_low()
MCP.output(Gain_Low, ON if G_Low else OFF)
elif event.button == 3: # button 3 = GPIO 23
if Run:
gain += 1
gain = gain % len(gains)
set_gain(gain)
if not Typ:
LTC69122.spiTransfer(slaveNum=0, txData=[gains[gain][1]], rxLen=len([gains[gain][1]]))
time.sleep(0.02)
else:
LTC69122.spiTransfer(slaveNum=1, txData=[gains[gain][1]], rxLen=len([gains[gain][1]]))
time.sleep(0.02)
elif event.button == 4: # button 3 = GPIO 27
if not Typ:
Channel = not Channel
set_channel()
if Channel:
MUX.SetChannel(RX_LIM)
else:
MUX.SetChannel(RX_MAIN)
else:
USBL_Ch += 1
USBL_Ch = USBL_Ch % 5
set_USBL_ch()
MUX.SetChannel(USBL_Ch)
elif event.type == pygame.MOUSEBUTTONDOWN:
Typ = not Typ
set_typ()
MCP.output(USBL_On, OFF)
MCP.output(PreAmp_On, OFF)
MCP.output(Gain_Low, OFF)
MUX.SetChannel(RX_MAIN)
Run = 0
start = time.time()
buff = stream.read(CHUNK,exception_on_overflow=False)
data = np.frombuffer(buff, dtype=np.int16)
data = butter_bandpass_filter(data, lowcut, highcut, RATE, order=4)
max_level = np.max(data)
max_level_dB = 20 * np.log10(max_level)
data = normalize(data)
data = data * np.hamming(len(data))
fft_complex = np.fft.fft(data, n=CHUNK)
left, right = np.split(np.abs(fft_complex), 2)
fft_complex = np.add(left, right[::-1])
# Y = np.fft.fft(fft_complex)
# np.put(Y, range(terms+1, len(fft_complex)), 0.0) # zero-ing coefficients above "terms"
# fft_complex = np.fft.ifft(Y)
# fft_complex = fft_complex - np.min(fft_complex)
x = np.linspace(0, len(fft_complex), len(fft_complex))
spline = UnivariateSpline(x, fft_complex, s=0)
fitted_curve = spline(x)
max_fitted = np.max(fitted_curve)
fitted_curve2 = fitted_curve - max_fitted * 0.5
fitted_curve2 = np.sqrt(fitted_curve2)
fitted_curve2[np.isnan(fitted_curve2)] = 0
###### Fitting 0dB frequency #########
# fit_point = 5
# x_slope = np.arange(0, fit_point, 1)
# _3dB_slope = [i for i in fitted_curve2 if i > 0]
# x_center = int(len(_3dB_slope)/2)
# y_center = _3dB_slope[x_center]
# # y_center = np.mean(_3dB_slope[x_center-fit_point:x_center-fit_point])
# _3dB_slopeL = _3dB_slope[:fit_point]
# popt, _ = curve_fit(linear, x_slope, _3dB_slopeL)
# a, b = popt
# _0dB_fL = int((y_center-b)/a)
# _3dB_slopeR = _3dB_slope[-fit_point:]
# popt, _ = curve_fit(linear, x_slope, _3dB_slopeR)
# a, b = popt
# _0dB_fR = int((y_center-b)/a)
######################################
r = [idx for idx, val in enumerate(fitted_curve2) if val > 0]
try:
band[0]= (f_vec[-1]*r[0])/len(f_vec)
band[1]= (f_vec[-1]*r[-1])/len(f_vec)
band[2]= 0#(f_vec[-1]*r[_0dB_fL])/len(f_vec)
band[3]= 0#(f_vec[-1]*r[-_0dB_fR])/len(f_vec)s
except:
band = [0000,0000,0000,0000]
max_val = np.max(fft_complex)
scale_value = SCREEN_HEIGHT / max_val
scale_fitted = SCREEN_HEIGHT / max_fitted
fft_complex = resample(fft_complex,SCREEN_WIDTH)
fitted_curve = resample(fitted_curve,SCREEN_WIDTH)
fr = np.sqrt(band[0]*band[1])
center_f = np.ceil(fr/1000)
# print('center f ', center_f)
# pre_str = pre_amp(center_f)
M_Band = check_Band(center_f)
if M_Band:
if Typ and M_Band == 7:
pre_str = "7/34"
else:
pre_str = Bands[M_Band][-1]
else:
pre_str = "Invalid pre AMP"
for i,v in enumerate(fft_complex):
dist = np.real(fft_complex[i])
mapped_dist = dist * scale_value
mapped_fitted = fitted_curve[i] * scale_fitted
pygame.draw.line(screen, DARKYELLOW, (i, SCREEN_HEIGHT), (i, SCREEN_HEIGHT - int(mapped_fitted)),1)
pygame.draw.circle(screen,RED,[i,SCREEN_HEIGHT-int(mapped_dist)],2)
band_text = band_font.render('Band: %.0f / %.0f' %(band[0],band[1]), True, (255, 255, 255) , (bg_color, bg_color, bg_color))
band_textRect = band_text.get_rect()
band_textRect.x, band_textRect.y = round(0.015*SCREEN_WIDTH), round(0.09*SCREEN_HEIGHT)
level_text = level_font.render('Level: %.0f' %(max_level_dB), True, (255, 255, 255) , (bg_color, bg_color, bg_color))
level_textRect = level_text.get_rect()
level_textRect.x, level_textRect.y = round(0.015*SCREEN_WIDTH), round(0.2*SCREEN_HEIGHT)
preamp_text = preamp_font.render('Pre Amp: '+ (pre_str), True, (255, 255, 255) , (bg_color, bg_color, bg_color))
# preamp_text = preamp_font.render('Pre Amp: %.0f / %.0f' %(band[2],band[3]), True, (255, 255, 255) , (bg_color, bg_color, bg_color))
preamp_textRect = preamp_text.get_rect()
preamp_textRect.x, preamp_textRect.y = round(0.015*SCREEN_WIDTH), round(0.3*SCREEN_HEIGHT)
screen.blit(band_text, band_textRect)
screen.blit(level_text, level_textRect)
screen.blit(preamp_text, preamp_textRect)
show_text_menu(None, None, button_menu1)
pygame.display.flip()
end = time.time()
# clock.tick(25)
# print(end - start)
| [
"numpy.log10",
"numpy.sqrt",
"pygame.init",
"SC18IS602B.SC18IS602B",
"numpy.column_stack",
"time.sleep",
"pygame.event.Event",
"pygame.time.set_timer",
"pygame.font.Font",
"RPi.GPIO.setmode",
"numpy.arange",
"numpy.mean",
"pygame.display.set_mode",
"os.putenv",
"numpy.fft.fft",
"pygame... | [((8367, 8384), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (8382, 8384), False, 'import pyaudio\n'), ((8581, 8616), 'MCP230XX.MCP230XX', 'MCP230XX', (['"""MCP23008"""'], {'i2cAddress': '(32)'}), "('MCP23008', i2cAddress=32)\n", (8589, 8616), False, 'from MCP230XX import MCP230XX\n'), ((8625, 8647), 'LTC1380.LTC1380', 'LTC1380', ([], {'i2cAddress': '(72)'}), '(i2cAddress=72)\n', (8632, 8647), False, 'from LTC1380 import LTC1380\n'), ((8864, 8939), 'SC18IS602B.SC18IS602B', 'SC18IS602B', ([], {'i2cAddress': '(40)', 'speed': '"""CLK_1843_kHz"""', 'mode': '"""MODE_0"""', 'order': '"""MSB"""'}), "(i2cAddress=40, speed='CLK_1843_kHz', mode='MODE_0', order='MSB')\n", (8874, 8939), False, 'from SC18IS602B import SC18IS602B\n'), ((9054, 9088), 'os.putenv', 'os.putenv', (['"""SDL_FBDEV"""', '"""/dev/fb1"""'], {}), "('SDL_FBDEV', '/dev/fb1')\n", (9063, 9088), False, 'import os\n'), ((9091, 9125), 'os.putenv', 'os.putenv', (['"""SDL_MOUSEDRV"""', '"""TSLIB"""'], {}), "('SDL_MOUSEDRV', 'TSLIB')\n", (9100, 9125), False, 'import os\n'), ((9157, 9208), 'os.putenv', 'os.putenv', (['"""SDL_MOUSEDEV"""', '"""/dev/input/touchscreen"""'], {}), "('SDL_MOUSEDEV', '/dev/input/touchscreen')\n", (9166, 9208), False, 'import os\n'), ((9209, 9245), 'os.putenv', 'os.putenv', (['"""SDL_AUDIODRIVER"""', '"""alsa"""'], {}), "('SDL_AUDIODRIVER', 'alsa')\n", (9218, 9245), False, 'import os\n'), ((9330, 9343), 'pygame.init', 'pygame.init', ([], {}), '()\n', (9341, 9343), False, 'import pygame\n'), ((9344, 9375), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(False)'], {}), '(False)\n', (9368, 9375), False, 'import pygame\n'), ((9385, 9421), 'pygame.display.set_mode', 'pygame.display.set_mode', (['SCREEN_SIZE'], {}), '(SCREEN_SIZE)\n', (9408, 9421), False, 'import pygame\n'), ((9422, 9438), 'pygame.fastevent.init', 'fastevent.init', ([], {}), '()\n', (9436, 9438), False, 'from pygame import event, fastevent\n'), ((9439, 9483), 'pygame.event.set_blocked', 
'pygame.event.set_blocked', (['pygame.MOUSEMOTION'], {}), '(pygame.MOUSEMOTION)\n', (9463, 9483), False, 'import pygame\n'), ((9484, 9530), 'pygame.event.set_blocked', 'pygame.event.set_blocked', (['pygame.MOUSEBUTTONUP'], {}), '(pygame.MOUSEBUTTONUP)\n', (9508, 9530), False, 'import pygame\n'), ((9561, 9580), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (9578, 9580), False, 'import pygame\n'), ((9612, 9663), 'pygame.time.set_timer', 'pygame.time.set_timer', (['(USEREVENT + 1)', 'Step_interval'], {}), '(USEREVENT + 1, Step_interval)\n', (9633, 9663), False, 'import pygame\n'), ((10248, 10270), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (10260, 10270), True, 'import RPi.GPIO as GPIO\n'), ((10322, 10371), 'RPi.GPIO.setup', 'GPIO.setup', (['(17)', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (10332, 10371), True, 'import RPi.GPIO as GPIO\n'), ((10390, 10439), 'RPi.GPIO.setup', 'GPIO.setup', (['(22)', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(22, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (10400, 10439), True, 'import RPi.GPIO as GPIO\n'), ((10458, 10507), 'RPi.GPIO.setup', 'GPIO.setup', (['(23)', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (10468, 10507), True, 'import RPi.GPIO as GPIO\n'), ((10526, 10575), 'RPi.GPIO.setup', 'GPIO.setup', (['(27)', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(27, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (10536, 10575), True, 'import RPi.GPIO as GPIO\n'), ((10649, 10722), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['(17)', 'GPIO.FALLING'], {'callback': 'gpiobut', 'bouncetime': '(300)'}), '(17, GPIO.FALLING, callback=gpiobut, bouncetime=300)\n', (10670, 10722), True, 'import RPi.GPIO as GPIO\n'), ((10723, 10796), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['(22)', 'GPIO.FALLING'], {'callback': 'gpiobut', 'bouncetime': '(300)'}), '(22, GPIO.FALLING, callback=gpiobut, 
bouncetime=300)\n', (10744, 10796), True, 'import RPi.GPIO as GPIO\n'), ((10797, 10870), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['(23)', 'GPIO.FALLING'], {'callback': 'gpiobut', 'bouncetime': '(300)'}), '(23, GPIO.FALLING, callback=gpiobut, bouncetime=300)\n', (10818, 10870), True, 'import RPi.GPIO as GPIO\n'), ((10871, 10944), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['(27)', 'GPIO.FALLING'], {'callback': 'gpiobut', 'bouncetime': '(300)'}), '(27, GPIO.FALLING, callback=gpiobut, bouncetime=300)\n', (10892, 10944), True, 'import RPi.GPIO as GPIO\n'), ((1680, 1795), 'chirp3.lchirp', 'lchirp', (['N'], {'tmin': 'tmin', 'tmax': 'tmax', 'fmin': 'sweeps[sweep][0]', 'fmax': 'sweeps[sweep][1]', 'zero_phase_tmin': '(True)', 'cos': '(False)'}), '(N, tmin=tmin, tmax=tmax, fmin=sweeps[sweep][0], fmax=sweeps[sweep][1\n ], zero_phase_tmin=True, cos=False)\n', (1686, 1795), False, 'from chirp3 import lchirp\n'), ((1823, 1850), 'numpy.column_stack', 'np.column_stack', (['(w0, w180)'], {}), '((w0, w180))\n', (1838, 1850), True, 'import numpy as np\n'), ((1939, 1974), 'pygame.sndarray.make_sound', 'pygame.sndarray.make_sound', (['chirp_y'], {}), '(chirp_y)\n', (1965, 1974), False, 'import pygame\n'), ((2045, 2058), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (2052, 2058), True, 'import numpy as np\n'), ((2220, 2229), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (2226, 2229), True, 'import numpy as np\n'), ((2440, 2480), 'scipy.signal.butter', 'butter', (['order', '[low, high]'], {'btype': '"""band"""'}), "(order, [low, high], btype='band')\n", (2446, 2480), False, 'from scipy.signal import butter, lfilter, filtfilt, resample\n'), ((2631, 2650), 'scipy.signal.lfilter', 'lfilter', (['b', 'a', 'data'], {}), '(b, a, data)\n', (2638, 2650), False, 'from scipy.signal import butter, lfilter, filtfilt, resample\n'), ((11087, 11105), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (11103, 11105), False, 'import pygame\n'), ((17205, 17216), 
'time.time', 'time.time', ([], {}), '()\n', (17214, 17216), False, 'import time\n'), ((17287, 17322), 'numpy.frombuffer', 'np.frombuffer', (['buff'], {'dtype': 'np.int16'}), '(buff, dtype=np.int16)\n', (17300, 17322), True, 'import numpy as np\n'), ((17411, 17423), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (17417, 17423), True, 'import numpy as np\n'), ((17554, 17579), 'numpy.fft.fft', 'np.fft.fft', (['data'], {'n': 'CHUNK'}), '(data, n=CHUNK)\n', (17564, 17579), True, 'import numpy as np\n'), ((17649, 17674), 'numpy.add', 'np.add', (['left', 'right[::-1]'], {}), '(left, right[::-1])\n', (17655, 17674), True, 'import numpy as np\n'), ((17966, 18003), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['x', 'fft_complex'], {'s': '(0)'}), '(x, fft_complex, s=0)\n', (17982, 18003), False, 'from scipy.interpolate import UnivariateSpline\n'), ((18054, 18074), 'numpy.max', 'np.max', (['fitted_curve'], {}), '(fitted_curve)\n', (18060, 18074), True, 'import numpy as np\n'), ((18147, 18169), 'numpy.sqrt', 'np.sqrt', (['fitted_curve2'], {}), '(fitted_curve2)\n', (18154, 18169), True, 'import numpy as np\n'), ((19229, 19248), 'numpy.max', 'np.max', (['fft_complex'], {}), '(fft_complex)\n', (19235, 19248), True, 'import numpy as np\n'), ((19356, 19391), 'scipy.signal.resample', 'resample', (['fft_complex', 'SCREEN_WIDTH'], {}), '(fft_complex, SCREEN_WIDTH)\n', (19364, 19391), False, 'from scipy.signal import butter, lfilter, filtfilt, resample\n'), ((19410, 19446), 'scipy.signal.resample', 'resample', (['fitted_curve', 'SCREEN_WIDTH'], {}), '(fitted_curve, SCREEN_WIDTH)\n', (19418, 19446), False, 'from scipy.signal import butter, lfilter, filtfilt, resample\n'), ((19455, 19481), 'numpy.sqrt', 'np.sqrt', (['(band[0] * band[1])'], {}), '(band[0] * band[1])\n', (19462, 19481), True, 'import numpy as np\n'), ((19495, 19513), 'numpy.ceil', 'np.ceil', (['(fr / 1000)'], {}), '(fr / 1000)\n', (19502, 19513), True, 'import numpy as np\n'), ((21327, 21348), 
'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (21346, 21348), False, 'import pygame\n'), ((21359, 21370), 'time.time', 'time.time', ([], {}), '()\n', (21368, 21370), False, 'import time\n'), ((1414, 1434), 'numpy.arange', 'np.arange', (['(CHUNK / 2)'], {}), '(CHUNK / 2)\n', (1423, 1434), True, 'import numpy as np\n'), ((2263, 2274), 'numpy.log10', 'np.log10', (['y'], {}), '(y)\n', (2271, 2274), True, 'import numpy as np\n'), ((2705, 2746), 'numpy.exp', 'np.exp', (['(-(x - mu) ** 2 / (2.0 * sig ** 2))'], {}), '(-(x - mu) ** 2 / (2.0 * sig ** 2))\n', (2711, 2746), True, 'import numpy as np\n'), ((6054, 6108), 'numpy.arange', 'np.arange', (['(num - accuracy)', '(num + accuracy)'], {'dtype': 'float'}), '(num - accuracy, num + accuracy, dtype=float)\n', (6063, 6108), True, 'import numpy as np\n'), ((17448, 17467), 'numpy.log10', 'np.log10', (['max_level'], {}), '(max_level)\n', (17456, 17467), True, 'import numpy as np\n'), ((17607, 17626), 'numpy.abs', 'np.abs', (['fft_complex'], {}), '(fft_complex)\n', (17613, 17626), True, 'import numpy as np\n'), ((18188, 18211), 'numpy.isnan', 'np.isnan', (['fitted_curve2'], {}), '(fitted_curve2)\n', (18196, 18211), True, 'import numpy as np\n'), ((19852, 19875), 'numpy.real', 'np.real', (['fft_complex[i]'], {}), '(fft_complex[i])\n', (19859, 19875), True, 'import numpy as np\n'), ((2020, 2032), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (2026, 2032), True, 'import numpy as np\n'), ((3806, 3848), 'pygame.font.Font', 'pygame.font.Font', (['linedata[3]', 'linedata[1]'], {}), '(linedata[3], linedata[1])\n', (3822, 3848), False, 'import pygame\n'), ((4377, 4422), 'pygame.font.SysFont', 'pygame.font.SysFont', (['linedata[3]', 'linedata[1]'], {}), '(linedata[3], linedata[1])\n', (4396, 4422), False, 'import pygame\n'), ((7136, 7186), 'pygame.event.Event', 'pygame.event.Event', (['(pygame.USEREVENT + 2)'], {'button': '(1)'}), '(pygame.USEREVENT + 2, button=1)\n', (7154, 7186), False, 'import pygame\n'), ((7257, 
7307), 'pygame.event.Event', 'pygame.event.Event', (['(pygame.USEREVENT + 2)'], {'button': '(2)'}), '(pygame.USEREVENT + 2, button=2)\n', (7275, 7307), False, 'import pygame\n'), ((7378, 7428), 'pygame.event.Event', 'pygame.event.Event', (['(pygame.USEREVENT + 2)'], {'button': '(3)'}), '(pygame.USEREVENT + 2, button=3)\n', (7396, 7428), False, 'import pygame\n'), ((7499, 7549), 'pygame.event.Event', 'pygame.event.Event', (['(pygame.USEREVENT + 2)'], {'button': '(4)'}), '(pygame.USEREVENT + 2, button=4)\n', (7517, 7549), False, 'import pygame\n'), ((14613, 14629), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (14623, 14629), False, 'import time\n'), ((12586, 12602), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (12596, 12602), False, 'import time\n'), ((12791, 12807), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (12801, 12807), False, 'import time\n'), ((12999, 13015), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (13009, 13015), False, 'import time\n'), ((13162, 13178), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (13172, 13178), False, 'import time\n'), ((14995, 15011), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (15005, 15011), False, 'import time\n'), ((15139, 15154), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (15149, 15154), False, 'import time\n'), ((15257, 15273), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (15267, 15273), False, 'import time\n'), ((15386, 15402), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (15396, 15402), False, 'import time\n'), ((15534, 15549), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (15544, 15549), False, 'import time\n'), ((14298, 14314), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (14308, 14314), False, 'import time\n'), ((16207, 16223), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (16217, 16223), False, 'import time\n'), ((16385, 16401), 'time.sleep', 'time.sleep', (['(0.02)'], 
{}), '(0.02)\n', (16395, 16401), False, 'import time\n')] |
from imgaug import augmenters as iaa
import matplotlib.pyplot as plt
from itertools import cycle
from scipy import interp
import tensorflow as tf
import itertools
import numpy as np
import json
import argparse
import warnings
import os
from synth.utils import datagenerate
from sklearn.metrics import roc_curve, auc, accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
def rand_augment(images):
    """Apply a randomized augmentation pipeline to a batch of images.

    Args:
        images (4D array): batch of data images.

    Returns:
        [4D array] the batch after augmentation.
    """
    augmenter = datagenerate()
    as_uint8 = tf.cast(images, tf.uint8)
    return augmenter(images=as_uint8.numpy())
def get_data(input_dir, fname):
    """Load an array stored as ``<fname>.npy`` inside *input_dir*.

    Args:
        input_dir (string): input directory.
        fname (string): npy file name without the ``.npy`` extension.

    Returns:
        [4D array] the loaded data.
    """
    return np.load(os.path.join(input_dir, "{}.npy".format(fname)))
def ensure_dir(directory):
    """Make sure *directory* exists, creating it (and any parents) if needed.

    Args:
        directory (string): path of the directory to guarantee.

    Returns:
        None
    """
    if not os.path.exists(directory):
        warnings.warn('''[WARNING]: Output directory not found.
                    The default output directory will be created.''')
        # exist_ok avoids a crash if another process creates the directory
        # between the exists() check above and this call (TOCTOU race).
        os.makedirs(directory, exist_ok=True)
def get_target_names(json_label_decode):
    """Read the label-decoding mapping from a JSON file.

    Args:
        json_label_decode (string): path to the JSON file.

    Returns:
        [dict] mapping of encoded label -> class name.
    """
    with open(json_label_decode) as handle:
        return json.load(handle)
def history_plot(FLAGS, n_class):
    """Plot the training history: accuracy/AUC on the left, loss on the right.

    Args:
        FLAGS (argument parser): input information.
        n_class (int): number of classes.

    Returns:
        None
    """
    with open(FLAGS.json_history) as fh:
        raw = json.load(fh)
    # binary models log *binary_accuracy*, multi-class ones *categorical_accuracy*
    acc_key = 'binary_accuracy' if n_class == 2 else 'categorical_accuracy'
    history = {
        'accuracy': list(raw[acc_key].values()),
        'val_accuracy': list(raw['val_' + acc_key].values()),
        'loss': list(raw['loss'].values()),
        'val_loss': list(raw['val_loss'].values()),
        'auc': list(raw['auc'].values()),
        'val_auc': list(raw['val_auc'].values()),
    }
    epochs = np.arange(len(history['accuracy']))
    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
    fig.suptitle('History training of {}'.format(FLAGS.model_name))
    # left panel: accuracy and AUC curves
    for key in ('accuracy', 'val_accuracy', 'auc', 'val_auc'):
        ax[0].plot(epochs, history[key], label=key)
    ax[0].set_title("accuracy/auc")
    ax[0].set_xlabel('epoch')
    ax[0].set_ylabel('accuracy/auc')
    ax[0].legend(shadow=True, fancybox=True, loc='lower right')
    # right panel: loss curves
    for key in ('loss', 'val_loss'):
        ax[1].plot(epochs, history[key], label=key)
    ax[1].set_title("loss")
    ax[1].set_xlabel('epoch')
    ax[1].set_ylabel('loss')
    ax[1].legend(shadow=True, fancybox=True, loc='upper right')
    plt.savefig(os.path.join(FLAGS.output_dir, 'history_of_{}.png'.format(FLAGS.model_name)))
    plt.close()
def roc_plot(FLAGS, y_test, y_score, target_names):
    """Plot one-vs-rest Receiver Operating Characteristic curves.

    Args:
        FLAGS (argument parser): input information
        y_test (2D array): true (one-hot) labels of the test data
        y_score (2D array): predicted scores for the test data
        target_names (1D array): array of decoded label names

    Returns:
        None
    """
    n_classes = y_test.shape[1]
    lw = 2
    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    # Then interpolate all ROC curves at this points.
    # np.interp replaces scipy.interp, which was an alias of it and has been
    # removed from SciPy (>= 1.8); semantics are identical.
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
    # Finally average it and compute AUC
    mean_tpr /= n_classes
    # NOTE(review): the macro-average curve (all_fpr/mean_tpr) is computed
    # but never plotted -- kept as-is to preserve existing behaviour.
    # Plot all ROC curves
    plt.figure()
    # colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
    for i in range(n_classes):
        plt.plot(fpr[i], tpr[i], lw=lw,
                 label='ROC curve of class {0} (area = {1:0.2f})'
                 ''.format(target_names[i], roc_auc[i]))
    plt.plot([0, 1], [0, 1], 'k--', lw=lw)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic - {}'.format(FLAGS.model_name))
    plt.legend(loc="lower right")
    plt.savefig(os.path.join(FLAGS.output_dir, 'roc_of_{}.png'.format(FLAGS.model_name)))
    plt.close()
def statistics(FLAGS, y_test, target_names):
    """Plot a pie chart of the per-class image counts in the test set.

    Args:
        FLAGS (argument parser): input information
        y_test (2D array): true (one-hot) labels of the test data
        target_names (1D array): array of decoded label names

    Returns:
        None
    """
    # per-class image counts (columns of the one-hot matrix)
    counts = np.sum(y_test, axis=0)
    # normalise so the slice fractions sum to one
    fractions = counts / y_test.shape[0]
    offsets = np.ones(len(fractions)) * 0.1
    # annotate every slice with "<class name>:<count>"
    labels = ['{}:{}'.format(name, int(cnt)) for name, cnt in zip(target_names, counts)]
    # plot
    plt.pie(fractions, explode=offsets, labels=labels, shadow=True, startangle=45)
    plt.axis('equal')
    plt.legend(title='Statistic On Test Data')
    plt.savefig(os.path.join(FLAGS.output_dir, 'label_statistics.png'))
    plt.close()
def plot_confusion_matrix(FLAGS, cm, classes,
                          normalize=True,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """Render *cm* as an annotated image and save it to the output directory.

    Normalization can be applied by setting `normalize=True`.

    Args:
        FLAGS (argument parser): input information
        cm (2D array): confusion matrix
        classes (1D list): list of class names
        normalize (boolean): normalize rows to fractions, default True
        title (string): figure title
        cmap: matplotlib colormap used for the image

    Returns:
        None
    """
    if normalize:
        # divide each row by its total so cells become per-class fractions
        cm = cm.astype('float') / cm.sum(axis=1, keepdims=True)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    fmt = '.2f' if normalize else 'd'
    threshold = cm.max() / 2.
    # annotate every cell, flipping the text colour on dark backgrounds
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > threshold else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.savefig(os.path.join(FLAGS.output_dir, 'confusion_matrix_of_{}.png'.format(FLAGS.model_name)))
    plt.close()
def evaluate(FLAGS, y_test, y_score, n_class):
    """Evaluate the quality of the model and write all report figures.

    Args:
        FLAGS (argument parser): input information
        y_test (2D array): true (one-hot) labels of the test data
        y_score (2D array): predicted scores for the test data
        n_class (int): number of classes

    Returns:
        None
    """
    # guard clause: the label-decode mapping is mandatory
    if not os.path.exists(FLAGS.json_label_decode):
        raise ValueError('[ERROR]: {} is not found'.format(FLAGS.json_label_decode))
    label_decode = get_target_names(FLAGS.json_label_decode)
    target_names = [label_decode[key] for key in label_decode]
    #statistics
    print("[INFOR]: Plot Statistics\n")
    statistics(FLAGS, y_test, target_names)
    #plot roc
    print("[INFOR]: Plot Receiver Operating Characteristic\n")
    roc_plot(FLAGS, y_test, y_score, target_names)
    #plot history
    print("[INFOR]: Plot History.\n")
    if os.path.exists(FLAGS.json_history):
        history_plot(FLAGS, n_class)
    else:
        warnings.warn('''[WARNING]: {} is not found,
                plot history is ignored'''.format(FLAGS.json_history))
    # collapse one-hot arrays to 1-D class indices
    true_labels = np.argmax(y_test, axis=1)
    pred_labels = np.argmax(y_score, axis=1)
    print("\n\n[INFOR]: Plot confusion matrix\n")
    cm = confusion_matrix(true_labels, pred_labels)
    plot_confusion_matrix(FLAGS, cm, target_names, normalize=False)
    print("\n\n[INFOR]: Report test data\n")
    print(classification_report(true_labels, pred_labels,
                                target_names=target_names))
    print("\n\n[INFOR]: Report accuracy\n")
    print(accuracy_score(true_labels, pred_labels), '\n')
    print("\n\n[INFOR]: See more result in {} folder\n".format(FLAGS.output_dir))
    print()
| [
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"sklearn.metrics.auc",
"sklearn.metrics.roc_curve",
"tensorflow.cast",
"synth.utils.datagenerate",
"matplotlib.pyplot.imshow",
"os.path.exists",
"scipy.interp",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotli... | [((623, 637), 'synth.utils.datagenerate', 'datagenerate', ([], {}), '()\n', (635, 637), False, 'from synth.utils import datagenerate\n'), ((651, 676), 'tensorflow.cast', 'tf.cast', (['images', 'tf.uint8'], {}), '(images, tf.uint8)\n', (658, 676), True, 'import tensorflow as tf\n'), ((943, 982), 'os.path.join', 'os.path.join', (['input_dir', "(fname + '.npy')"], {}), "(input_dir, fname + '.npy')\n", (955, 982), False, 'import os\n'), ((994, 1004), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1001, 1004), True, 'import numpy as np\n'), ((2592, 2638), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(8, 4)'}), '(nrows=1, ncols=2, figsize=(8, 4))\n', (2604, 2638), True, 'import matplotlib.pyplot as plt\n'), ((3502, 3513), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3511, 3513), True, 'import matplotlib.pyplot as plt\n'), ((4368, 4390), 'numpy.zeros_like', 'np.zeros_like', (['all_fpr'], {}), '(all_fpr)\n', (4381, 4390), True, 'import numpy as np\n'), ((4573, 4585), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4583, 4585), True, 'import matplotlib.pyplot as plt\n'), ((4849, 4887), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {'lw': 'lw'}), "([0, 1], [0, 1], 'k--', lw=lw)\n", (4857, 4887), True, 'import matplotlib.pyplot as plt\n'), ((4892, 4912), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (4900, 4912), True, 'import matplotlib.pyplot as plt\n'), ((4917, 4938), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (4925, 4938), True, 'import matplotlib.pyplot as plt\n'), ((4943, 4976), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (4953, 4976), True, 'import matplotlib.pyplot as plt\n'), ((4981, 5013), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (4991, 5013), True, 
'import matplotlib.pyplot as plt\n'), ((5099, 5128), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (5109, 5128), True, 'import matplotlib.pyplot as plt\n'), ((5223, 5234), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5232, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5572, 5594), 'numpy.sum', 'np.sum', (['y_test'], {'axis': '(0)'}), '(y_test, axis=0)\n', (5578, 5594), True, 'import numpy as np\n'), ((5837, 5924), 'matplotlib.pyplot.pie', 'plt.pie', (['sta_test'], {'explode': 'explode', 'labels': 'target_names', 'shadow': '(True)', 'startangle': '(45)'}), '(sta_test, explode=explode, labels=target_names, shadow=True,\n startangle=45)\n', (5844, 5924), True, 'import matplotlib.pyplot as plt\n'), ((5925, 5942), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (5933, 5942), True, 'import matplotlib.pyplot as plt\n'), ((5947, 5989), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': '"""Statistic On Test Data"""'}), "(title='Statistic On Test Data')\n", (5957, 5989), True, 'import matplotlib.pyplot as plt\n'), ((6066, 6077), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6075, 6077), True, 'import matplotlib.pyplot as plt\n'), ((6683, 6733), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (6693, 6733), True, 'import matplotlib.pyplot as plt\n'), ((6738, 6754), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6747, 6754), True, 'import matplotlib.pyplot as plt\n'), ((6759, 6773), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6771, 6773), True, 'import matplotlib.pyplot as plt\n'), ((6819, 6863), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (6829, 6863), True, 'import matplotlib.pyplot as plt\n'), ((6868, 6899), 
'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (6878, 6899), True, 'import matplotlib.pyplot as plt\n'), ((7205, 7223), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7221, 7223), True, 'import matplotlib.pyplot as plt\n'), ((7228, 7252), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (7238, 7252), True, 'import matplotlib.pyplot as plt\n'), ((7257, 7286), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (7267, 7286), True, 'import matplotlib.pyplot as plt\n'), ((7395, 7406), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7404, 7406), True, 'import matplotlib.pyplot as plt\n'), ((7753, 7792), 'os.path.exists', 'os.path.exists', (['FLAGS.json_label_decode'], {}), '(FLAGS.json_label_decode)\n', (7767, 7792), False, 'import os\n'), ((8347, 8381), 'os.path.exists', 'os.path.exists', (['FLAGS.json_history'], {}), '(FLAGS.json_history)\n', (8361, 8381), False, 'import os\n'), ((8578, 8603), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (8587, 8603), True, 'import numpy as np\n'), ((8618, 8644), 'numpy.argmax', 'np.argmax', (['y_score'], {'axis': '(1)'}), '(y_score, axis=1)\n', (8627, 8644), True, 'import numpy as np\n'), ((8705, 8738), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_score'], {}), '(y_test, y_score)\n', (8721, 8738), False, 'from sklearn.metrics import confusion_matrix\n'), ((1184, 1209), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1198, 1209), False, 'import os\n'), ((1219, 1346), 'warnings.warn', 'warnings.warn', (['"""[WARNING]: Output directory not found.\n The default output directory will be created."""'], {}), '(\n """[WARNING]: Output directory not found.\n The default output directory will be created."""\n )\n', (1232, 1346), False, 'import warnings\n'), ((1345, 1367), 
'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1356, 1367), False, 'import os\n'), ((1619, 1631), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1628, 1631), False, 'import json\n'), ((1964, 1976), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1973, 1976), False, 'import json\n'), ((4095, 4133), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test[:, i]', 'y_score[:, i]'], {}), '(y_test[:, i], y_score[:, i])\n', (4104, 4133), False, 'from sklearn.metrics import roc_curve, auc, accuracy_score\n'), ((4155, 4174), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (4158, 4174), False, 'from sklearn.metrics import roc_curve, auc, accuracy_score\n'), ((4442, 4473), 'scipy.interp', 'interp', (['all_fpr', 'fpr[i]', 'tpr[i]'], {}), '(all_fpr, fpr[i], tpr[i])\n', (4448, 4473), False, 'from scipy import interp\n'), ((6006, 6060), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', '"""label_statistics.png"""'], {}), "(FLAGS.output_dir, 'label_statistics.png')\n", (6018, 6060), False, 'import os\n'), ((8863, 8928), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_score'], {'target_names': 'target_names'}), '(y_test, y_score, target_names=target_names)\n', (8884, 8928), False, 'from sklearn.metrics import classification_report\n'), ((8994, 9025), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_score'], {}), '(y_test, y_score)\n', (9008, 9025), False, 'from sklearn.metrics import roc_curve, auc, accuracy_score\n')] |
from concurrent import futures
from functools import partial
from itertools import product
import os
import numpy as np
from pyx import color, deco, graph, path, text
def mandelbrot_iteration(niter, *args):
    """Run *niter* Mandelbrot iterations (z -> z**2 + c) on one tile.

    args[0] is a ``(tile_x, tile_y, c)`` triple where ``c`` is the complex
    grid of the tile.  Returns the tile coordinates, the worker's PID and a
    boolean mask of points still bounded (|z| < 2) after the iterations.
    """
    tile_x, tile_y, c = args[0]
    z = np.zeros_like(c)
    for _ in range(niter):
        z = z*z + c
    return tile_x, tile_y, os.getpid(), np.abs(z) < 2
# --- domain setup -----------------------------------------------------------
# resolution of the full image and the complex-plane window it covers
npts = 1024
xmin = -1.5
xmax = 0.5
width = npts
ymin = -1
ymax = 1
height = npts
# number of z -> z**2 + c iterations applied to every point
niter = 2000
# ogrid with imaginary step counts yields a column and a row axis that
# broadcast into the full grid of candidate c values
y, x = np.ogrid[ymin:ymax:height*1j, xmin:xmax:width*1j]
c = x+1j*y
# --- split the grid into n x n tiles, one work item per tile ----------------
nexponent = 3
n = 2**nexponent
nlen = npts//n
clist = []
for nx, ny in product(range(n), repeat=2):
    clist.append((nx, ny, c[nx*nlen:(nx+1)*nlen, ny*nlen:(ny+1)*nlen]))
# NOTE(review): the process pool is created at module level without an
# `if __name__ == "__main__":` guard -- this breaks under the "spawn"
# start method (Windows/macOS default); presumably fork-only. TODO confirm.
ex = futures.ProcessPoolExecutor(max_workers=4)
results = list(ex.map(partial(mandelbrot_iteration, niter), clist))
# --- flatten tile results back into per-point plot data ---------------------
data = []
procdict = {}
for r in results:
    nx, ny, procid, partialdata = r
    for mx, my in product(range(nlen), repeat=2):
        cval = c[nx*nlen+mx, ny*nlen+my]
        # (Re(c), Im(c), bounded flag) triple for the density plot
        data.append((cval.real, cval.imag, partialdata[mx, my]))
    # remember which worker process handled this tile
    procdict[(nx, ny)] = procid
# one distinct hue per worker pid, spread over two thirds of the hue circle
procids = set(procdict.values())
colors = [color.hsb(n/(len(procids)-1)*0.67, 1, 1) for n in range(len(procids))]
proccolors = dict(zip(procids, colors))
# --- render the fractal plus the per-process tile overlay with PyX ----------
text.set(text.LatexRunner)
text.preamble(r'\usepackage{arev}\usepackage[T1]{fontenc}')
g = graph.graphxy(width=8, height=8,
              x=graph.axis.lin(title=r'$\mathrm{Re}(c)$'),
              y=graph.axis.lin(title=r'$\mathrm{Im}(c)$'))
g.plot(graph.data.points(data, x=1, y=2, color=3),
       [graph.style.density(keygraph=None)])
# overlay each tile as a translucent rectangle coloured by its worker pid
dx = (xmax-xmin)/n
dy = (ymax-ymin)/n
for k, v in procdict.items():
    nx, ny = k
    tilecolor = proccolors[v]
    # convert tile corners from problem coordinates to canvas coordinates
    xll, yll = g.pos(xmin+dx*nx, ymin+dy*ny)
    xur, yur = g.pos(xmin+dx*(nx+1), ymin+dy*(ny+1))
    g.fill(path.rect(xll, yll, xur-xll, yur-yll),
           [deco.stroked([color.grey(0)]), tilecolor, color.transparency(0.5)])
g.writePDFfile()
# convert to PNG with Gimp to keep transparency
| [
"numpy.abs",
"pyx.graph.axis.lin",
"pyx.text.set",
"pyx.text.preamble",
"pyx.graph.data.points",
"pyx.color.grey",
"pyx.path.rect",
"pyx.graph.style.density",
"functools.partial",
"concurrent.futures.ProcessPoolExecutor",
"os.getpid",
"pyx.color.transparency",
"numpy.zeros_like"
] | [((691, 733), 'concurrent.futures.ProcessPoolExecutor', 'futures.ProcessPoolExecutor', ([], {'max_workers': '(4)'}), '(max_workers=4)\n', (718, 733), False, 'from concurrent import futures\n'), ((1224, 1250), 'pyx.text.set', 'text.set', (['text.LatexRunner'], {}), '(text.LatexRunner)\n', (1232, 1250), False, 'from pyx import color, deco, graph, path, text\n'), ((1251, 1311), 'pyx.text.preamble', 'text.preamble', (['"""\\\\usepackage{arev}\\\\usepackage[T1]{fontenc}"""'], {}), "('\\\\usepackage{arev}\\\\usepackage[T1]{fontenc}')\n", (1264, 1311), False, 'from pyx import color, deco, graph, path, text\n'), ((240, 256), 'numpy.zeros_like', 'np.zeros_like', (['c'], {}), '(c)\n', (253, 256), True, 'import numpy as np\n'), ((1461, 1503), 'pyx.graph.data.points', 'graph.data.points', (['data'], {'x': '(1)', 'y': '(2)', 'color': '(3)'}), '(data, x=1, y=2, color=3)\n', (1478, 1503), False, 'from pyx import color, deco, graph, path, text\n'), ((322, 333), 'os.getpid', 'os.getpid', ([], {}), '()\n', (331, 333), False, 'import os\n'), ((756, 792), 'functools.partial', 'partial', (['mandelbrot_iteration', 'niter'], {}), '(mandelbrot_iteration, niter)\n', (763, 792), False, 'from functools import partial\n'), ((1358, 1399), 'pyx.graph.axis.lin', 'graph.axis.lin', ([], {'title': '"""$\\\\mathrm{Re}(c)$"""'}), "(title='$\\\\mathrm{Re}(c)$')\n", (1372, 1399), False, 'from pyx import color, deco, graph, path, text\n'), ((1411, 1452), 'pyx.graph.axis.lin', 'graph.axis.lin', ([], {'title': '"""$\\\\mathrm{Im}(c)$"""'}), "(title='$\\\\mathrm{Im}(c)$')\n", (1425, 1452), False, 'from pyx import color, deco, graph, path, text\n'), ((1513, 1547), 'pyx.graph.style.density', 'graph.style.density', ([], {'keygraph': 'None'}), '(keygraph=None)\n', (1532, 1547), False, 'from pyx import color, deco, graph, path, text\n'), ((1773, 1814), 'pyx.path.rect', 'path.rect', (['xll', 'yll', '(xur - xll)', '(yur - yll)'], {}), '(xll, yll, xur - xll, yur - yll)\n', (1782, 1814), False, 'from pyx import 
color, deco, graph, path, text\n'), ((335, 344), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (341, 344), True, 'import numpy as np\n'), ((1866, 1889), 'pyx.color.transparency', 'color.transparency', (['(0.5)'], {}), '(0.5)\n', (1884, 1889), False, 'from pyx import color, deco, graph, path, text\n'), ((1838, 1851), 'pyx.color.grey', 'color.grey', (['(0)'], {}), '(0)\n', (1848, 1851), False, 'from pyx import color, deco, graph, path, text\n')] |
# Run MLlib's agglomerative clustering on the bundled example dataset
# and show the resulting cluster plot.
from MLlib.models import Agglomerative_clustering
import numpy as np

# one sample per row, whitespace-separated columns
X = np.genfromtxt('datasets/agglomerative_clustering.txt')
model = Agglomerative_clustering()
# NOTE(review): 4 is presumably the target number of clusters — confirm
# against the MLlib Agglomerative_clustering.work signature.
model.work(X, 4)
model.plot(X)
| [
"numpy.genfromtxt",
"MLlib.models.Agglomerative_clustering"
] | [((74, 128), 'numpy.genfromtxt', 'np.genfromtxt', (['"""datasets/agglomerative_clustering.txt"""'], {}), "('datasets/agglomerative_clustering.txt')\n", (87, 128), True, 'import numpy as np\n'), ((139, 165), 'MLlib.models.Agglomerative_clustering', 'Agglomerative_clustering', ([], {}), '()\n', (163, 165), False, 'from MLlib.models import Agglomerative_clustering\n')] |
# convert the downscaled data archive
def run( x ):
	''' open the GeoTiff at path `x` and return band 1 as a 2-D numpy array '''
	import rasterio
	# BUG FIX: the original `rasterio.open(x).read(1)` leaked the open
	# dataset handle; a context manager guarantees it is closed.
	with rasterio.open( x ) as rst:
		return rst.read( 1 )
def sort_files( files, split_on='_', elem_month=-2, elem_year=-1 ):
	'''
	sort a list of files chronologically by the month and year parsed
	from the filename. SNAP files are named '<prefix>_MM_YYYY.tif', so a
	plain lexicographic sort orders months as 1, 10, 11, ... ; this parses
	the numeric fields instead and sorts by (year, month).

	ARGUMENTS:
	----------
	files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob.
	split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
	elem_month = [int] slice element from resultant split filename list. default:-2. For SNAP standard.
	elem_year = [int] slice element from resultant split filename list. default:-1. For SNAP standard.

	RETURNS:
	--------
	new sorted `list` by year then month, ascending.
	'''
	# IMPROVEMENT: a key-based stdlib sort replaces the pandas DataFrame
	# round-trip of the original; `sorted` is also stable, so files with an
	# identical (year, month) keep their input order (pandas' default
	# quicksort did not guarantee that).
	def chrono_key( fn ):
		fields = fn.split( '.' )[0].split( split_on )
		return int( fields[elem_year] ), int( fields[elem_month] )
	return sorted( files, key=chrono_key )
def only_years( files, begin=1901, end=2100, split_on='_', elem_year=-1 ):
	'''
	return a new list of filenames restricted to those whose parsed year
	falls inside [begin, end] (both inclusive); input order is preserved.

	ARGUMENTS:
	----------
	files = [list] list of `str` pathnames. usually from glob.glob.
	begin = [int] four digit integer year of the begin time default:1901
	end = [int] four digit integer year of the end time default:2100
	split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
	elem_year = [int] slice element from resultant split filename list. default:-1. For SNAP standard.

	RETURNS:
	--------
	sliced `list` containing only files between begin and end year.
	'''
	def year_of( fn ):
		return int( fn.split( '.' )[0].split( split_on )[elem_year] )
	return [ fn for fn in files if begin <= year_of( fn ) <= end ]
# seasonal calculations
def coordinates( fn=None, meta=None, numpy_array=None, input_crs=None, to_latlong=False ):
	'''
	take a raster file (or an array plus rasterio-style meta) and return the
	centroid coords for each of the grid cells as a pair of numpy 2d arrays,
	optionally reprojected to (longitude, latitude).

	ARGUMENTS:
	----------
	fn = [str] path to a rasterio-readable file; mutually exclusive with meta/numpy_array.
	meta = [dict] rasterio-style metadata dict containing an 'affine' entry.
	numpy_array = [ndarray] 2-D pixel array matching `meta`.
	input_crs = [dict/str] CRS for the meta input; required when to_latlong=True.
	to_latlong = [bool] reproject cell centres to lon/lat. default:False

	RETURNS:
	--------
	(eastings, northings) or (longs, lats) as 2-D numpy arrays.
	'''
	import rasterio
	import numpy as np
	from affine import Affine
	from pyproj import Proj, transform

	if fn:
		# Read raster
		with rasterio.open( fn ) as r:
			T0 = r.affine  # upper-left pixel corner affine transform
			p1 = Proj( r.crs )
			A = r.read( 1 )  # pixel values
	elif (meta is not None) and (numpy_array is not None):
		A = numpy_array
		T0 = meta[ 'affine' ]
		# CRS only matters for the optional lat/long reprojection below
		p1 = Proj( input_crs ) if input_crs is not None else None
	else:
		# BUG FIX: the original built `BaseException('check inputs')` without
		# raising it, then crashed later with a NameError on `A`.
		raise ValueError( 'check inputs' )
	# All rows and columns
	cols, rows = np.meshgrid( np.arange( A.shape[1] ), np.arange( A.shape[0] ) )
	# Get affine transform for pixel centres
	T1 = T0 * Affine.translation( 0.5, 0.5 )
	# Function to convert pixel row/column index (from 0) to easting/northing at centre
	rc2en = lambda row, col: ( col, row ) * T1
	# BUG FIX: `np.float` was removed in NumPy 1.24 -- use the builtin float.
	eastings, northings = np.vectorize( rc2en, otypes=[float, float] )( rows, cols )
	if not to_latlong:
		return eastings, northings
	elif input_crs is not None:
		# Project all eastings/northings to longitudes, latitudes
		longs, lats = transform( p1, p1.to_latlong(), eastings, northings )
		return longs, lats
	else:
		# BUG FIX: was an un-raised BaseException in the original
		raise ValueError( 'cant reproject to latlong without an input_crs' )
# def cf_attrs( scenario, model, contact='<NAME> - <EMAIL>', ):
# '''
# generate the cf_metadata convention attributes for the NC file
# CONVENTION SPEC HERE:
# http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html
# '''
# {'institution': 'Scenarios Network for Alaska + Arctic Planning' ,
# 'institute_id': 'SNAP',
# 'experiment_id':scenario,
# 'source':model,
# 'model_id':model,
# 'forcing':,
# 'parent_experiment_id': ,
# 'parent_experiment_rip': ,
# 'branch_time': ,
# 'contact':contact,
# 'references': ,
# 'initialization_method': ,
# 'physics_version': ,
# 'tracking_id': ,
# 'acknowledgements': ,
# 'cesm_casename': ,
# 'cesm_repotag': ,
# 'cesm_compset': ,
# 'resolution': ,
# 'forcing_note': ,
# 'processed_by': ,
# 'processing_code_information': ,
# 'product': ,
# 'experiment': ,
# 'frequency': ,
# 'creation_date': ,
# 'history': ,
# 'Conventions':'CF-1.6' ,
# 'project_id': ,
# 'table_id': ,
# 'title': ,
# 'parent_experiment': ,
# 'modeling_realm': ,
# 'realization': ,
# 'cmor_version': }
def generate_nc( model, variable, scenario, base_path, output_base_path, begin, end ):
	'''
	main function to output a netcdf file from a group of
	GeoTiff files of downscaled SNAP data.

	Reads every monthly GeoTiff under <base_path>/<model>/<scenario>/<variable>,
	restricts them to [begin, end], stacks them into a single array and writes
	one NetCDF file under <output_base_path>/<model>/<scenario>/<variable>.

	ARGUMENTS:
	----------
	model = [str] CMIP5 model name used in the directory layout.
	variable = [str] CMIP5 variable name used in the directory layout.
	scenario = [str] CMIP5 scenario name used in the directory layout.
	base_path = [str] root directory of the input GeoTiffs.
	output_base_path = [str] root directory for the output NetCDF.
	begin = [int] first year to include.
	end = [int] last year to include.

	RETURNS:
	--------
	[str] path of the NetCDF file written to disk.

	NOTE(review): reads the module-global `ncpus`, which is only defined in the
	__main__ block below -- importing this module and calling generate_nc
	directly will raise NameError. Consider passing it as a parameter.
	'''
	# from pathos.multiprocessing import Pool
	from multiprocessing import Pool
	import numpy as np
	import pandas as pd
	import os, glob, rasterio, time, itertools
	import xarray as xr

	print( 'working on: {} {} {}'.format( variable, model, scenario ) )
	# set up pathing
	input_path = os.path.join( base_path, model, scenario, variable )
	output_path = os.path.join( output_base_path, model, scenario, variable )

	try: # try:except to overcome some multiprocessing collision issues
		if not os.path.exists( output_path ):
			os.makedirs( output_path )
	except:
		pass

	# list the data -- chronological order, then clip to [begin, end]
	l = sort_files( glob.glob( os.path.join( input_path, '*.tif' ) ) )
	l = only_years( l, begin=begin, end=end )

	# open a pool and turn the list of arrays into an ndarray
	pool = Pool( ncpus )
	arr = np.array( pool.map( run, l ) )
	pool.close()
	pool.join()

	# mask it -- cells at/below the global minimum are treated as nodata
	# NOTE(review): this assumes the nodata value is the array minimum; verify.
	arr = np.ma.masked_where( arr <= np.min( arr ), arr )

	# [RECENT ADDITION] swap the axes so we are (lat, lon, time)
	arr = np.swapaxes( np.swapaxes(arr, 0, 2), 0, 1)

	# get the lons and lats for the NetCDF from the first file in the series
	lons, lats = coordinates( l[0] )
	# NOTE(review): `rst` is opened but never used or closed below -- candidate
	# for removal (or close it explicitly).
	rst = rasterio.open( l[0] )

	# THIS IS A TEST AREA FOR PRODUCING THE *_bnds variables -- NOT IMPLEMENTED
	# # the res is standard in both directions.
	# res = 2000.0
	# half_res = 2000.0 / 2
	# lon_bnds = [ [i-half_res,i+half_res ] for i in lons.ravel() ]
	# # the lat_bnds variable appears to be the same as the above, but it is
	# # forced to the extent of the map so the lat_bnds at the top and bottom are
	# # different resolution (half) of the remainder of the rectilinear grid cells.
	# # this needs to be taken into account in this calculation.
	# # MAYBE JUST HOLD IT TO THE EXTENT FOR THESE LATITUDES?
	# lat_bnds = [ [i-half_res,i+half_res ] for i in lats.ravel() ]
	# lat_mins, lat_max = rst.bounds

	# get some time and date stuff (stamped into the NetCDF attrs below)
	t = time.time()

	# OGC WKT for EPSG:3338 which is the CF standard.
	crs_wkt = 'PROJCS["NAD83 / Alaska Albers",GEOGCS["NAD83",DATUM["North_American_Datum_1983",\
SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],TOWGS84[0,0,0,0,0,0,0],\
AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],\
AUTHORITY["EPSG","4269"]],PROJECTION["Albers_Conic_Equal_Area"],PARAMETER["standard_parallel_1",55],\
PARAMETER["standard_parallel_2",65],PARAMETER["latitude_of_center",50],PARAMETER["longitude_of_center",-154],\
PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],\
AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3338"]]'

	# create the dataset in xarray -- monthly timestamps from begin..end
	ds = xr.Dataset( { variable:(['x','y','time'], arr) },
				coords={ 'lon': (['x', 'y'], lons),
						'lat': (['x', 'y'], lats),
						'time': pd.date_range( str(begin), str(end + 1), freq='M' ) },
				attrs={ 'units':'Celcius', 'time_interval':'monthly',
						'variable':variable, 'model':model, 'scenario':scenario,
						'crs_wkt':crs_wkt,
						'creation_date':time.ctime( t ), 'creation_date_UTC':t,
						'created by':'<NAME> - <EMAIL>',
						'nodata_value':'-3.39999995e+38',
						'cell_resolution':'2000 meters' } )

	# write it out to disk with zlib compression and the SNAP fill value
	encoding = { variable: { '_FillValue':-3.39999995e+38, 'zlib':True } }
	output_filename = os.path.join( output_path, '_'.join([ variable, model, scenario, str( begin ), str( end ) ]) + '.nc' )
	ds.to_netcdf( output_filename, mode='w', encoding=encoding )
	ds.close() # close it
	return output_filename
if __name__ == '__main__':
	import os, glob
	import argparse

	# parse the commandline arguments
	parser = argparse.ArgumentParser( description='downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP' )
	parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="cmip5 model name (exact)" )
	parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="cmip5 variable name (exact)" )
	parser.add_argument( "-s", "--scenario", action='store', dest='scenario', type=str, help="cmip5 scenario name (exact)" )
	args = parser.parse_args()

	# setup args -- hard-coded workspace paths
	base_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/downscaled_cmip5'
	output_base_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/downscaled_cmip5_netcdf'
	# NOTE(review): `units` and `time_interval` are never used below --
	# generate_nc hard-codes its own attrs.
	units = 'C'
	time_interval = 'monthly'
	# read as a module global by generate_nc (see note in its docstring)
	ncpus = 32

	# CMIP5 convention: 'historical' covers 1900-2005, projections 2006-2100
	if args.scenario == 'historical':
		begin = 1900
		end = 2005
	else:
		begin = 2006
		end = 2100

	# main
	_ = generate_nc( args.model, args.variable, args.scenario, base_path, output_base_path, begin, end )
| [
"os.path.exists",
"time.ctime",
"argparse.ArgumentParser",
"os.makedirs",
"rasterio.open",
"os.path.join",
"numpy.swapaxes",
"affine.Affine.translation",
"multiprocessing.Pool",
"pyproj.Proj",
"numpy.min",
"pandas.DataFrame",
"time.time",
"numpy.arange",
"numpy.vectorize"
] | [((1410, 1469), 'pandas.DataFrame', 'pd.DataFrame', (["{'fn': files, 'month': months, 'year': years}"], {}), "({'fn': files, 'month': months, 'year': years})\n", (1422, 1469), True, 'import pandas as pd\n'), ((2349, 2391), 'pandas.DataFrame', 'pd.DataFrame', (["{'fn': files, 'year': years}"], {}), "({'fn': files, 'year': years})\n", (2361, 2391), True, 'import pandas as pd\n'), ((5621, 5671), 'os.path.join', 'os.path.join', (['base_path', 'model', 'scenario', 'variable'], {}), '(base_path, model, scenario, variable)\n', (5633, 5671), False, 'import os, glob\n'), ((5689, 5746), 'os.path.join', 'os.path.join', (['output_base_path', 'model', 'scenario', 'variable'], {}), '(output_base_path, model, scenario, variable)\n', (5701, 5746), False, 'import os, glob\n'), ((6103, 6114), 'multiprocessing.Pool', 'Pool', (['ncpus'], {}), '(ncpus)\n', (6107, 6114), False, 'from multiprocessing import Pool\n'), ((6444, 6463), 'rasterio.open', 'rasterio.open', (['l[0]'], {}), '(l[0])\n', (6457, 6463), False, 'import os, glob, rasterio, time, itertools\n'), ((7189, 7200), 'time.time', 'time.time', ([], {}), '()\n', (7198, 7200), False, 'import os, glob, rasterio, time, itertools\n'), ((8972, 9081), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP"""'}), "(description=\n 'downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP')\n", (8995, 9081), False, 'import argparse\n'), ((3308, 3329), 'numpy.arange', 'np.arange', (['A.shape[1]'], {}), '(A.shape[1])\n', (3317, 3329), True, 'import numpy as np\n'), ((3331, 3352), 'numpy.arange', 'np.arange', (['A.shape[0]'], {}), '(A.shape[0])\n', (3340, 3352), True, 'import numpy as np\n'), ((3407, 3435), 'affine.Affine.translation', 'Affine.translation', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (3425, 3435), False, 'from affine import Affine\n'), ((3656, 3704), 'numpy.vectorize', 'np.vectorize', (['rc2en'], {'otypes': '[np.float, 
np.float]'}), '(rc2en, otypes=[np.float, np.float])\n', (3668, 3704), True, 'import numpy as np\n'), ((6332, 6354), 'numpy.swapaxes', 'np.swapaxes', (['arr', '(0)', '(2)'], {}), '(arr, 0, 2)\n', (6343, 6354), True, 'import numpy as np\n'), ((149, 165), 'rasterio.open', 'rasterio.open', (['x'], {}), '(x)\n', (162, 165), False, 'import os, glob, rasterio, time, itertools\n'), ((2878, 2895), 'rasterio.open', 'rasterio.open', (['fn'], {}), '(fn)\n', (2891, 2895), False, 'import os, glob, rasterio, time, itertools\n'), ((2973, 2984), 'pyproj.Proj', 'Proj', (['r.crs'], {}), '(r.crs)\n', (2977, 2984), False, 'from pyproj import Proj, transform\n'), ((5828, 5855), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (5842, 5855), False, 'import os, glob\n'), ((5862, 5886), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (5873, 5886), False, 'import os, glob\n'), ((5951, 5984), 'os.path.join', 'os.path.join', (['input_path', '"""*.tif"""'], {}), "(input_path, '*.tif')\n", (5963, 5984), False, 'import os, glob\n'), ((6228, 6239), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (6234, 6239), True, 'import numpy as np\n'), ((3127, 3142), 'pyproj.Proj', 'Proj', (['input_crs'], {}), '(input_crs)\n', (3131, 3142), False, 'from pyproj import Proj, transform\n'), ((8373, 8386), 'time.ctime', 'time.ctime', (['t'], {}), '(t)\n', (8383, 8386), False, 'import os, glob, rasterio, time, itertools\n')] |
import numpy as np
class Node():
    """Base node of a tiny computation graph.

    Subclasses compute `self.value` from the values of `self.in_nodes`
    by overriding `forward` (and eventually `backward`).
    """

    def __init__(self, params=None):
        # BUG FIX: the original used a mutable default (`params=[]`) which is
        # shared across all default-constructed instances. Callers may still
        # pass any object (Input_Node passes a scalar); it is stored as-is.
        self.in_nodes = [] if params is None else params
        self.value = 0

    def forward(self):
        # BUG FIX: the original did `return NotImplementedError`, handing the
        # exception *class* back to the caller instead of signalling an error.
        raise NotImplementedError

    def backward(self):
        raise NotImplementedError
class Input_Node(Node):
    """Leaf node that simply holds a constant value."""

    def __init__(self, value=0):
        # the base class stores `value` under `in_nodes`; the node's own
        # output value is the same constant
        super().__init__(value)
        self.value = value
class Linear(Node):
    """Linear combination node: value = bias + sum(inputs * weights)."""

    def __init__(self, inputs, weights, bias):
        super().__init__([inputs, weights, bias])

    def forward(self):
        # unpack the three groups wired up in __init__
        input_nodes, weight_nodes, bias_node = self.in_nodes
        x = np.array([node.value for node in input_nodes])
        w = np.array([node.value for node in weight_nodes])
        self.value = bias_node.value + np.sum(x * w)
if __name__ == "__main__":
    # smoke test: value = 4*4 + 3*3 + 1 = 26
    x1, x2 = Input_Node(4), Input_Node(3)
    w1, w2 = Input_Node(4), Input_Node(3)
    bias = Input_Node(1)

    input_nodes = [x1, x2]
    weight_nodes = [w1, w2]

    lin = Linear(input_nodes, weight_nodes, bias)
    lin.forward()
    print(lin.value)

    print("End.")
"numpy.array",
"numpy.sum"
] | [((631, 676), 'numpy.array', 'np.array', (['[x.value for x in self.in_nodes[0]]'], {}), '([x.value for x in self.in_nodes[0]])\n', (639, 676), True, 'import numpy as np\n'), ((695, 740), 'numpy.array', 'np.array', (['[w.value for w in self.in_nodes[1]]'], {}), '([w.value for w in self.in_nodes[1]])\n', (703, 740), True, 'import numpy as np\n'), ((807, 831), 'numpy.sum', 'np.sum', (['(inputs * weights)'], {}), '(inputs * weights)\n', (813, 831), True, 'import numpy as np\n')] |
""" state, observation and action spaces """
from collections import namedtuple, OrderedDict
from io import BytesIO
from itertools import product
from os.path import join
import pkg_resources
import numpy as np
import pandas as pd
import energypy as ep
from energypy.common.spaces import DiscreteSpace, ContinuousSpace
# used in envs -- describes one dimension of a space: its name, bounds,
# primitive type ('discrete'/'continuous') and backing data
PrimitiveConfig = namedtuple(
    'primitive', ['name', 'low', 'high', 'type', 'data']
)

# maps PrimitiveConfig.type to the class implementing that primitive
primitive_register = {
    'discrete': DiscreteSpace,
    'continuous': ContinuousSpace
}
class Space(OrderedDict):
    """Ordered mapping of named primitive spaces forming one global space.

    Keys are primitive names; values are DiscreteSpace/ContinuousSpace
    instances.  Dimension order follows insertion order, so `shape`, `low`,
    `high` and `sample` all line up with the order primitives were added.
    """
    def __init__(self, name):
        super().__init__()
        self.name = name
        self._shape = None

    def __repr__(self):
        return('<{} space {}>'.format(self.name, self.shape))

    @property
    def shape(self):
        # placeholder getter -- superseded by the @shape.getter below
        return self._shape

    @shape.getter
    def shape(self):
        # one dimension per primitive in the mapping
        return (len(self.keys()), )

    @property
    def low(self):
        # placeholder getter -- superseded by the @low.getter below
        return self._low

    @low.getter
    def low(self):
        # per-dimension lower bounds, shaped like the space
        return np.array([spc.low for spc in self.values()]).reshape(*self.shape)

    @property
    def high(self):
        # placeholder getter -- superseded by the @high.getter below
        return self._high

    @high.getter
    def high(self):
        # per-dimension upper bounds, shaped like the space
        return np.array([spc.high for spc in self.values()]).reshape(*self.shape)

    def sample(self):
        """Sample once from every primitive; returns array of shape (1, n_dims)."""
        return np.array([spc.sample() for spc in self.values()]).reshape(1, *self.shape)

    def contains(self, x):
        """True when every element of x[0] lies inside its primitive (x is (1, n_dims))."""
        return all(spc.contains(part) for (spc, part) in zip(self.values(), x[0]))

    def from_primitives(self, *primitives):
        """Build the space from PrimitiveConfig tuples; returns self for chaining."""
        for p in primitives:
            self[p.name] = primitive_register[p.type](p.name, p.low, p.high, data=p.data)
        self.num_samples = self.set_num_samples()
        return self

    def append(self, primitive):
        """Add a single PrimitiveConfig to the space; returns self for chaining."""
        p = primitive
        self[p.name] = primitive_register[p.type](p.name, p.low, p.high, data=p.data)
        self.num_samples = self.set_num_samples()
        return self

    def set_num_samples(self):
        """Return the common length of all data-backed primitives (None if none).

        Primitives whose data is the string 'append' carry no dataset and are
        skipped; all others must have equal-length data (asserted below).
        """
        num_samples = []
        for name, space in self.items():
            if isinstance(space.data, str):
                assert space.data == 'append'
            else:
                num_samples.append(np.array(space.data).shape[0])

        if num_samples:
            assert max(num_samples) == min(num_samples)
            return max(num_samples)
        else:
            return None
class StateSpace(Space):
    """Space backed by timeseries data -- indexable by episode step."""

    def __init__(self, name='state'):
        super().__init__(name=name)

    def __call__(self, steps, offset, append=None):
        """
        Return the state at one timestep as an array of shape (1, n_dims).

        steps = num steps through episode
        offset = offset for start of episode (steps + offset indexes the data)
        append = {name: data}, data from env appended to state / obs for
                 primitives whose data marker is 'append'
        """
        data = []
        for name, space in self.items():
            if space.data == 'append':
                # if isinstance(space.data, str):
                assert space.data == 'append'
                # value supplied by the environment at call time
                data.append(append[name])

            elif space.data is not None:
                # value read from the primitive's backing dataset
                data.append(space(steps, offset))

            else:
                raise ValueError

        return np.array(data).reshape(1, *self.shape)

    def sample_episode(
            self,
            how='full',
            episode_length=None
    ):
        """Return (start, end) offsets into the dataset for one episode.

        how='full'   -> the whole dataset
        how='random' -> a random window of episode_length samples
        how='fixed'  -> always (0, episode_length)
        """
        if episode_length:
            # never ask for more samples than the dataset holds
            episode_length = min(episode_length, self.num_samples)

        if how == 'full':
            return 0, self.num_samples

        elif how == 'random':
            if self.num_samples == episode_length:
                return 0, episode_length
            else:
                start = np.random.randint(
                    low=0, high=self.num_samples - episode_length
                )
                return start, start + episode_length

        elif how == 'fixed':
            return 0, episode_length

        else:
            raise ValueError

    def from_dataset(self, dataset):
        """Build one primitive per CSV column; returns self for chaining."""
        data = self.load_dataset(dataset)

        for col in data.columns:
            d = np.array(data.loc[:, col]).reshape(-1)
            # TODO doing all as continuous spaces here!
            self[col] = primitive_register['continuous'](
                col, np.min(d), np.max(d), d)
        self.num_samples = self.set_num_samples()
        return self

    def load_dataset(self, dataset):
        """ load example dataset (packaged with energypy) or load from user supplied path """
        if dataset == 'example':
            data = pkg_resources.resource_string(
                'energypy',
                'examples/{}.csv'.format(self.name)
            )
            return pd.read_csv(BytesIO(data), index_col=0, parse_dates=True)

        else:
            # expects <dataset>/<space name>.csv, e.g. <dataset>/state.csv
            return pd.read_csv(join(dataset, self.name + '.csv'), index_col=0, parse_dates=True)
class ActionSpace(Space):
    """Action space -- a Space that can enumerate a discrete action grid."""

    def __init__(self, name='action'):
        super().__init__(name=name)

    def discretize(self, num_discrete):
        """Return every combination of the per-dimension discretizations.

        The Cartesian product grows exponentially with the number of
        dimensions (curse of dimensionality).
        """
        # discretize each primitive dimension separately
        per_dim = [space.discretize(num_discrete) for space in self.values()]
        # enumerate the full Cartesian product across dimensions
        combos = list(product(*per_dim))
        return np.array(combos).reshape(-1, *self.shape)
class ObservationSpace():
    """Placeholder for partially-observable problems -- not implemented yet."""
    def __init__(self):
        raise NotImplementedError
| [
"collections.namedtuple",
"itertools.product",
"io.BytesIO",
"os.path.join",
"numpy.max",
"numpy.array",
"numpy.random.randint",
"numpy.min"
] | [((358, 422), 'collections.namedtuple', 'namedtuple', (['"""primitive"""', "['name', 'low', 'high', 'type', 'data']"], {}), "('primitive', ['name', 'low', 'high', 'type', 'data'])\n", (368, 422), False, 'from collections import namedtuple, OrderedDict\n'), ((3128, 3142), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3136, 3142), True, 'import numpy as np\n'), ((4169, 4178), 'numpy.min', 'np.min', (['d'], {}), '(d)\n', (4175, 4178), True, 'import numpy as np\n'), ((4180, 4189), 'numpy.max', 'np.max', (['d'], {}), '(d)\n', (4186, 4189), True, 'import numpy as np\n'), ((4581, 4594), 'io.BytesIO', 'BytesIO', (['data'], {}), '(data)\n', (4588, 4594), False, 'from io import BytesIO\n'), ((4673, 4706), 'os.path.join', 'join', (['dataset', "(self.name + '.csv')"], {}), "(dataset, self.name + '.csv')\n", (4677, 4706), False, 'from os.path import join\n'), ((5145, 5163), 'itertools.product', 'product', (['*discrete'], {}), '(*discrete)\n', (5152, 5163), False, 'from itertools import product\n'), ((5181, 5199), 'numpy.array', 'np.array', (['discrete'], {}), '(discrete)\n', (5189, 5199), True, 'import numpy as np\n'), ((3599, 3663), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(self.num_samples - episode_length)'}), '(low=0, high=self.num_samples - episode_length)\n', (3616, 3663), True, 'import numpy as np\n'), ((3993, 4019), 'numpy.array', 'np.array', (['data.loc[:, col]'], {}), '(data.loc[:, col])\n', (4001, 4019), True, 'import numpy as np\n'), ((2156, 2176), 'numpy.array', 'np.array', (['space.data'], {}), '(space.data)\n', (2164, 2176), True, 'import numpy as np\n')] |
import numpy as np, matplotlib.pyplot as plt, seaborn as sns
from rdkit import Chem
from dataclasses import dataclass
from utils.exp import BaseArgs, BaseExpLog
from utils.data import remove_processed_data
import torch
from torch_geometric.data import DataLoader
from data.data_processors.ts_gen_processor import TSGenDataset
@dataclass
class TSGenArgs(BaseArgs):
    """Hyperparameters for a TS generation run (extends BaseArgs)."""
    # model params, default set to best MIT model params
    h_nf: int = 256
    gnn_depth: int = 3
    n_layers: int = 2

    # training params
    num_workers: int = 2  # DataLoader worker processes
    loss: str = 'mse'
    optimiser: str = 'adam'
    lr: float = 1e-3
class TSGenExpLog(BaseExpLog):
    """Experiment log for TS generation; adds saving of test-set W matrices."""

    def __init__(self, args):
        super(TSGenExpLog, self).__init__(args)

    def save_Ws(self, file_name, W_folder='experiments/meta_eval/ws/', save_to_log_dir = False):
        """Allows for saving weight matrices from testing model in folder and log file."""
        assert self.check_test_batches(self.test_logs[-1].Ws), "You don't have the same number of batched W files as batches."
        test_Ws = np.concatenate(self.test_logs[-1].Ws, 0).squeeze() # have to squeeze since extra singleton dim
        # 842 = presumably the fixed size of the test split -- TODO confirm
        assert len(test_Ws) == 842, f"Should have 842 test_D_inits when unbatched, you have {len(test_Ws)}."
        np.save(W_folder + file_name, test_Ws)
        if save_to_log_dir:
            # also keep a copy alongside the run's own logs
            np.save(self.args.log_dir + 'W' + file_name, test_Ws)
### construct data
def construct_dataset_and_loaders(args):
    """Build the TSGenDataset and train/test DataLoaders from `args`.

    The first floor(tt_split * n_rxns) reactions go to the (shuffled)
    training loader, the remainder to the (unshuffled) test loader.
    Side effect: mutates args.n_rxns to the true dataset size.
    Returns (dataset, train_loader, test_loader).
    """
    if args.remove_existing_data:
        # wipe previously processed files so the dataset is rebuilt
        remove_processed_data()

    # build dataset
    dataset = TSGenDataset(args.root_dir, args.n_rxns)

    # build loaders using tt_split
    n_rxns = len(dataset) # as args.n_rxns may be over the limit
    args.n_rxns = n_rxns # set args.n_rxns to real max
    n_train = int(np.floor(args.tt_split * n_rxns))
    train_loader = DataLoader(dataset[: n_train], batch_size = args.batch_size, \
        shuffle = True, num_workers=args.num_workers, pin_memory=True)
    test_loader = DataLoader(dataset[n_train: ], batch_size = args.batch_size, \
        shuffle = False, num_workers=args.num_workers, pin_memory=True)
    return dataset, train_loader, test_loader
### recording d_inits
def all_same(items):
    """Return True when every element equals the first (vacuously True for empty)."""
    if not items:
        return True
    head = items[0]
    return all(element == head for element in items)
def create_ds_dict(d_files, d_folder='d_inits/', mols_folder=r'data/raw/'):
    """Collect reaction-core distance distributions for plotting.

    Loads the test reactant/TS/product molecules (SDF), the MIT reference
    D_init matrices and each model D matrix in `d_files`, masks everything to
    the reaction core (bonds present in exactly one of reactant/product) and
    flattens the non-zero distances into 1-D arrays.

    Returns {key: (distances, label)} with keys 'gt', 'mit', 'lin_approx'
    and one f'D_fin{i}' per entry of d_files.
    """
    # base_folder is where the test mol sdf files are
    # all_test_res is dict of D_preds, TODO: add assert
    # TODO: add way to automate loading multiple files ... pass in file names

    # get test mols
    test_ts_file = mols_folder + 'test_ts.sdf'
    reactant_file = mols_folder + 'test_reactants.sdf'
    product_file = mols_folder + 'test_products.sdf'
    test_r = Chem.SDMolSupplier(reactant_file, removeHs=False, sanitize=False)
    test_r = [x for x in test_r]
    test_ts = Chem.SDMolSupplier(test_ts_file, removeHs=False, sanitize=False)
    test_ts = [ts for ts in test_ts]
    test_p = Chem.SDMolSupplier(product_file, removeHs=False, sanitize=False)
    test_p = [x for x in test_p]

    # save and load
    mit_d_init = np.load(d_folder + 'mit_best.npy')
    d_inits = []
    for d_file in d_files:
        d_inits.append(np.load(d_folder + d_file))
    num_d_inits = len(d_inits)

    # lists for plotting
    gt, mit, lin_approx = [], [], []
    d_init_lists = [[] for _ in range(num_d_inits)]

    for idx in range(len(test_ts)):
        # num_atoms + mask for reaction core
        # core = bonds that exist in exactly one of reactant/product
        num_atoms = test_ts[idx].GetNumAtoms()
        core_mask = (Chem.GetAdjacencyMatrix(test_p[idx]) + Chem.GetAdjacencyMatrix(test_r[idx])) == 1
        # main 3: ground truth TS, MIT reference, reactant/product midpoint
        gt.append(np.ravel(Chem.Get3DDistanceMatrix(test_ts[idx]) * core_mask))
        mit.append(np.ravel(mit_d_init[idx][0:num_atoms, 0:num_atoms] * core_mask))
        lin_approx.append(np.ravel((Chem.Get3DDistanceMatrix(test_r[idx]) + Chem.Get3DDistanceMatrix(test_p[idx])) / 2 * core_mask))
        # other d_inits
        for j, d_init_list in enumerate(d_init_lists):
            d_init_lists[j].append(np.ravel(d_inits[j][idx][0:num_atoms, 0:num_atoms]*core_mask))

    # make plottable: flatten each collection into one 1-D array
    all_ds = [gt, mit, lin_approx, *d_init_lists]
    all_ds = [np.concatenate(ds).ravel() for ds in all_ds]
    assert all_same([len(ds) for ds in all_ds]), "Lengths of all ds after concat don't match."
    all_ds = [ds[ds != 0] for ds in all_ds] # only keep non-zero values
    assert all_same([len(ds) for ds in all_ds]), "Lengths of all ds after removing zeroes don't match."

    ds_dict = {'gt': (all_ds[0], 'Ground Truth'), 'mit': (all_ds[1], 'MIT D_init'), \
               'lin_approx': (all_ds[2], 'Linear Approximation')}
    base_ds_counter = len(ds_dict)
    for d_id in range(len(d_init_lists)):
        name = f'D_fin{d_id}'
        ds_dict[name] = (all_ds[base_ds_counter + d_id], name)
    return ds_dict
def plot_ds(ds_dict, col, no_print=[], save_fig_name=None):
    """Plot KDE curves for each distance distribution in ds_dict.

    keys: 'gt', 'lin_approx', 'mit', f'D_fin{i}'
    col: single colour used for all model ('D_fin*'/other) curves; the
    reference curves take successive colours from a Set2 palette.
    no_print: keys to skip.  save_fig_name: basename (no extension) to save.

    NOTE(review): mutable default `no_print=[]` -- never mutated here, but
    fragile if that changes.
    """
    fig, ax = plt.subplots(figsize=(12,9))
    num_to_plot = len(ds_dict)
    cols = sns.color_palette("Set2", num_to_plot)

    # print for all keys not in no_print
    for i, key in enumerate(ds_dict.keys()):
        if key in no_print:
            continue
        if key != 'gt' and key != 'mit' and key != 'lin_approx':
            # model-produced curves all share the caller-supplied colour
            sns.distplot(ds_dict[key][0], color=col, kde_kws={"lw": 3, "label": ds_dict[key][1]}, hist=False)
            continue
        sns.distplot(ds_dict[key][0], color=cols[i], kde_kws={"lw": 3, "label": ds_dict[key][1]}, hist=False)

    ax.legend(loc='upper right')
    ax.legend(fontsize=12)
    ax.set_ylabel('Density', fontsize=22)
    ax.set_xlabel(r'Distance ($\AA$)', fontsize=22)
    ax.tick_params(axis='both', which='major', labelsize=22)
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(True)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(True)
    if save_fig_name:
        plt.savefig(f'{save_fig_name}.png', bbox_inches='tight')
# number of non-model entries in a ds_dict ('gt', 'mit', 'lin_approx')
NUM_STD_DS = 3

def ensemble_plot(ds_dict, ds_not_to_print, print_my_ds = False, sort = False, col = 'b', name = None):
    """Average the model D_fin distributions into one ensemble curve and plot.

    Adds an 'ens' entry to ds_dict; unless print_my_ds is set, the individual
    D_fin keys are appended to ds_not_to_print so only the ensemble shows.
    """
    n_model_ds = len(ds_dict) - NUM_STD_DS

    # optionally sort each distribution so the ensemble resembles an average
    series = []
    for j in range(n_model_ds):
        vals = ds_dict[f'D_fin{j}'][0]
        series.append(sorted(vals) if sort else vals)

    # element-wise mean across the model distributions
    ens_ds = []
    for i in range(len(ds_dict['mit'][0])):
        total = 0
        for dist in series:
            total += dist[i]
        ens_ds.append(total / n_model_ds)
    ds_dict['ens'] = (ens_ds, "Avg Ensemble D_fin")

    if not print_my_ds:
        ds_not_to_print.extend(f'D_fin{j}' for j in range(n_model_ds))
    plot_ds(ds_dict, col, ds_not_to_print, name)
"""
def ensemble_plot(ds_dict, ds_not_to_print, print_my_ds = False):
num_my_ds = len(ds_dict) - NUM_STD_DS
ens_ds = []
for i in range(len(ds_dict['mit'][0])):
ens_d = 0
for j in range(0, num_my_ds):
ens_d += ds_dict[f'D_init{j}'][0][i]
ens_d /= num_my_ds
ens_ds.append(ens_d)
ds_dict['ens'] = (ens_ds, "Avg Ensemble D_init")
if not print_my_ds:
for j in range(0, num_my_ds):
ds_not_to_print.append(f'D_init{j}')
plot_ds(ds_dict, ds_not_to_print, None)
""" | [
"rdkit.Chem.Get3DDistanceMatrix",
"data.data_processors.ts_gen_processor.TSGenDataset",
"matplotlib.pyplot.savefig",
"seaborn.color_palette",
"torch_geometric.data.DataLoader",
"seaborn.distplot",
"numpy.floor",
"utils.data.remove_processed_data",
"rdkit.Chem.SDMolSupplier",
"numpy.concatenate",
... | [((1568, 1608), 'data.data_processors.ts_gen_processor.TSGenDataset', 'TSGenDataset', (['args.root_dir', 'args.n_rxns'], {}), '(args.root_dir, args.n_rxns)\n', (1580, 1608), False, 'from data.data_processors.ts_gen_processor import TSGenDataset\n'), ((1840, 1962), 'torch_geometric.data.DataLoader', 'DataLoader', (['dataset[:n_train]'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(dataset[:n_train], batch_size=args.batch_size, shuffle=True,\n num_workers=args.num_workers, pin_memory=True)\n', (1850, 1962), False, 'from torch_geometric.data import DataLoader\n'), ((1992, 2115), 'torch_geometric.data.DataLoader', 'DataLoader', (['dataset[n_train:]'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(dataset[n_train:], batch_size=args.batch_size, shuffle=False,\n num_workers=args.num_workers, pin_memory=True)\n', (2002, 2115), False, 'from torch_geometric.data import DataLoader\n'), ((2722, 2787), 'rdkit.Chem.SDMolSupplier', 'Chem.SDMolSupplier', (['reactant_file'], {'removeHs': '(False)', 'sanitize': '(False)'}), '(reactant_file, removeHs=False, sanitize=False)\n', (2740, 2787), False, 'from rdkit import Chem\n'), ((2835, 2899), 'rdkit.Chem.SDMolSupplier', 'Chem.SDMolSupplier', (['test_ts_file'], {'removeHs': '(False)', 'sanitize': '(False)'}), '(test_ts_file, removeHs=False, sanitize=False)\n', (2853, 2899), False, 'from rdkit import Chem\n'), ((2950, 3014), 'rdkit.Chem.SDMolSupplier', 'Chem.SDMolSupplier', (['product_file'], {'removeHs': '(False)', 'sanitize': '(False)'}), '(product_file, removeHs=False, sanitize=False)\n', (2968, 3014), False, 'from rdkit import Chem\n'), ((3087, 3121), 'numpy.load', 'np.load', (["(d_folder + 'mit_best.npy')"], {}), "(d_folder + 'mit_best.npy')\n", (3094, 3121), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((4979, 5008), 'matplotlib.pyplot.subplots', 
'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (4991, 5008), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((5050, 5088), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""', 'num_to_plot'], {}), "('Set2', num_to_plot)\n", (5067, 5088), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((1266, 1304), 'numpy.save', 'np.save', (['(W_folder + file_name)', 'test_Ws'], {}), '(W_folder + file_name, test_Ws)\n', (1273, 1304), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((1505, 1528), 'utils.data.remove_processed_data', 'remove_processed_data', ([], {}), '()\n', (1526, 1528), False, 'from utils.data import remove_processed_data\n'), ((1787, 1819), 'numpy.floor', 'np.floor', (['(args.tt_split * n_rxns)'], {}), '(args.tt_split * n_rxns)\n', (1795, 1819), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((5429, 5534), 'seaborn.distplot', 'sns.distplot', (['ds_dict[key][0]'], {'color': 'cols[i]', 'kde_kws': "{'lw': 3, 'label': ds_dict[key][1]}", 'hist': '(False)'}), "(ds_dict[key][0], color=cols[i], kde_kws={'lw': 3, 'label':\n ds_dict[key][1]}, hist=False)\n", (5441, 5534), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((5955, 6011), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{save_fig_name}.png"""'], {'bbox_inches': '"""tight"""'}), "(f'{save_fig_name}.png', bbox_inches='tight')\n", (5966, 6011), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((1345, 1398), 'numpy.save', 'np.save', (["(self.args.log_dir + 'W' + file_name)", 'test_Ws'], {}), "(self.args.log_dir + 'W' + file_name, test_Ws)\n", (1352, 1398), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((3189, 3215), 'numpy.load', 'np.load', (['(d_folder + d_file)'], {}), '(d_folder + d_file)\n', (3196, 3215), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((3717, 3780), 
'numpy.ravel', 'np.ravel', (['(mit_d_init[idx][0:num_atoms, 0:num_atoms] * core_mask)'], {}), '(mit_d_init[idx][0:num_atoms, 0:num_atoms] * core_mask)\n', (3725, 3780), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((5302, 5404), 'seaborn.distplot', 'sns.distplot', (['ds_dict[key][0]'], {'color': 'col', 'kde_kws': "{'lw': 3, 'label': ds_dict[key][1]}", 'hist': '(False)'}), "(ds_dict[key][0], color=col, kde_kws={'lw': 3, 'label': ds_dict\n [key][1]}, hist=False)\n", (5314, 5404), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((1054, 1094), 'numpy.concatenate', 'np.concatenate', (['self.test_logs[-1].Ws', '(0)'], {}), '(self.test_logs[-1].Ws, 0)\n', (1068, 1094), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((3518, 3554), 'rdkit.Chem.GetAdjacencyMatrix', 'Chem.GetAdjacencyMatrix', (['test_p[idx]'], {}), '(test_p[idx])\n', (3541, 3554), False, 'from rdkit import Chem\n'), ((3557, 3593), 'rdkit.Chem.GetAdjacencyMatrix', 'Chem.GetAdjacencyMatrix', (['test_r[idx]'], {}), '(test_r[idx])\n', (3580, 3593), False, 'from rdkit import Chem\n'), ((4030, 4093), 'numpy.ravel', 'np.ravel', (['(d_inits[j][idx][0:num_atoms, 0:num_atoms] * core_mask)'], {}), '(d_inits[j][idx][0:num_atoms, 0:num_atoms] * core_mask)\n', (4038, 4093), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((4183, 4201), 'numpy.concatenate', 'np.concatenate', (['ds'], {}), '(ds)\n', (4197, 4201), True, 'import numpy as np, matplotlib.pyplot as plt, seaborn as sns\n'), ((3645, 3683), 'rdkit.Chem.Get3DDistanceMatrix', 'Chem.Get3DDistanceMatrix', (['test_ts[idx]'], {}), '(test_ts[idx])\n', (3669, 3683), False, 'from rdkit import Chem\n'), ((3818, 3855), 'rdkit.Chem.Get3DDistanceMatrix', 'Chem.Get3DDistanceMatrix', (['test_r[idx]'], {}), '(test_r[idx])\n', (3842, 3855), False, 'from rdkit import Chem\n'), ((3858, 3895), 'rdkit.Chem.Get3DDistanceMatrix', 'Chem.Get3DDistanceMatrix', (['test_p[idx]'], {}), 
'(test_p[idx])\n', (3882, 3895), False, 'from rdkit import Chem\n')] |
#!/usr/bin/env python
"""Script used to generate a cuboid dataset with cubes and rectangles under
various shapes, rotations, translations following the general format of
ShapeNet.
"""
import argparse
import random
import os
from string import ascii_letters, digits
import sys
import numpy as np
from progress.bar import Bar
from pyquaternion import Quaternion
from shapes import Shape, Cuboid, Ellipsoid
from learnable_primitives.mesh import MeshFromOBJ
def get_single_cube(minimum, maximum):
    """Sample a cube whose half-extent is uniform in [minimum[0], maximum[0]).

    Only the first component of each bound is used, since a cube has equal
    extents along all three axes.
    """
    lo, hi = minimum[0], maximum[0]
    half = lo + np.random.rand() * (hi - lo)
    return Cuboid(-half, half, -half, half, -half, half)
def get_single_rectangle(minimum, maximum):
    """Sample an axis-aligned box with per-axis half-extents drawn uniformly
    from [minimum, maximum), component-wise."""
    lo = np.array(minimum)
    hi = np.array(maximum)
    half = lo + np.random.rand(3) * (hi - lo)
    return Cuboid(-half[0], half[0], -half[1], half[1], -half[2], half[2])
def adjacent_cubes(R):
    """Sample two random cuboids, place the second flush against the first
    along a randomly chosen axis, then rotate both by the matrix R.

    Returns the two Cuboid objects.
    """
    ext1 = np.random.rand(3)
    ext2 = np.random.rand(3)
    first = Cuboid(-ext1[0], ext1[0], -ext1[1], ext1[1], -ext1[2], ext1[2])
    second = Cuboid(-ext2[0], ext2[0], -ext2[1], ext2[1], -ext2[2], ext2[2])
    # Candidate face-to-face offsets: one row per axis (y, x, z order,
    # matching the original construction).
    offsets = np.array([
        [0.0, ext2[1] + ext1[1], 0.0],
        [ext2[0] + ext1[0], 0.0, 0.0],
        [0.0, 0.0, ext2[2] + ext1[2]]
    ])
    shift = offsets[np.random.choice(np.arange(3))].reshape(3, 1)
    second.translate(shift)
    first.rotate(R)
    second.rotate(R)
    return first, second
def multiple_cubes(R1, R2, t):
    """Sample two random cuboids: rotate the first by R1; translate the
    second by t and then rotate it by R2.

    Returns the two Cuboid objects.
    """
    ext1 = np.random.rand(3)
    ext2 = np.random.rand(3)
    first = Cuboid(-ext1[0], ext1[0], -ext1[1], ext1[1], -ext1[2], ext1[2])
    second = Cuboid(-ext2[0], ext2[0], -ext2[1], ext2[1], -ext2[2], ext2[2])
    first.rotate(R1)
    second.translate(t)
    second.rotate(R2)
    return first, second
def main(argv):
    """Generate a dataset of primitive shapes in a ShapeNet-like layout.

    Parses the CLI options, samples ``n_samples`` shapes of the requested
    ``shapes_type``, and writes each sample (an .obj and .ply mesh plus a
    point cloud) into a randomly-named subdirectory of
    ``output_directory/<shapes_type>/<random_id>/models/``.  Finally each
    generated mesh is reloaded and its extents printed as a sanity check.

    Args:
        argv: command-line arguments (excluding the program name).
    """
    parser = argparse.ArgumentParser(
        description="Generate a cuboid dataset"
    )
    parser.add_argument(
        "output_directory",
        help="Save the dataset in this directory"
    )
    parser.add_argument(
        "--n_samples",
        type=int,
        default=10,
        help="Number of training samples to be generated"
    )
    parser.add_argument(
        "--shapes_type",
        default="cubes",
        choices=[
            "cubes",
            "cubes_translated",
            "cubes_rotated_translated",
            "cubes_rotated",
            "rectangles",
            "rectangles_translated",
            "rectangles_rotated",
            "rectangles_rotated_translated",
            "ellipsoid",
            "random"
        ],
        help="The type of the shapes in every sample"
    )
    parser.add_argument(
        "--n_shapes_per_samples",
        type=int,
        default=1,
        help="Number of shapes per sample"
    )
    parser.add_argument(
        "--maximum",
        type=lambda x: tuple(map(float, x.split(","))),
        default="0.5,0.5,0.5",
        help="Maximum size along every axis"
    )
    parser.add_argument(
        "--minimum",
        type=lambda x: tuple(map(float, x.split(","))),
        default="0.13,0.13,0.13",
        help="Maximum size along every axis"
    )
    args = parser.parse_args(argv)

    # Check if output directory exists and if it doesn't create it
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)

    # Create a directory based on the type of the shapes inside the output
    # directory
    output_directory = os.path.join(
        args.output_directory,
        args.shapes_type
    )

    if "cubes" in args.shapes_type:
        # Cubes need identical extents along every axis, so the min/max
        # bounds must be isotropic.
        assert args.maximum[0] == args.maximum[1]
        assert args.maximum[1] == args.maximum[2]
        assert args.minimum[0] == args.minimum[1]
        assert args.minimum[1] == args.minimum[2]

    bar = Bar("Generating %d cuboids" % (args.n_samples,), max=args.n_samples)
    c = None
    for _ in range(args.n_samples):
        # Base shape: a cube or a rectangle depending on shapes_type.
        if "cubes" in args.shapes_type:
            c = get_single_cube(args.minimum, args.maximum)
        if "rectangles" in args.shapes_type:
            c = get_single_rectangle(args.minimum, args.maximum)
        # Optional random rigid transform of the base shape.
        if "translated" in args.shapes_type:
            t = 0.3*np.random.random((3, 1))
            c.translate(t)
        if "rotated" in args.shapes_type:
            q = Quaternion.random()
            R = q.rotation_matrix
            c.rotate(R)
        if "ellipsoid" in args.shapes_type:
            # Three identical ellipsoids, two of them randomly rotated,
            # merged into one compound shape.
            abc = np.random.random((3, 1))
            c1 = Ellipsoid(abc[0], abc[1], abc[2])
            c2 = Ellipsoid(abc[0], abc[1], abc[2])
            c3 = Ellipsoid(abc[0], abc[1], abc[2])
            q = Quaternion.random()
            R = q.rotation_matrix
            c2.rotate(R)
            q = Quaternion.random()
            R = q.rotation_matrix
            c3.rotate(R)
            c = Shape.from_shapes([c1, c2, c3])
        if "random" in args.shapes_type:
            # Two independently rotated cuboids, the second translated away.
            q1 = Quaternion.random()
            q2 = Quaternion.random()
            c1, c2 = multiple_cubes(
                q1.rotation_matrix,
                q2.rotation_matrix,
                3.5*np.random.random((3, 1))
            )
            c = Shape.from_shapes([c1, c2])

        # Create subdirectory (32-char random id) to save the sample.
        # NOTE: was `xrange` (Python 2); fixed for Python 3.
        folder_name = ''.join(
            random.choice(ascii_letters + digits) for _ in range(32)
        )
        base_dir = os.path.join(output_directory, folder_name, "models")
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)
        # Save mesh, plus a point-cloud version for point-based pipelines.
        c.save_as_mesh(os.path.join(base_dir, "model_normalized.obj"), "obj")
        c.save_as_mesh(os.path.join(base_dir, "model_normalized.ply"), "ply")
        c.save_as_pointcloud(
            os.path.join(base_dir, "model_normalized_pcl.obj"), "obj"
        )
        # Log the transform applied to each sample for later reference.
        if "translated" in args.shapes_type:
            print(os.path.join(base_dir, "model_normalized_pcl.obj"), t.T)
        if "rotated" in args.shapes_type:
            print(os.path.join(base_dir, "model_normalized_pcl.obj"), q)
        bar.next()
    bar.finish()

    # Sanity check: reload every generated mesh and print its max extents.
    for sample_dir in os.listdir(output_directory):
        obj_path = os.path.join(
            output_directory, sample_dir, "models/model_normalized.obj")
        m = MeshFromOBJ(obj_path)
        print(obj_path, m.points.max(-1))
if __name__ == "__main__":
    # Entry point: forward the CLI arguments (minus the program name).
    main(sys.argv[1:])
| [
"os.path.exists",
"os.listdir",
"pyquaternion.Quaternion.random",
"numpy.random.rand",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.random.random",
"random.choice",
"os.path.join",
"learnable_primitives.mesh.MeshFromOBJ",
"shapes.Shape.from_shapes",
"numpy.array",
"numpy.linspace",
"sh... | [((615, 642), 'shapes.Cuboid', 'Cuboid', (['(-r)', 'r', '(-r)', 'r', '(-r)', 'r'], {}), '(-r, r, -r, r, -r, r)\n', (621, 642), False, 'from shapes import Shape, Cuboid, Ellipsoid\n'), ((703, 720), 'numpy.array', 'np.array', (['minimum'], {}), '(minimum)\n', (711, 720), True, 'import numpy as np\n'), ((735, 752), 'numpy.array', 'np.array', (['maximum'], {}), '(maximum)\n', (743, 752), True, 'import numpy as np\n'), ((824, 875), 'shapes.Cuboid', 'Cuboid', (['(-rs[0])', 'rs[0]', '(-rs[1])', 'rs[1]', '(-rs[2])', 'rs[2]'], {}), '(-rs[0], rs[0], -rs[1], rs[1], -rs[2], rs[2])\n', (830, 875), False, 'from shapes import Shape, Cuboid, Ellipsoid\n'), ((1018, 1075), 'shapes.Cuboid', 'Cuboid', (['(-x_max1)', 'x_max1', '(-y_max1)', 'y_max1', '(-z_max1)', 'z_max1'], {}), '(-x_max1, x_max1, -y_max1, y_max1, -z_max1, z_max1)\n', (1024, 1075), False, 'from shapes import Shape, Cuboid, Ellipsoid\n'), ((1085, 1142), 'shapes.Cuboid', 'Cuboid', (['(-x_max2)', 'x_max2', '(-y_max2)', 'y_max2', '(-z_max2)', 'z_max2'], {}), '(-x_max2, x_max2, -y_max2, y_max2, -z_max2, z_max2)\n', (1091, 1142), False, 'from shapes import Shape, Cuboid, Ellipsoid\n'), ((1152, 1254), 'numpy.array', 'np.array', (['[[0.0, y_max2 + y_max1, 0.0], [x_max2 + x_max1, 0.0, 0.0], [0.0, 0.0, \n z_max2 + z_max1]]'], {}), '([[0.0, y_max2 + y_max1, 0.0], [x_max2 + x_max1, 0.0, 0.0], [0.0, \n 0.0, z_max2 + z_max1]])\n', (1160, 1254), True, 'import numpy as np\n'), ((1559, 1616), 'shapes.Cuboid', 'Cuboid', (['(-x_max1)', 'x_max1', '(-y_max1)', 'y_max1', '(-z_max1)', 'z_max1'], {}), '(-x_max1, x_max1, -y_max1, y_max1, -z_max1, z_max1)\n', (1565, 1616), False, 'from shapes import Shape, Cuboid, Ellipsoid\n'), ((1626, 1683), 'shapes.Cuboid', 'Cuboid', (['(-x_max2)', 'x_max2', '(-y_max2)', 'y_max2', '(-z_max2)', 'z_max2'], {}), '(-x_max2, x_max2, -y_max2, y_max2, -z_max2, z_max2)\n', (1632, 1683), False, 'from shapes import Shape, Cuboid, Ellipsoid\n'), ((1818, 1882), 'argparse.ArgumentParser', 
'argparse.ArgumentParser', ([], {'description': '"""Generate a cuboid dataset"""'}), "(description='Generate a cuboid dataset')\n", (1841, 1882), False, 'import argparse\n'), ((3457, 3510), 'os.path.join', 'os.path.join', (['args.output_directory', 'args.shapes_type'], {}), '(args.output_directory, args.shapes_type)\n', (3469, 3510), False, 'import os\n'), ((4353, 4421), 'progress.bar.Bar', 'Bar', (["('Generating %d cuboids' % (args.n_samples,))"], {'max': 'args.n_samples'}), "('Generating %d cuboids' % (args.n_samples,), max=args.n_samples)\n", (4356, 4421), False, 'from progress.bar import Bar\n'), ((7340, 7368), 'os.listdir', 'os.listdir', (['output_directory'], {}), '(output_directory)\n', (7350, 7368), False, 'import os\n'), ((936, 953), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (950, 953), True, 'import numpy as np\n'), ((990, 1007), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (1004, 1007), True, 'import numpy as np\n'), ((1477, 1494), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (1491, 1494), True, 'import numpy as np\n'), ((1531, 1548), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (1545, 1548), True, 'import numpy as np\n'), ((3260, 3297), 'os.path.exists', 'os.path.exists', (['args.output_directory'], {}), '(args.output_directory)\n', (3274, 3297), False, 'import os\n'), ((3307, 3341), 'os.makedirs', 'os.makedirs', (['args.output_directory'], {}), '(args.output_directory)\n', (3318, 3341), False, 'import os\n'), ((3896, 3961), 'numpy.linspace', 'np.linspace', (['args.minimum[0]', 'args.maximum[0]', '(10)'], {'endpoint': '(False)'}), '(args.minimum[0], args.maximum[0], 10, endpoint=False)\n', (3907, 3961), True, 'import numpy as np\n'), ((6623, 6676), 'os.path.join', 'os.path.join', (['output_directory', 'folder_name', '"""models"""'], {}), "(output_directory, folder_name, 'models')\n", (6635, 6676), False, 'import os\n'), ((7382, 7446), 'os.path.join', 'os.path.join', 
(['output_directory', 'i', '"""models/model_normalized.obj"""'], {}), "(output_directory, i, 'models/model_normalized.obj')\n", (7394, 7446), False, 'import os\n'), ((7459, 7473), 'learnable_primitives.mesh.MeshFromOBJ', 'MeshFromOBJ', (['x'], {}), '(x)\n', (7470, 7473), False, 'from learnable_primitives.mesh import MeshFromOBJ\n'), ((566, 582), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (580, 582), True, 'import numpy as np\n'), ((772, 789), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (786, 789), True, 'import numpy as np\n'), ((4107, 4172), 'numpy.linspace', 'np.linspace', (['args.minimum[0]', 'args.maximum[0]', '(10)'], {'endpoint': '(False)'}), '(args.minimum[0], args.maximum[0], 10, endpoint=False)\n', (4118, 4172), True, 'import numpy as np\n'), ((4186, 4251), 'numpy.linspace', 'np.linspace', (['args.minimum[1]', 'args.maximum[1]', '(10)'], {'endpoint': '(False)'}), '(args.minimum[1], args.maximum[1], 10, endpoint=False)\n', (4197, 4251), True, 'import numpy as np\n'), ((4265, 4330), 'numpy.linspace', 'np.linspace', (['args.minimum[2]', 'args.maximum[2]', '(10)'], {'endpoint': '(False)'}), '(args.minimum[2], args.maximum[2], 10, endpoint=False)\n', (4276, 4330), True, 'import numpy as np\n'), ((4858, 4877), 'pyquaternion.Quaternion.random', 'Quaternion.random', ([], {}), '()\n', (4875, 4877), False, 'from pyquaternion import Quaternion\n'), ((4999, 5023), 'numpy.random.random', 'np.random.random', (['(3, 1)'], {}), '((3, 1))\n', (5015, 5023), True, 'import numpy as np\n'), ((5041, 5074), 'shapes.Ellipsoid', 'Ellipsoid', (['abc[0]', 'abc[1]', 'abc[2]'], {}), '(abc[0], abc[1], abc[2])\n', (5050, 5074), False, 'from shapes import Shape, Cuboid, Ellipsoid\n'), ((5092, 5125), 'shapes.Ellipsoid', 'Ellipsoid', (['abc[0]', 'abc[1]', 'abc[2]'], {}), '(abc[0], abc[1], abc[2])\n', (5101, 5125), False, 'from shapes import Shape, Cuboid, Ellipsoid\n'), ((5143, 5176), 'shapes.Ellipsoid', 'Ellipsoid', (['abc[0]', 'abc[1]', 'abc[2]'], {}), 
'(abc[0], abc[1], abc[2])\n', (5152, 5176), False, 'from shapes import Shape, Cuboid, Ellipsoid\n'), ((5193, 5212), 'pyquaternion.Quaternion.random', 'Quaternion.random', ([], {}), '()\n', (5210, 5212), False, 'from pyquaternion import Quaternion\n'), ((5288, 5307), 'pyquaternion.Quaternion.random', 'Quaternion.random', ([], {}), '()\n', (5305, 5307), False, 'from pyquaternion import Quaternion\n'), ((5460, 5491), 'shapes.Shape.from_shapes', 'Shape.from_shapes', (['[c1, c2, c3]'], {}), '([c1, c2, c3])\n', (5477, 5491), False, 'from shapes import Shape, Cuboid, Ellipsoid\n'), ((6413, 6440), 'shapes.Shape.from_shapes', 'Shape.from_shapes', (['[c1, c2]'], {}), '([c1, c2])\n', (6430, 6440), False, 'from shapes import Shape, Cuboid, Ellipsoid\n'), ((6692, 6716), 'os.path.exists', 'os.path.exists', (['base_dir'], {}), '(base_dir)\n', (6706, 6716), False, 'import os\n'), ((6730, 6751), 'os.makedirs', 'os.makedirs', (['base_dir'], {}), '(base_dir)\n', (6741, 6751), False, 'import os\n'), ((6827, 6873), 'os.path.join', 'os.path.join', (['base_dir', '"""model_normalized.obj"""'], {}), "(base_dir, 'model_normalized.obj')\n", (6839, 6873), False, 'import os\n'), ((6905, 6951), 'os.path.join', 'os.path.join', (['base_dir', '"""model_normalized.ply"""'], {}), "(base_dir, 'model_normalized.ply')\n", (6917, 6951), False, 'import os\n'), ((7002, 7052), 'os.path.join', 'os.path.join', (['base_dir', '"""model_normalized_pcl.obj"""'], {}), "(base_dir, 'model_normalized_pcl.obj')\n", (7014, 7052), False, 'import os\n'), ((4747, 4771), 'numpy.random.random', 'np.random.random', (['(3, 1)'], {}), '((3, 1))\n', (4763, 4771), True, 'import numpy as np\n'), ((5762, 5781), 'pyquaternion.Quaternion.random', 'Quaternion.random', ([], {}), '()\n', (5779, 5781), False, 'from pyquaternion import Quaternion\n'), ((5803, 5822), 'pyquaternion.Quaternion.random', 'Quaternion.random', ([], {}), '()\n', (5820, 5822), False, 'from pyquaternion import Quaternion\n'), ((6535, 6572), 'random.choice', 
'random.choice', (['(ascii_letters + digits)'], {}), '(ascii_letters + digits)\n', (6548, 6572), False, 'import random\n'), ((7134, 7184), 'os.path.join', 'os.path.join', (['base_dir', '"""model_normalized_pcl.obj"""'], {}), "(base_dir, 'model_normalized_pcl.obj')\n", (7146, 7184), False, 'import os\n'), ((7252, 7302), 'os.path.join', 'os.path.join', (['base_dir', '"""model_normalized_pcl.obj"""'], {}), "(base_dir, 'model_normalized_pcl.obj')\n", (7264, 7302), False, 'import os\n'), ((1308, 1320), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (1317, 1320), True, 'import numpy as np\n'), ((5968, 5992), 'numpy.random.random', 'np.random.random', (['(3, 1)'], {}), '((3, 1))\n', (5984, 5992), True, 'import numpy as np\n')] |
import os
# Select the offscreen-rendering backend for pyopengl/pyrender via env var.
# NOTE(review): this must execute before pyrender/pyopengl is imported for
# the setting to take effect — confirm import order if refactoring.
# If server, need to use osmesa for pyopengl/pyrender
# (heuristic: >20 CPU cores is assumed to mean a headless server node)
if os.cpu_count() > 20:
    os.environ['PYOPENGL_PLATFORM'] = 'osmesa'
    # https://github.com/marian42/mesh_to_sdf/issues/13
    # https://pyrender.readthedocs.io/en/latest/install/index.html?highlight=ssh#getting-pyrender-working-with-osmesa
else:
    os.environ['PYOPENGL_PLATFORM'] = 'egl'  # default one was pyglet, which hangs sometime for unknown reason: https://github.com/marian42/mesh_to_sdf/issues/19;
import sys
import yaml
import logging
import logging.config
import time
import random
import math
import numpy as np
from numpy import array
import torch
import matplotlib.pyplot as plt
from src import INIT_TYPE, TEST_TYPE, GEN_TYPE
from src.sample_sdf import PointSampler
from src.sdf_net import SDFDecoder
from src.pointnet_encoder import PointNetEncoder
from src.cost_predictor import CostPredictor
from train_grasp import TrainGrasp
from src.dataset_grasp import TrainDataset
from eval_grasp import EvaluateGrasp
from util.misc import *
from util.mesh import *
class Runner:
    """Joint trainer for a shape auto-encoder and a latent-space cost predictor.

    Holds a PointNet encoder (surface points -> latent), an SDF decoder
    (latent + query point -> signed distance) and a cost predictor
    (latent -> scalar reward).  The three networks are optimized jointly in
    :meth:`embed`; :meth:`adversarial` / :meth:`generate` then perturb
    latents via gradient ascent on the predictor to synthesize new, harder
    training objects.

    NOTE(review): :meth:`generate` reads the module-level ``config`` (not
    ``self.config``) and :meth:`visualize` reads a module-level ``epoch``;
    both only exist when this file runs as a script — verify before reuse.
    """

    def __init__(self, yaml_path, result_dir, device):
        """Load the YAML config and build networks, optimizer and scheduler.

        Args:
            yaml_path: config file path without the '.yaml' suffix.
            result_dir: root directory for saved models / latent images.
            device: torch device string, e.g. 'cuda:0'.
        """
        save__init__args(locals())  # project helper from util.misc: stores ctor args on self
        self.model_dir = result_dir + 'model/'
        self.latent_img_dir = result_dir + 'latent_img/'
        # Configure from yaml file
        with open(yaml_path+'.yaml', 'r') as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        self.config = config
        self.voxel_resolution = config['voxel_resolution']
        # always be one because of dataset design
        self.batch_size = config['batch_size']
        # NN params
        self.dim_latent = config['dim_latent']
        self.encoder_breadth = config['encoder_breadth']
        self.decoder_breadth = config['decoder_breadth']
        self.predictor_breadth = config['predictor_breadth']
        # Set up networks, calculate number of params
        self.encoder = PointNetEncoder(dim_latent=self.dim_latent,
                                        breadth=self.encoder_breadth).to(device)
        self.decoder = SDFDecoder(dim_latent=self.dim_latent,
                                  breadth=self.decoder_breadth,
                                  device=device).to(device)
        self.predictor = CostPredictor(dim_latent=self.dim_latent,
                                       dim_hidden=self.predictor_breadth).to(device)
        print('Num of encoder parameters: %d' % sum(p.numel() for p in self.encoder.parameters() if p.requires_grad))
        print('Num of decoder parameters: %d' % sum(p.numel() for p in self.decoder.parameters() if p.requires_grad))
        print('Num of cost predictor parameters: %d' % sum(p.numel() for p in self.predictor.parameters() if p.requires_grad))
        # Use one GPU — the accessor aliases point straight at the modules
        self.decoder_accessor = self.decoder
        self.predictor_accessor = self.predictor
        # Set up optimizer: separate lr/weight-decay per sub-network
        self.optimizer = torch.optim.AdamW([
            {'params': self.encoder.parameters(),
             'lr': config['encoder_lr'],
             'weight_decay': config['encoder_weight_decay']},
            {'params': self.decoder.parameters(),
             'lr': config['decoder_lr'],
             'weight_decay': config['decoder_weight_decay']},
            {'params': self.predictor.parameters(),
             'lr': config['predictor_lr'],
             'weight_decay': config['predictor_weight_decay']},
        ])
        if config['decayLR_use']:
            self.scheduler = torch.optim.lr_scheduler.MultiStepLR(
                self.optimizer,
                milestones=config['decayLR_milestones'],
                gamma=config['decayLR_gamma'])
        else:
            self.scheduler = None

    def create_dataset(self, env_dir_dict, embed_id_dir_dict,
                       num_sdf_available_per_obj, num_sdf_per_obj,
                       num_surface_per_obj, **kwargs):
        '''
        Create dataholder, to be updated once new distribution generated.
        # num_sdf_available_per_obj: number of sdf points for each object available before downsampled
        # num_sdf_per_obj: number of sdf points for each object - target!
        # num_surface_per_obj: number of surface points for each object (for pointnet encoder)
        Extra keyword arguments are accepted and ignored.
        '''
        self.train_data = TrainDataset(env_dir_dict,
                                       embed_id_dir_dict,
                                       num_sdf_available_per_obj,
                                       num_sdf_per_obj,
                                       num_surface_per_obj,
                                       device='cpu')
        # pin_memory + workers: dataset lives on CPU, batches moved to GPU in embed()
        self.train_dataloader = torch.utils.data.DataLoader(
            self.train_data,
            batch_size=self.batch_size,
            shuffle=True,
            drop_last=True,
            pin_memory=True,
            num_workers=4)

    def embed(self, epoch, norm_loss_ratio, latent_all, label_all, num_sdf_per_obj, clamp_lip):
        """Run one training epoch over the dataloader; resets latent.

        Loss = SDF reconstruction + reg_loss_ratio * reward-prediction MSE
             + lip_loss_ratio * Lipschitz penalty + norm_loss_ratio * ||z||^2.

        NOTE(review): the ``epoch`` argument is unused in this body.

        Returns:
            (epoch_loss, rec_loss, reg_loss, lip_loss, latent_all, pred_all)
            — batch-averaged losses, the updated latent table, and all
            predictor outputs for debugging.
        """
        epoch_loss = 0
        epoch_rec_loss = 0
        epoch_reg_loss = 0
        epoch_lip_loss = 0
        num_batch = 0
        # Switch NN mode
        self.encoder.train()
        self.decoder.train()
        self.predictor.train()
        l2 = torch.nn.MSELoss(reduction='none')
        # Save all the predictions for debugging
        pred_all = np.empty((0))
        # Run batches
        for batch_ind, data_batch in enumerate(self.train_dataloader):
            # Zero gradient
            self.optimizer.zero_grad(set_to_none=True)

            ###################### Extract data ######################
            batch_sdf, batch_surface, batch_obj_id_chosen = data_batch
            # Flatten to (num_obj*num_sdf_per_obj, 4): xyz + sdf value
            batch_sdf = batch_sdf.reshape(-1,4).to(self.device)
            batch_sdf_values = batch_sdf[:,-1]
            batch_sdf_points = batch_sdf[:,:3]
            batch_surface = batch_surface.to(self.device)
            batch_obj_id_chosen = batch_obj_id_chosen.squeeze(0)

            ###################### Encode ######################
            batch_latent = self.encoder.forward(batch_surface)  # batch x latent

            ###################### Decode ######################
            # Assign latent to each point of the object
            batch_latent_all = batch_latent.repeat_interleave(num_sdf_per_obj, dim=0)
            # Decode each latent/point to get sdf predictions
            batch_sdf_pred = self.decoder.forward(batch_sdf_points, batch_latent_all)

            ###################### Rec loss ######################
            rec_loss = torch.mean((batch_sdf_pred - batch_sdf_values)**2)

            ###################### Reg loss ######################
            batch_reward_pred = self.predictor.forward(batch_latent).flatten()
            batch_label = torch.from_numpy(label_all[batch_obj_id_chosen]).float().to(self.device)
            reg_loss = torch.mean(l2(batch_reward_pred, batch_label))

            ###################### Lip loss ######################
            if clamp_lip is None:
                # spectral norm of the two predictor layers
                lip_loss = torch.linalg.norm(self.predictor_accessor.linear_hidden[0].weight, ord=2)+torch.linalg.norm(self.predictor_accessor.linear_out[0].weight, ord=2)
            else:
                # clamping: penalize deviation from clamp_lip*16
                lip_loss = (torch.linalg.norm(self.predictor_accessor.linear_hidden[0].weight, ord=2)+torch.linalg.norm(self.predictor_accessor.linear_out[0].weight, ord=2)-clamp_lip*16)**2

            # Add reconstruction and regularization losses together
            batch_loss = rec_loss+\
                self.config['reg_loss_ratio']*reg_loss+\
                self.config['lip_loss_ratio']*lip_loss+\
                norm_loss_ratio*torch.mean(batch_latent**2)
            # Backward pass to get gradients
            batch_loss.backward()
            # Clip gradient if specified
            if self.config['gradientClip_use']:
                torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), self.config['gradientClip_thres'])
                torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), self.config['gradientClip_thres'])
                torch.nn.utils.clip_grad_norm_(self.predictor.parameters(), self.config['gradientClip_thres'])
            # Update weights using gradient
            self.optimizer.step()
            # Store loss
            epoch_loss += batch_loss.item()
            epoch_rec_loss += rec_loss.item()
            epoch_reg_loss += reg_loss.item()
            epoch_lip_loss += lip_loss.item()
            num_batch += 1
            # Update latents for all distributions
            latent_all[batch_obj_id_chosen] = batch_latent.detach().cpu().numpy()
            pred_all = np.concatenate((pred_all, batch_reward_pred.detach().cpu().numpy()))
        # Decay learning rate if specified
        if self.scheduler is not None:
            self.scheduler.step()
        # Get batch average loss
        epoch_loss /= num_batch
        epoch_rec_loss /= num_batch
        epoch_reg_loss /= num_batch
        epoch_lip_loss /= num_batch
        return epoch_loss, epoch_rec_loss, epoch_reg_loss, epoch_lip_loss, latent_all, pred_all

    def get_predictor_lip(self):
        """Return the predictor's Lipschitz estimate (delegates to the module)."""
        return self.predictor_accessor.get_lip()

    def encode_batch(self, surface_batch):
        """
        Encode surface point clouds into latents.
        Assume the shape as N x num_surface_per_obj x 3.
        """
        surface_test = torch.from_numpy(surface_batch).float().to(self.device)
        latent_test = self.encoder.forward(surface_test)  # num_test_obj x latent_dim
        return latent_test

    def predict(self, latent):
        """
        Predict rewards for latents using the cost predictor.
        Accepts a numpy array or a torch tensor; returns a 1-D numpy array.
        """
        if isinstance(latent, np.ndarray):
            latent = torch.from_numpy(latent).float().to(self.device)
        with torch.no_grad():
            pred = self.predictor.forward(latent).detach().cpu()
        return pred.squeeze(1).numpy()
        # return torch.where(pred > 0.5, 1., 0.).numpy()

    def adversarial(self, latent, eta=1.0, gamma=1.0, steps=10, target_drop=0.0):
        """
        Adversarially perturb latent using the cost predictor and evaluated label/cost. Following https://github.com/duchi-lab/certifiable-distributional-robustness/blob/master/attacks_tf.py
        Also see https://github.com/ricvolpi/generalize-unseen-domains/blob/master/model.py
        Only takes a single datapoint for now; tricky to get batch to work.

        Gradient-ascends ``-reward - gamma*||z - z0||^2`` and adapts
        (eta, gamma) between restarts until the predicted reward drops by
        roughly ``target_drop``.

        Returns:
            (perturbed latent as a (1, dim) numpy array, full latent path).
        """
        l2 = torch.nn.MSELoss()
        latent = torch.from_numpy(latent).float().to(self.device).requires_grad_().reshape(1,-1)
        latent_detach = latent.detach()
        # Gradient ascent with up to max_num_itr restarts at adapted step sizes
        max_num_itr = 10
        for _ in range(max_num_itr):
            # make a copy of the step-size settings for this restart
            eta_env = eta
            gamma_env = gamma
            latent_adv = latent.clone()
            ini_pred_reward = self.predictor.forward(latent_adv)
            latent_path_all = np.zeros((steps+1, latent.shape[1]))
            latent_path_all[0] = latent_adv.detach().cpu().numpy()
            for step in range(steps):
                pred_reward = self.predictor.forward(latent_adv)  # reward
                # ascend on negative reward, penalize distance to the original
                loss = -pred_reward - gamma_env*l2(latent_adv, latent_detach)
                grad = torch.autograd.grad(loss, latent_adv)[0]  # returns a tuple of grads
                latent_adv += eta_env*grad
                # logging.info(f'step {step}, pred {pred_reward.item()}')
                latent_path_all[step+1] = latent_adv.detach().cpu().numpy()
            if (ini_pred_reward-pred_reward) > target_drop*1.5:
                eta *= 0.8  # too much perturbation
                gamma *= 2.0
            elif (ini_pred_reward-pred_reward) > target_drop:
                break  # good
            else:
                eta *= 1.2  # too little perturbation
                gamma *= 0.5
        return latent_adv.detach().cpu().numpy(), latent_path_all

    def generate(self, epoch, gen_dir, base_latent_all, eta, gamma, steps, target_drop=0.1, max_num_attempt=5):
        """
        Generate new objects by adversarially perturbing existing latents using the cost predictor.
        Sometimes some latent cannot generate new object, so we need to re-sample latent adversarially for the same new distribution.

        Writes decomposed URDF/STL files for each successfully generated
        object under ``gen_dir``.

        Returns:
            (old latents, new latents, failure flags (1 = not generated),
             per-object mesh heights).
        """
        num_new = len(base_latent_all)
        old_latent_all = base_latent_all
        new_latent_all = np.zeros((num_new, self.dim_latent))
        # Another attempt if not all objects processed
        flags = np.ones((num_new))
        height_all = np.zeros((num_new))
        # NOTE(review): reads the module-level `config`, not self.config —
        # only works when this file is run as a script; verify before reuse.
        keep_concave_part = config['keep_concave_part']
        for _ in range(max_num_attempt):
            for env_ind in range(num_new):
                # Skip if already generated
                if flags[env_ind] < 1:
                    continue
                # Generate new
                old_latent = base_latent_all[env_ind]
                new_latent, latent_path_all = self.adversarial(
                    latent=old_latent,
                    eta=eta, gamma=gamma, steps=steps,
                    target_drop=target_drop)
                # Get mesh using decoder, possibly corrupt
                old_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(old_latent).float().to(self.device), voxel_resolution=self.voxel_resolution)
                new_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(new_latent).float().to(self.device), voxel_resolution=self.voxel_resolution)
                if new_mesh is None or old_mesh is None:
                    print('Cannot generate from latent!')
                    continue
                # Try processing (mesh utils can fail on degenerate geometry)
                try:
                    old_mesh = process_mesh(old_mesh,
                                            scale_down=True,
                                            smooth=False,  #!
                                            random_scale=False)
                    new_mesh = process_mesh(new_mesh,
                                            scale_down=True,
                                            smooth=False,  #!
                                            random_scale=False)
                    # Scale to original height
                    new_mesh = match_mesh_height(new_mesh, old_mesh)
                    # Export as decomposed stl and urdf - create new subdir for convex obj - for pybullet
                    ensure_directory_hard(gen_dir + str(env_ind) + '/')
                    convex_pieces = save_convex_urdf(new_mesh,
                                                     gen_dir,
                                                     env_ind,
                                                     mass=0.1,
                                                     keep_concave_part=keep_concave_part)
                except:
                    print('Cannot process generated!')
                    continue
                if len(convex_pieces) > 20:
                    print('Too concave!')
                    continue
                #? Use decomposed parts as stl? avoid peculiarities when sampling sdf and causing reconstruction issue
                if keep_concave_part:  # Export as (un-decomposed) stl - for sdf
                    save_mesh = new_mesh
                else:
                    save_mesh = create_mesh_from_pieces(convex_pieces)
                save_mesh.export(gen_dir+str(env_ind)+'.stl')
                # Add to all sampled dist; mark generated
                new_latent_all[env_ind] = new_latent
                flags[env_ind] = 0
                height_all[env_ind] = (save_mesh.bounds[1]-save_mesh.bounds[0])[2]
            # Quit if all objects perturbed
            if np.sum(flags) < 1e-3:
                break
            # Find closer latent for the next attempt
            eta /= 2
            gamma *= 2
            # steps = min(int(steps/2), 1)
            logging.info(f'Epoch {epoch} generate, double gamma locally')
        return old_latent_all, new_latent_all, flags, height_all

    def visualize(self, old_latent_all, new_latent_all, num_random_obj=20):
        """
        Sample latent from all existing and visualize objects.
        Saves a grid of 2D cross-sections (old vs new) plus a few STL meshes.

        NOTE(review): relies on a module-level ``epoch`` variable for file
        names — only valid when called from the training script's main loop.
        """
        num_obj_generated = 0
        num_obj_attempt = 0
        obj_ind_all = random.sample(range(new_latent_all.shape[0]), k=num_random_obj)
        # Use subplots for all objects
        fig_obj, _ = plt.subplots(5, 4)  # assume 20 rn
        while num_obj_generated < num_random_obj:
            # Sample more if used up
            if num_obj_attempt >= num_random_obj:
                obj_ind_all = random.sample(range(new_latent_all.shape[0]), k=num_random_obj)
                num_obj_attempt = 0
            # Extract sample
            old_obj = old_latent_all[obj_ind_all[num_obj_attempt]]
            new_obj = new_latent_all[obj_ind_all[num_obj_attempt]]
            # Try
            num_obj_attempt += 1
            # Reconstruct mesh from latent
            old_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(old_obj).float().to(self.device), voxel_resolution=self.voxel_resolution)
            new_mesh = self.decoder_accessor.get_mesh(torch.from_numpy(new_obj).float().to(self.device), voxel_resolution=self.voxel_resolution)
            if old_mesh is None or new_mesh is None:
                print('Cannot generate sample!')
                continue
            # Center, orient, scale
            try:
                old_mesh = process_mesh(old_mesh,
                                        scale_down=True,
                                        smooth=False,
                                        random_scale=False)
                new_mesh = process_mesh(new_mesh,
                                        scale_down=True,
                                        smooth=False,
                                        random_scale=False)
            except:
                print('Cannot process sampled!')
                continue
            # Save mesh for inspection - but not decomposed
            if num_obj_generated < 5:
                old_mesh.export(self.latent_img_dir+str(epoch)+'_'+str(num_obj_generated)+'_old.stl')
                new_mesh.export(self.latent_img_dir+str(epoch)+'_'+str(num_obj_generated)+'_new.stl')
            # Predict reward for both latents (annotated on the figure below)
            old_reward = self.predict(latent=old_obj.reshape(1,-1))[0]
            new_reward = self.predict(latent=new_obj.reshape(1,-1))[0]
            # Save image of 2D cross section through the centroid
            slice_2D_old, _ = old_mesh.section(plane_origin=old_mesh.centroid,
                                               plane_normal=[0,0,1]).to_planar()
            slice_2D_new, _ = new_mesh.section(plane_origin=new_mesh.centroid,
                                               plane_normal=[0,0,1]).to_planar()
            ax = fig_obj.axes[num_obj_generated]
            ax.set_aspect('equal')
            ax.scatter(slice_2D_old.vertices[:,0], slice_2D_old.vertices[:,1],
                       s=1, color='lightgray')
            ax.scatter(slice_2D_new.vertices[:,0], slice_2D_new.vertices[:,1],
                       s=2, color='gray')
            ax.text(x=0., y=0.01, s="{:.2f}".format(old_reward), fontsize=12, color='coral')
            ax.text(x=0., y=-0.01, s="{:.2f}".format(new_reward), fontsize=12, color='red')
            ax.axis('off')
            # Count
            num_obj_generated += 1
        plt.savefig(self.latent_img_dir+str(epoch)+'_random_obj.png')
        plt.close()

    def save_model(self, dir):
        """Save encoder/decoder/predictor state dicts into ``dir``."""
        torch.save(self.encoder.state_dict(), dir+'encoder.pt')
        torch.save(self.decoder.state_dict(), dir+'decoder.pt')
        torch.save(self.predictor.state_dict(), dir+'predictor.pt')

    def load_model(self, dir):
        """Load encoder/decoder/predictor state dicts from ``dir``."""
        self.encoder.load_state_dict(torch.load(dir+'encoder.pt', map_location=self.device))
        self.decoder.load_state_dict(torch.load(dir+'decoder.pt', map_location=self.device))
        self.predictor.load_state_dict(torch.load(dir+'predictor.pt', map_location=self.device))
def get_non_test_num_env_list(env_dict, dir_type_all=(INIT_TYPE, GEN_TYPE)):
    """Return env counts for entries whose dir_type is in ``dir_type_all``.

    Args:
        env_dict: mapping whose values are (env_id_list, _, dir_type) tuples.
        dir_type_all: directory types to include (defaults to the non-test
            types, i.e. initial and generated). Changed from a mutable list
            default to a tuple; membership semantics are unchanged.

    Returns:
        List of ``len(env_id_list)`` for each matching entry.
    """
    return [
        len(env_id_list)
        for env_id_list, _, dir_type in env_dict.values()
        if dir_type in dir_type_all
    ]
if __name__ == '__main__':
    # Entry point: set up config, seeds, directories, policy trainer/evaluator,
    # and bookkeeping for the adversarial-generation + retraining loop below.
    # from IPython import embed; embed()
    if os.cpu_count() > 20: # somehow on server, the default fork method does not work with pytorch, but works fine on desktop
        import multiprocessing
        multiprocessing.set_start_method('forkserver')
    # Read config (config name is passed as the first CLI argument)
    yaml_file_name = sys.argv[1]
    yaml_path = 'configs/'+yaml_file_name
    with open(yaml_path+'.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    # Fix seeds for python, numpy, and torch RNGs
    seed = config['seed']
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.benchmark = True # may speed up
    # Hardware
    cuda_idx = config['cuda_idx']
    device = 'cuda:'+str(cuda_idx)
    # Misc
    num_eval_per_env = config['num_eval_per_env']
    dim_latent = config['dim_latent']
    norm_loss_ratio = config['norm_loss_ratio']
    clamp_lip = config['clamp_lip']
    # Data
    initial_env_dir_list = config['initial_env_dir_list']
    num_env_per_initial_dir = config['num_env_per_initial_dir']
    test_env_dir_list = config['test_env_dir_list']
    num_env_per_test_dir = config['num_env_per_test_dir']
    # Generation (from latent)
    num_epoch_per_gen = config['num_epoch_per_gen']
    num_epoch_before_first_gen = config['num_epoch_before_first_gen']
    num_env_per_gen = config['num_env_per_gen']
    # Improving policy
    num_env_per_retrain = config['num_env_per_retrain']
    num_epoch_per_retrain = config['num_epoch_per_retrain']
    num_epoch_before_first_retrain = config['num_epoch_before_first_retrain']
    mu_list = config['mu_list']
    mu = config['mu']
    sigma = config['sigma']
    retrain_args = config['retrain_args']
    eval_args = config['eval_args']
    # Adversarial (gradient ascent)
    eta = config['eta']
    gamma = config['gamma']
    ga_steps = config['ga_steps']
    target_drop_percentage = config['target_drop_percentage']
    target_drop_percentage_rate = config['target_drop_percentage_rate']
    # Env params
    sdf_args = config['sdf_args']
    # Initialize folders (ensure_directory is a project helper)
    data_parent_dir = config['data_parent_dir']
    result_dir = 'result/'+yaml_file_name+'/'
    model_dir = result_dir + 'runner_model/'
    latent_img_dir = result_dir + 'latent_img/'
    data_dir = data_parent_dir+yaml_file_name+'/'
    ensure_directory(result_dir)
    ensure_directory(model_dir)
    ensure_directory(latent_img_dir)
    ensure_directory(data_dir)
    # Initialize dir dict: key is dir_path, value is a tuple of (1) id list and (2) type (0 for initial, 1 for test, 2 for gen)
    env_dir_dict = {}
    for env_dir in initial_env_dir_list:
        # dim.npy column 2 holds the object height for each env
        height_all =list(np.load(env_dir+'dim.npy')[:num_env_per_initial_dir,2])
        env_dir_dict[env_dir] = ([*range(num_env_per_initial_dir)], height_all, INIT_TYPE)
    # Save a copy of configuration
    with open(result_dir+'config.yaml', 'w') as f:
        yaml.dump(config, f, sort_keys=False)
    # Initialize evaluating policy (always cpu)
    evaluator = EvaluateGrasp(initial_policy_path=None,
                    mu_list=mu_list, mu=mu, sigma=sigma, **eval_args)
    # Initialize training policy
    trainer = TrainGrasp(result_dir=result_dir, device=device,
                    mu=mu, sigma=sigma, **retrain_args)
    # Initialize running env
    runner = Runner(yaml_path=yaml_path, result_dir=result_dir, device=device)
    # Initialize point sampler
    point_sampler = PointSampler(**sdf_args)
    # Training details to be recorded
    train_loss_list = []
    train_rec_loss_list = []
    train_reg_loss_list = []
    train_lip_loss_list = []
    train_success_list = []
    test_success_list = []
    train_lip_list = []
    # Save the latent and (ground-truth) label/reward of all images
    latent_all = np.zeros((num_env_per_initial_dir*len(initial_env_dir_list),
                            dim_latent))
    # Add test dir to dict
    for env_dir in test_env_dir_list:
        height_all = list(np.load(env_dir+'dim.npy')[:num_env_per_test_dir,2])
        env_dir_dict[env_dir] = ([*range(num_env_per_test_dir)], height_all, TEST_TYPE)
    # Name of saved training details
    train_details_path = None
    # Initialize counters tracking generation/retrain cadence and totals
    num_epoch_since_last_gen = 0
    num_epoch_since_last_retrain = 0
    num_env_gen = 0
    num_dir_gen = 0
    num_retrain = 0
    # Logging
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': True,
    })
    logging.basicConfig(filename=result_dir+'log.txt',
                        level=logging.NOTSET,
                        format='%(process)d-%(levelname)s-%(asctime)s-%(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S')
    logging.info('start')
    # Run
    num_epoch = (config['num_retrain']-2)*num_epoch_per_retrain+num_epoch_before_first_retrain # minus 2 to account for retrain at epoch 0
    epoch = 0
    while epoch <= num_epoch:
        # Each epoch: (1) maybe generate adversarial envs, (2) maybe retrain
        # the policy and re-evaluate all labels, (3) embed objects, (4) record.
        # Record time for each epoch
        epoch_start_time = time.time()
        ######################### New #########################
        # Generate a new distribution every some epochs
        if epoch >= num_epoch_before_first_gen and \
            num_epoch_since_last_gen >= num_epoch_per_gen:
            # NOTE(review): this branch reads label_all/pred_all, which are
            # populated by the retrain/embed sections below — it relies on the
            # epoch-0 retrain having run first.
            # Declare new path
            new_gen_dir = data_dir + 'gen_' + str(num_dir_gen) + '/'
            ensure_directory(new_gen_dir)
            # Adversarially generate and save new envs - Note that not all latent are updated during last embedding since only a set of envs are embedded now
            #? Favor sampling old envs with higher reward - prevent too difficult envs generated - not all envs re-evaluated in later stage of training, so less likely to sample them, but should be ok
            print('Generating new...')
            old_env_weights_all = np.exp(label_all*0) # uniform weight
            old_env_weights_all /= np.sum(old_env_weights_all)
            adv_env_id_all, _ = weighted_sample_without_replacement([*range(len(label_all))], weights=old_env_weights_all, k=min(num_env_per_gen, len(label_all)))
            # Estimate the range of predictions
            pred_range = np.max(pred_all)-np.min(pred_all)
            target_drop = pred_range*target_drop_percentage
            # Save hist
            fig = plt.figure()
            plt.hist(pred_all, bins=np.linspace(0.0, 1.0, 20))
            plt.savefig(latent_img_dir+str(epoch)+'_pred_hist.png')
            plt.close(fig)
            # Perturb sampled latent adversarially
            old_latent, new_latent, flags, height_all = runner.generate(
                epoch=epoch,
                gen_dir=new_gen_dir,
                base_latent_all=latent_all[adv_env_id_all],
                eta=eta, gamma=gamma, steps=ga_steps,
                target_drop=target_drop)
            # Filter ones actually generated (flag < 1 marks success)
            new_env_id_list = np.where(flags<1)[0]
            old_latent_generated = old_latent[new_env_id_list]
            new_latent_generated = new_latent[new_env_id_list]
            # Sample surface points and sdf, 4 mins for each 100 objects
            print('Sampling surface points and sdf for newly generated...')
            point_sampler.reset_dir(directory=new_gen_dir)
            point_sampler.sample_new_surface_point_sdf(obj_id_list=new_env_id_list)
            # Evaluate label of new envs - use mu_list
            print('Evaluating newly generated...')
            mu_batch = array(evaluator.evaluate(obj_dir=new_gen_dir,
                            obj_id_list=new_env_id_list,
                            obj_height_list=height_all[new_env_id_list],
                            num_eval=num_eval_per_env)[1], dtype='float')
            label_batch = get_label_from_mu(mu_batch, mu_list)
            print('Reward of newly generated: ', np.mean(label_batch))
            logging.info(f'Reward of newly generated: {np.mean(label_batch)}')
            # Add to latent - keep ungenerated ones - do not add to label here since labels reset after retraining
            latent_all = np.concatenate((latent_all, new_latent))
            # Add to dir dict - keep un-generated ones in height_all
            env_dir_dict[new_gen_dir] = (new_env_id_list, list(height_all), GEN_TYPE)
            np.save(new_gen_dir+'height.npy', height_all)
            # Visualize newly generated
            runner.visualize(old_latent_generated, new_latent_generated, num_random_obj=20)
            # Reset epoch count
            num_epoch_since_last_gen = 0
            num_env_gen += len(new_env_id_list)
            num_dir_gen += 1
        ######################### Retrain #########################
        # Retrain using all existing images
        if epoch == 0 or (epoch >= num_epoch_before_first_retrain and \
            num_epoch_since_last_retrain >= num_epoch_per_retrain):
            print(f'Retraining policy {num_retrain}...')
            logging.info(f'Retraining policy {num_retrain}...')
            # Pick which envs for training
            retrain_env_path_available_all = []
            retrain_env_height_available_all = []
            retrain_env_weight_all = []
            gen_dir_count = 0
            for env_dir, (env_id_list, height_list, dir_type) in env_dir_dict.items():
                if dir_type != TEST_TYPE:
                    # NOTE(review): `id` shadows the builtin inside this comprehension
                    retrain_env_path_available_all += [env_dir+str(id)+'.urdf' for id in env_id_list]
                    retrain_env_height_available_all += list(array(height_list)[env_id_list])
                    if dir_type == INIT_TYPE:
                        retrain_env_weight_all += [1]*len(env_id_list)
                    elif dir_type == GEN_TYPE:
                        retrain_env_weight_all += [1]*len(env_id_list)
                        gen_dir_count += 1
            retrain_env_weight_all = array(retrain_env_weight_all)/np.sum(array(retrain_env_weight_all)) # uniform weight for now
            retrain_env_path_list, chosen_id_list = weighted_sample_without_replacement(retrain_env_path_available_all, retrain_env_weight_all, k=min(num_env_per_retrain, len(retrain_env_path_available_all)))
            retrain_env_height_list = list(array(retrain_env_height_available_all)[chosen_id_list])
            # Use more itrs at 1st retrain
            retrain_args_copy = dict(retrain_args) # make a copy
            retrain_args_copy.pop('num_step_initial', None)
            if epoch == 0:
                retrain_args_copy['num_step'] = retrain_args['num_step_initial']
            new_policy_path = trainer.run(obj_path_all=retrain_env_path_list,
                                obj_height_all=retrain_env_height_list,
                                prefix='epoch_'+str(epoch),
                                **retrain_args_copy)
            logging.info(f'Epoch {epoch} retrain, new policy {new_policy_path}')
            # Update evaluator
            trainer.load_policy(new_policy_path)
            evaluator.load_policy(new_policy_path)
            ######################### Re-evaluate #########################
            # Sample envs to be embedded in the next iteration #! use all!
            embed_id_dir_dict = {}
            for env_dir, (env_id_list, _, dir_type) in env_dir_dict.items():
                if dir_type == INIT_TYPE or dir_type == GEN_TYPE:
                    embed_id_dir_dict[env_dir] = env_id_list
            label_all = np.empty((0), dtype='float') # reset
            train_success_batch = np.empty((0), dtype='float')
            train_success_dirs = []
            print('Re-evaluating for all...')
            # INIT - eval for train_success and label
            for env_dir, (env_id_list, height_list, dir_type) in env_dir_dict.items():
                if dir_type == INIT_TYPE:
                    mu_batch = array(evaluator.evaluate(obj_dir=env_dir,
                                obj_id_list=env_id_list,
                                obj_height_list=height_list, # all
                                num_eval=num_eval_per_env)[1], dtype='float')
                    label_batch = get_label_from_mu(mu_batch, mu_list)
                    train_success_batch = np.concatenate((train_success_batch, label_batch))
                    train_success_dirs += [np.mean(label_batch)]
                    label_all = np.concatenate((label_all, label_batch))
            # GEN - eval for label for chosen ids
            for env_dir, (_, height_list, dir_type) in env_dir_dict.items():
                if dir_type == GEN_TYPE:
                    chosen_id_list = embed_id_dir_dict[env_dir]
                    # un-chosen envs keep a zero label
                    label_batch = np.zeros(len(height_list))
                    mu_batch_chosen = array(evaluator.evaluate(obj_dir=env_dir,
                                obj_id_list=chosen_id_list,
                                obj_height_list=list(array(height_list)[chosen_id_list]), # chosen ones
                                num_eval=num_eval_per_env)[1], dtype='float')
                    label_batch_chosen = get_label_from_mu(mu_batch_chosen,
                                                           mu_list)
                    label_batch[chosen_id_list] = label_batch_chosen
                    label_all = np.concatenate((label_all, label_batch))
            # TEST - eval for test_success
            test_success_batch = np.empty((0), dtype='float')
            test_success_dirs = []
            for env_dir, (env_id_list, height_list, dir_type) in env_dir_dict.items():
                if dir_type == TEST_TYPE:
                    mu_batch = array(evaluator.evaluate(obj_dir=env_dir,
                                obj_id_list=env_id_list,
                                obj_height_list=height_list,
                                num_eval=num_eval_per_env)[1], dtype='float')
                    label_batch = get_label_from_mu(mu_batch, mu_list)
                    test_success_batch = np.concatenate((test_success_batch,
                                                        label_batch))
                    test_success_dirs += [np.mean(label_batch)]
            train_success_list += [np.mean(train_success_batch)]
            logging.info(f'Epoch {epoch} retrain, train reward {train_success_dirs}, avg {train_success_list[-1]:.3f}')
            test_success_list += [np.mean(array(test_success_batch))]
            logging.info(f'Epoch {epoch} retrain, test reward {test_success_dirs}, avg {test_success_list[-1]:.3f}')
            # Reset epoch count
            num_epoch_since_last_retrain = 0
            num_retrain += 1
        ######################### Embed #########################
        # Reset dataset and dataloader to add the new distribution
        if num_epoch_since_last_retrain == 0:
            runner.create_dataset(env_dir_dict, embed_id_dir_dict, **sdf_args)
        # Embed
        epoch_loss, epoch_rec_loss, epoch_reg_loss, epoch_lip_loss, latent_all, pred_all = runner.embed(epoch=epoch,
                        norm_loss_ratio=norm_loss_ratio,
                        latent_all=latent_all,
                        label_all=label_all,
                        num_sdf_per_obj=sdf_args['num_sdf_per_obj'],
                        clamp_lip=clamp_lip)
        # Get Lipschitz constant of the cost predictor
        lip_predictor = runner.get_predictor_lip()
        ######################## Record ########################
        train_loss_list += [epoch_loss]
        train_rec_loss_list += [epoch_rec_loss]
        train_reg_loss_list += [epoch_reg_loss]
        train_lip_loss_list += [epoch_lip_loss]
        train_lip_list += [lip_predictor]
        # Debug
        epoch_duration = time.time() - epoch_start_time
        if epoch % config['num_epoch_per_loss_print'] == 0:
            print("Epoch {:d}, Loss: {:.8f}, Rec loss: {:.5f}, Reg loss: {:.5f}, Lip loss: {:.3f}; Time: {:.3f}".format(epoch, epoch_loss, epoch_rec_loss, epoch_reg_loss, epoch_lip_loss, epoch_duration))
        if epoch % config['num_epoch_per_loss_log'] == 0:
            logging.info(f'Epoch {epoch}, Rec loss: {epoch_rec_loss:.6f}, Reg loss: {epoch_reg_loss:.6f}, Lip loss: {epoch_lip_loss:.6f}')
        # Save model before advancing epoch count
        if (epoch % config['num_epoch_per_save_model'] == 0 or epoch==num_epoch) and epoch >= 0:
            runner.save_model(dir=model_dir)
            # Remove old
            if train_details_path is not None:
                os.remove(train_details_path)
            train_details_path = result_dir+'train_details_'+str(epoch)
            # Save training details (incl. RNG states so runs can be resumed)
            torch.save({
                'epoch': epoch,
                'optimizer_state_dict': runner.optimizer.state_dict(),
                'train_lists': [train_loss_list, train_rec_loss_list, train_reg_loss_list, train_lip_loss_list, train_success_list, test_success_list, latent_all, label_all], # reward_all
                "seed_data": (seed, random.getstate(), np.random.get_state(), torch.get_rng_state()),
                "num_data": [num_epoch_since_last_gen, num_env_gen, num_dir_gen, env_dir_dict],
            }, train_details_path)
        # Count
        num_epoch_since_last_gen += 1
        num_epoch_since_last_retrain += 1
        epoch += 1
| [
"numpy.random.get_state",
"torch.optim.lr_scheduler.MultiStepLR",
"yaml.load",
"src.dataset_grasp.TrainDataset",
"torch.from_numpy",
"torch.nn.MSELoss",
"src.pointnet_encoder.PointNetEncoder",
"numpy.array",
"torch.get_rng_state",
"os.cpu_count",
"logging.info",
"multiprocessing.set_start_meth... | [((67, 81), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (79, 81), False, 'import os\n'), ((17202, 17219), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (17213, 17219), False, 'import random\n'), ((17221, 17241), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (17235, 17241), True, 'import numpy as np\n'), ((17243, 17266), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (17260, 17266), False, 'import torch\n'), ((19509, 19602), 'eval_grasp.EvaluateGrasp', 'EvaluateGrasp', ([], {'initial_policy_path': 'None', 'mu_list': 'mu_list', 'mu': 'mu', 'sigma': 'sigma'}), '(initial_policy_path=None, mu_list=mu_list, mu=mu, sigma=sigma,\n **eval_args)\n', (19522, 19602), False, 'from eval_grasp import EvaluateGrasp\n'), ((19648, 19737), 'train_grasp.TrainGrasp', 'TrainGrasp', ([], {'result_dir': 'result_dir', 'device': 'device', 'mu': 'mu', 'sigma': 'sigma'}), '(result_dir=result_dir, device=device, mu=mu, sigma=sigma, **\n retrain_args)\n', (19658, 19737), False, 'from train_grasp import TrainGrasp\n'), ((19890, 19914), 'src.sample_sdf.PointSampler', 'PointSampler', ([], {}), '(**sdf_args)\n', (19902, 19914), False, 'from src.sample_sdf import PointSampler\n'), ((20709, 20784), 'logging.config.dictConfig', 'logging.config.dictConfig', (["{'version': 1, 'disable_existing_loggers': True}"], {}), "({'version': 1, 'disable_existing_loggers': True})\n", (20734, 20784), False, 'import logging\n'), ((20794, 20966), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': "(result_dir + 'log.txt')", 'level': 'logging.NOTSET', 'format': '"""%(process)d-%(levelname)s-%(asctime)s-%(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S"""'}), "(filename=result_dir + 'log.txt', level=logging.NOTSET,\n format='%(process)d-%(levelname)s-%(asctime)s-%(message)s', datefmt=\n '%m/%d/%Y %I:%M:%S')\n", (20813, 20966), False, 'import logging\n'), ((20972, 20993), 'logging.info', 'logging.info', 
(['"""start"""'], {}), "('start')\n", (20984, 20993), False, 'import logging\n'), ((3814, 3942), 'src.dataset_grasp.TrainDataset', 'TrainDataset', (['env_dir_dict', 'embed_id_dir_dict', 'num_sdf_available_per_obj', 'num_sdf_per_obj', 'num_surface_per_obj'], {'device': '"""cpu"""'}), "(env_dir_dict, embed_id_dir_dict, num_sdf_available_per_obj,\n num_sdf_per_obj, num_surface_per_obj, device='cpu')\n", (3826, 3942), False, 'from src.dataset_grasp import TrainDataset\n'), ((4016, 4154), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['self.train_data'], {'batch_size': 'self.batch_size', 'shuffle': '(True)', 'drop_last': '(True)', 'pin_memory': '(True)', 'num_workers': '(4)'}), '(self.train_data, batch_size=self.batch_size,\n shuffle=True, drop_last=True, pin_memory=True, num_workers=4)\n', (4043, 4154), False, 'import torch\n'), ((4529, 4563), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (4545, 4563), False, 'import torch\n'), ((4621, 4632), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (4629, 4632), True, 'import numpy as np\n'), ((9006, 9024), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (9022, 9024), False, 'import torch\n'), ((10616, 10652), 'numpy.zeros', 'np.zeros', (['(num_new, self.dim_latent)'], {}), '((num_new, self.dim_latent))\n', (10624, 10652), True, 'import numpy as np\n'), ((10713, 10729), 'numpy.ones', 'np.ones', (['num_new'], {}), '(num_new)\n', (10720, 10729), True, 'import numpy as np\n'), ((10747, 10764), 'numpy.zeros', 'np.zeros', (['num_new'], {}), '(num_new)\n', (10755, 10764), True, 'import numpy as np\n'), ((13533, 13551), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(4)'], {}), '(5, 4)\n', (13545, 13551), True, 'import matplotlib.pyplot as plt\n'), ((16002, 16013), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16011, 16013), True, 'import matplotlib.pyplot as plt\n'), ((16796, 16810), 'os.cpu_count', 'os.cpu_count', ([], {}), 
'()\n', (16808, 16810), False, 'import os\n'), ((16943, 16989), 'multiprocessing.set_start_method', 'multiprocessing.set_start_method', (['"""forkserver"""'], {}), "('forkserver')\n", (16975, 16989), False, 'import multiprocessing\n'), ((17127, 17163), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (17136, 17163), False, 'import yaml\n'), ((19412, 19449), 'yaml.dump', 'yaml.dump', (['config', 'f'], {'sort_keys': '(False)'}), '(config, f, sort_keys=False)\n', (19421, 19449), False, 'import yaml\n'), ((21229, 21240), 'time.time', 'time.time', ([], {}), '()\n', (21238, 21240), False, 'import time\n'), ((1307, 1343), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (1316, 1343), False, 'import yaml\n'), ((3125, 3254), 'torch.optim.lr_scheduler.MultiStepLR', 'torch.optim.lr_scheduler.MultiStepLR', (['self.optimizer'], {'milestones': "config['decayLR_milestones']", 'gamma': "config['decayLR_gamma']"}), "(self.optimizer, milestones=config[\n 'decayLR_milestones'], gamma=config['decayLR_gamma'])\n", (3161, 3254), False, 'import torch\n'), ((5659, 5711), 'torch.mean', 'torch.mean', (['((batch_sdf_pred - batch_sdf_values) ** 2)'], {}), '((batch_sdf_pred - batch_sdf_values) ** 2)\n', (5669, 5711), False, 'import torch\n'), ((8408, 8423), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8421, 8423), False, 'import torch\n'), ((9386, 9424), 'numpy.zeros', 'np.zeros', (['(steps + 1, latent.shape[1])'], {}), '((steps + 1, latent.shape[1]))\n', (9394, 9424), True, 'import numpy as np\n'), ((13093, 13154), 'logging.info', 'logging.info', (['f"""Epoch {epoch} generate, double gamma locally"""'], {}), "(f'Epoch {epoch} generate, double gamma locally')\n", (13105, 13154), False, 'import logging\n'), ((16283, 16339), 'torch.load', 'torch.load', (["(dir + 'encoder.pt')"], {'map_location': 'self.device'}), "(dir + 'encoder.pt', map_location=self.device)\n", (16293, 16339), False, 
'import torch\n'), ((16370, 16426), 'torch.load', 'torch.load', (["(dir + 'decoder.pt')"], {'map_location': 'self.device'}), "(dir + 'decoder.pt', map_location=self.device)\n", (16380, 16426), False, 'import torch\n'), ((16459, 16517), 'torch.load', 'torch.load', (["(dir + 'predictor.pt')"], {'map_location': 'self.device'}), "(dir + 'predictor.pt', map_location=self.device)\n", (16469, 16517), False, 'import torch\n'), ((21961, 21982), 'numpy.exp', 'np.exp', (['(label_all * 0)'], {}), '(label_all * 0)\n', (21967, 21982), True, 'import numpy as np\n'), ((22024, 22051), 'numpy.sum', 'np.sum', (['old_env_weights_all'], {}), '(old_env_weights_all)\n', (22030, 22051), True, 'import numpy as np\n'), ((22372, 22384), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22382, 22384), True, 'import matplotlib.pyplot as plt\n'), ((22501, 22515), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (22510, 22515), True, 'import matplotlib.pyplot as plt\n'), ((23862, 23902), 'numpy.concatenate', 'np.concatenate', (['(latent_all, new_latent)'], {}), '((latent_all, new_latent))\n', (23876, 23902), True, 'import numpy as np\n'), ((24044, 24091), 'numpy.save', 'np.save', (["(new_gen_dir + 'height.npy')", 'height_all'], {}), "(new_gen_dir + 'height.npy', height_all)\n", (24051, 24091), True, 'import numpy as np\n'), ((24599, 24650), 'logging.info', 'logging.info', (['f"""Retraining policy {num_retrain}..."""'], {}), "(f'Retraining policy {num_retrain}...')\n", (24611, 24650), False, 'import logging\n'), ((26114, 26182), 'logging.info', 'logging.info', (['f"""Epoch {epoch} retrain, new policy {new_policy_path}"""'], {}), "(f'Epoch {epoch} retrain, new policy {new_policy_path}')\n", (26126, 26182), False, 'import logging\n'), ((26632, 26658), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': '"""float"""'}), "(0, dtype='float')\n", (26640, 26658), True, 'import numpy as np\n'), ((26694, 26720), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': '"""float"""'}), "(0, 
dtype='float')\n", (26702, 26720), True, 'import numpy as np\n'), ((28102, 28128), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': '"""float"""'}), "(0, dtype='float')\n", (28110, 28128), True, 'import numpy as np\n'), ((28706, 28823), 'logging.info', 'logging.info', (['f"""Epoch {epoch} retrain, train reward {train_success_dirs}, avg {train_success_list[-1]:.3f}"""'], {}), "(\n f'Epoch {epoch} retrain, train reward {train_success_dirs}, avg {train_success_list[-1]:.3f}'\n )\n", (28718, 28823), False, 'import logging\n'), ((28879, 28993), 'logging.info', 'logging.info', (['f"""Epoch {epoch} retrain, test reward {test_success_dirs}, avg {test_success_list[-1]:.3f}"""'], {}), "(\n f'Epoch {epoch} retrain, test reward {test_success_dirs}, avg {test_success_list[-1]:.3f}'\n )\n", (28891, 28993), False, 'import logging\n'), ((29976, 29987), 'time.time', 'time.time', ([], {}), '()\n', (29985, 29987), False, 'import time\n'), ((30311, 30447), 'logging.info', 'logging.info', (['f"""Epoch {epoch}, Rec loss: {epoch_rec_loss:.6f}, Reg loss: {epoch_reg_loss:.6f}, Lip loss: {epoch_lip_loss:.6f}"""'], {}), "(\n f'Epoch {epoch}, Rec loss: {epoch_rec_loss:.6f}, Reg loss: {epoch_reg_loss:.6f}, Lip loss: {epoch_lip_loss:.6f}'\n )\n", (30323, 30447), False, 'import logging\n'), ((1785, 1858), 'src.pointnet_encoder.PointNetEncoder', 'PointNetEncoder', ([], {'dim_latent': 'self.dim_latent', 'breadth': 'self.encoder_breadth'}), '(dim_latent=self.dim_latent, breadth=self.encoder_breadth)\n', (1800, 1858), False, 'from src.pointnet_encoder import PointNetEncoder\n'), ((1920, 2008), 'src.sdf_net.SDFDecoder', 'SDFDecoder', ([], {'dim_latent': 'self.dim_latent', 'breadth': 'self.decoder_breadth', 'device': 'device'}), '(dim_latent=self.dim_latent, breadth=self.decoder_breadth, device\n =device)\n', (1930, 2008), False, 'from src.sdf_net import SDFDecoder\n'), ((2055, 2131), 'src.cost_predictor.CostPredictor', 'CostPredictor', ([], {'dim_latent': 'self.dim_latent', 'dim_hidden': 
'self.predictor_breadth'}), '(dim_latent=self.dim_latent, dim_hidden=self.predictor_breadth)\n', (2068, 2131), False, 'from src.cost_predictor import CostPredictor\n'), ((12973, 12986), 'numpy.sum', 'np.sum', (['flags'], {}), '(flags)\n', (12979, 12986), True, 'import numpy as np\n'), ((19188, 19216), 'numpy.load', 'np.load', (["(env_dir + 'dim.npy')"], {}), "(env_dir + 'dim.npy')\n", (19195, 19216), True, 'import numpy as np\n'), ((20361, 20389), 'numpy.load', 'np.load', (["(env_dir + 'dim.npy')"], {}), "(env_dir + 'dim.npy')\n", (20368, 20389), True, 'import numpy as np\n'), ((22262, 22278), 'numpy.max', 'np.max', (['pred_all'], {}), '(pred_all)\n', (22268, 22278), True, 'import numpy as np\n'), ((22279, 22295), 'numpy.min', 'np.min', (['pred_all'], {}), '(pred_all)\n', (22285, 22295), True, 'import numpy as np\n'), ((22874, 22893), 'numpy.where', 'np.where', (['(flags < 1)'], {}), '(flags < 1)\n', (22882, 22893), True, 'import numpy as np\n'), ((23647, 23667), 'numpy.mean', 'np.mean', (['label_batch'], {}), '(label_batch)\n', (23654, 23667), True, 'import numpy as np\n'), ((25309, 25338), 'numpy.array', 'array', (['retrain_env_weight_all'], {}), '(retrain_env_weight_all)\n', (25314, 25338), False, 'from numpy import array\n'), ((28673, 28701), 'numpy.mean', 'np.mean', (['train_success_batch'], {}), '(train_success_batch)\n', (28680, 28701), True, 'import numpy as np\n'), ((30669, 30698), 'os.remove', 'os.remove', (['train_details_path'], {}), '(train_details_path)\n', (30678, 30698), False, 'import os\n'), ((6093, 6166), 'torch.linalg.norm', 'torch.linalg.norm', (['self.predictor_accessor.linear_hidden[0].weight'], {'ord': '(2)'}), '(self.predictor_accessor.linear_hidden[0].weight, ord=2)\n', (6110, 6166), False, 'import torch\n'), ((6167, 6237), 'torch.linalg.norm', 'torch.linalg.norm', (['self.predictor_accessor.linear_out[0].weight'], {'ord': '(2)'}), '(self.predictor_accessor.linear_out[0].weight, ord=2)\n', (6184, 6237), False, 'import torch\n'), ((6655, 
6684), 'torch.mean', 'torch.mean', (['(batch_latent ** 2)'], {}), '(batch_latent ** 2)\n', (6665, 6684), False, 'import torch\n'), ((9650, 9687), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', 'latent_adv'], {}), '(loss, latent_adv)\n', (9669, 9687), False, 'import torch\n'), ((22412, 22437), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(20)'], {}), '(0.0, 1.0, 20)\n', (22423, 22437), True, 'import numpy as np\n'), ((25346, 25375), 'numpy.array', 'array', (['retrain_env_weight_all'], {}), '(retrain_env_weight_all)\n', (25351, 25375), False, 'from numpy import array\n'), ((25636, 25675), 'numpy.array', 'array', (['retrain_env_height_available_all'], {}), '(retrain_env_height_available_all)\n', (25641, 25675), False, 'from numpy import array\n'), ((27213, 27263), 'numpy.concatenate', 'np.concatenate', (['(train_success_batch, label_batch)'], {}), '((train_success_batch, label_batch))\n', (27227, 27263), True, 'import numpy as np\n'), ((27331, 27371), 'numpy.concatenate', 'np.concatenate', (['(label_all, label_batch)'], {}), '((label_all, label_batch))\n', (27345, 27371), True, 'import numpy as np\n'), ((28002, 28042), 'numpy.concatenate', 'np.concatenate', (['(label_all, label_batch)'], {}), '((label_all, label_batch))\n', (28016, 28042), True, 'import numpy as np\n'), ((28531, 28580), 'numpy.concatenate', 'np.concatenate', (['(test_success_batch, label_batch)'], {}), '((test_success_batch, label_batch))\n', (28545, 28580), True, 'import numpy as np\n'), ((28848, 28873), 'numpy.array', 'array', (['test_success_batch'], {}), '(test_success_batch)\n', (28853, 28873), False, 'from numpy import array\n'), ((8077, 8108), 'torch.from_numpy', 'torch.from_numpy', (['surface_batch'], {}), '(surface_batch)\n', (8093, 8108), False, 'import torch\n'), ((23715, 23735), 'numpy.mean', 'np.mean', (['label_batch'], {}), '(label_batch)\n', (23722, 23735), True, 'import numpy as np\n'), ((27292, 27312), 'numpy.mean', 'np.mean', (['label_batch'], {}), 
'(label_batch)\n', (27299, 27312), True, 'import numpy as np\n'), ((28624, 28644), 'numpy.mean', 'np.mean', (['label_batch'], {}), '(label_batch)\n', (28631, 28644), True, 'import numpy as np\n'), ((31085, 31102), 'random.getstate', 'random.getstate', ([], {}), '()\n', (31100, 31102), False, 'import random\n'), ((31104, 31125), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (31123, 31125), True, 'import numpy as np\n'), ((31127, 31148), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (31146, 31148), False, 'import torch\n'), ((5858, 5906), 'torch.from_numpy', 'torch.from_numpy', (['label_all[batch_obj_id_chosen]'], {}), '(label_all[batch_obj_id_chosen])\n', (5874, 5906), False, 'import torch\n'), ((6279, 6352), 'torch.linalg.norm', 'torch.linalg.norm', (['self.predictor_accessor.linear_hidden[0].weight'], {'ord': '(2)'}), '(self.predictor_accessor.linear_hidden[0].weight, ord=2)\n', (6296, 6352), False, 'import torch\n'), ((6353, 6423), 'torch.linalg.norm', 'torch.linalg.norm', (['self.predictor_accessor.linear_out[0].weight'], {'ord': '(2)'}), '(self.predictor_accessor.linear_out[0].weight, ord=2)\n', (6370, 6423), False, 'import torch\n'), ((8351, 8375), 'torch.from_numpy', 'torch.from_numpy', (['latent'], {}), '(latent)\n', (8367, 8375), False, 'import torch\n'), ((25059, 25077), 'numpy.array', 'array', (['height_list'], {}), '(height_list)\n', (25064, 25077), False, 'from numpy import array\n'), ((14039, 14064), 'torch.from_numpy', 'torch.from_numpy', (['old_obj'], {}), '(old_obj)\n', (14055, 14064), False, 'import torch\n'), ((14175, 14200), 'torch.from_numpy', 'torch.from_numpy', (['new_obj'], {}), '(new_obj)\n', (14191, 14200), False, 'import torch\n'), ((11275, 11303), 'torch.from_numpy', 'torch.from_numpy', (['old_latent'], {}), '(old_latent)\n', (11291, 11303), False, 'import torch\n'), ((11415, 11443), 'torch.from_numpy', 'torch.from_numpy', (['new_latent'], {}), '(new_latent)\n', (11431, 11443), False, 'import 
torch\n'), ((9036, 9060), 'torch.from_numpy', 'torch.from_numpy', (['latent'], {}), '(latent)\n', (9052, 9060), False, 'import torch\n'), ((27741, 27759), 'numpy.array', 'array', (['height_list'], {}), '(height_list)\n', (27746, 27759), False, 'from numpy import array\n')] |
import numpy as np
import pybullet as p
import gym
import numpy as np
import roboverse.bullet as bullet
import os
from tqdm import tqdm
import argparse
import time
import roboverse
import datetime
# =========================================================
# Indices into a pybullet VR controller event tuple
# (as returned by p.getVREvents): POSITION, ORIENTATION, BUTTON etc
POSITION = 1
ORIENTATION = 2
ANALOG = 3
BUTTONS = 6
# If True, the controller's orientation is read and converted to degrees
ORIENTATION_ENABLED = True
# Margin used when clipping actions to the open interval (-1, 1)
EPSILON = 0.005
# =========================================================
def collect_one_trajectory(env, num_timesteps):
prev_vr_theta = 0
def get_gripper_input(e):
# Detect change in button, and change trigger state
if e[BUTTONS][33] & p.VR_BUTTON_IS_DOWN:
trigger = -0.8
elif e[BUTTONS][33] & p.VR_BUTTON_WAS_RELEASED:
trigger = 0.8
else:
trigger = 0
return trigger
def accept_traj(info):
return info["grasp_success"] # TODO: just grasping for now; will add info["push_success"] etc
# get VR controller output at one timestamp
def get_vr_output():
nonlocal prev_vr_theta
ee_pos, ee_theta = bullet.get_link_state(
env.robot_id, env.end_effector_index)
events = p.getVREvents()
# detect input from controllers
assert events, "no input from controller!"
e = events[0]
# obtain gripper state from controller trigger
trigger = get_gripper_input(e)
# pass controller position and orientation into the environment
cont_pos = e[POSITION]
cont_orient = bullet.deg_to_quat([180, 0, 0])
if ORIENTATION_ENABLED:
cont_orient = e[ORIENTATION]
cont_orient = bullet.quat_to_deg(list(cont_orient))
action = [cont_pos[0] - ee_pos[0],
cont_pos[1] - ee_pos[1],
cont_pos[2] - ee_pos[2]]
action = np.array(action) * 3.5 # to make grasp success < 20 timesteps
grip = trigger
for _ in range(2):
action = np.append(action, 0)
wrist_theta = cont_orient[2] - prev_vr_theta
action = np.append(action, wrist_theta)
action = np.append(action, grip)
action = np.append(action, 0)
# ===========================================================
# Add noise during actual data collection
noise = 0.1
noise_scalings = [noise] * 3 + [0.1 * noise] * 3 + [noise] * 2
action += np.random.normal(scale=noise_scalings)
# ===========================================================
action = np.clip(action, -1 + EPSILON, 1 - EPSILON)
prev_vr_theta = cont_orient[2]
return action
o = env.reset()
time.sleep(1.5)
images = []
accept = False
traj = dict(
observations=[],
actions=[],
rewards=[],
next_observations=[],
terminals=[],
agent_infos=[],
env_infos=[],
original_object_positions=env.original_object_positions,
)
first_time = True
# Collect a fixed length of trajectory
for i in range(num_timesteps):
action = get_vr_output()
observation = env.get_observation()
traj["observations"].append(observation)
next_state, reward, done, info = env.step(action)
traj["next_observations"].append(next_state)
traj["actions"].append(action)
traj["rewards"].append(reward)
traj["terminals"].append(done)
traj["agent_infos"].append(info)
traj["env_infos"].append(info)
time.sleep(0.03)
if accept_traj(info) and first_time:
print("num_timesteps: ", i)
first_time = False
# ===========================================================
if accept_traj(info):
accept = "y"
# ===========================================================
return accept, images, traj
def timestamp(divider='-', datetime_divider='T'):
now = datetime.datetime.now()
return now.strftime(
'%Y{d}%m{d}%dT%H{d}%M{d}%S'
''.format(d=divider, dtd=datetime_divider))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--num-trajectories", type=int, required=True)
parser.add_argument("-t", "--num-timesteps", type=int, required=True)
parser.add_argument("-e", "--env-name", type=str, required=True)
parser.add_argument("--task-name", type=str, required=True)
args = parser.parse_args()
timestamp = timestamp()
data_save_path = os.path.join(__file__, "../..", 'data', timestamp)
data_save_path = os.path.abspath(data_save_path)
if not os.path.exists(data_save_path):
os.makedirs(data_save_path)
data = []
env = roboverse.make(args.env_name,
gui=True,
control_mode='discrete_gripper')
env.reset()
for j in tqdm(range(args.num_trajectories)):
success, images, traj = collect_one_trajectory(env, args.num_timesteps)
while success != 'y' and success != 'Y':
print("failed for trajectory {}, collect again".format(j))
success, images, traj = collect_one_trajectory(env, args.num_timesteps)
data.append(traj)
if j % 50 == 0:
path = os.path.join(data_save_path, "{}_{}_{}_{}.npy".format(args.env_name, args.task_name, timestamp, j))
np.save(path, data)
path = os.path.join(data_save_path, "{}_{}_{}.npy".format(args.env_name, args.task_name, timestamp))
np.save(path, data)
| [
"numpy.random.normal",
"numpy.clip",
"os.path.exists",
"pybullet.getVREvents",
"argparse.ArgumentParser",
"roboverse.make",
"os.makedirs",
"os.path.join",
"time.sleep",
"numpy.append",
"datetime.datetime.now",
"numpy.array",
"roboverse.bullet.deg_to_quat",
"roboverse.bullet.get_link_state"... | [((2696, 2711), 'time.sleep', 'time.sleep', (['(1.5)'], {}), '(1.5)\n', (2706, 2711), False, 'import time\n'), ((3948, 3971), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3969, 3971), False, 'import datetime\n'), ((4128, 4153), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4151, 4153), False, 'import argparse\n'), ((4519, 4569), 'os.path.join', 'os.path.join', (['__file__', '"""../.."""', '"""data"""', 'timestamp'], {}), "(__file__, '../..', 'data', timestamp)\n", (4531, 4569), False, 'import os\n'), ((4591, 4622), 'os.path.abspath', 'os.path.abspath', (['data_save_path'], {}), '(data_save_path)\n', (4606, 4622), False, 'import os\n'), ((4727, 4799), 'roboverse.make', 'roboverse.make', (['args.env_name'], {'gui': '(True)', 'control_mode': '"""discrete_gripper"""'}), "(args.env_name, gui=True, control_mode='discrete_gripper')\n", (4741, 4799), False, 'import roboverse\n'), ((5512, 5531), 'numpy.save', 'np.save', (['path', 'data'], {}), '(path, data)\n', (5519, 5531), True, 'import numpy as np\n'), ((1117, 1176), 'roboverse.bullet.get_link_state', 'bullet.get_link_state', (['env.robot_id', 'env.end_effector_index'], {}), '(env.robot_id, env.end_effector_index)\n', (1138, 1176), True, 'import roboverse.bullet as bullet\n'), ((1207, 1222), 'pybullet.getVREvents', 'p.getVREvents', ([], {}), '()\n', (1220, 1222), True, 'import pybullet as p\n'), ((1558, 1589), 'roboverse.bullet.deg_to_quat', 'bullet.deg_to_quat', (['[180, 0, 0]'], {}), '([180, 0, 0])\n', (1576, 1589), True, 'import roboverse.bullet as bullet\n'), ((2101, 2131), 'numpy.append', 'np.append', (['action', 'wrist_theta'], {}), '(action, wrist_theta)\n', (2110, 2131), True, 'import numpy as np\n'), ((2149, 2172), 'numpy.append', 'np.append', (['action', 'grip'], {}), '(action, grip)\n', (2158, 2172), True, 'import numpy as np\n'), ((2190, 2210), 'numpy.append', 'np.append', (['action', '(0)'], {}), '(action, 0)\n', (2199, 
2210), True, 'import numpy as np\n'), ((2440, 2478), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'noise_scalings'}), '(scale=noise_scalings)\n', (2456, 2478), True, 'import numpy as np\n'), ((2567, 2609), 'numpy.clip', 'np.clip', (['action', '(-1 + EPSILON)', '(1 - EPSILON)'], {}), '(action, -1 + EPSILON, 1 - EPSILON)\n', (2574, 2609), True, 'import numpy as np\n'), ((3540, 3556), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (3550, 3556), False, 'import time\n'), ((4634, 4664), 'os.path.exists', 'os.path.exists', (['data_save_path'], {}), '(data_save_path)\n', (4648, 4664), False, 'import os\n'), ((4674, 4701), 'os.makedirs', 'os.makedirs', (['data_save_path'], {}), '(data_save_path)\n', (4685, 4701), False, 'import os\n'), ((1874, 1890), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (1882, 1890), True, 'import numpy as np\n'), ((2009, 2029), 'numpy.append', 'np.append', (['action', '(0)'], {}), '(action, 0)\n', (2018, 2029), True, 'import numpy as np\n'), ((5382, 5401), 'numpy.save', 'np.save', (['path', 'data'], {}), '(path, data)\n', (5389, 5401), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Checking the mixing of trajectories - limits of number of trajectories and
the limits on running the network for long time. Plus, the contribution
of inserting the trajectories to the network.
For each number of trajectories (5, 20, 50, 100, 200, 1000, inf)
For each RNN (with or without trajectories)
Run for 200 epochs
Run a control of the trajectories where you don't input the trajectories'
Save test_accuracy for each run
Save a dataset with the final accuracy from the trajectories
Print fig of with/without trajectories
sys.argv gets:
[1] = how many trajectories
[2] = number of epochs per run
[3] = number of epochs
[4] = resolution factor
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import sys
import torch
from torch.optim import Adam, SGD
import torch.nn as nn
import tensorflow as tf
import tensorflow.keras as keras
from mnist import MNIST
from utils import *
#DEfine the number of trajectories to use
num_trajectories = int(sys.argv[1])
num_learning_epochs = int(sys.argv[2])
num_trials = int(sys.argv[3])
res = int(sys.argv[4])
#define the place holders to hold the detials of each run
#One dataframe for the RNN eith the coordinates insertes
#columns_train = []
#columns_test = []
#columns_test_no_ccor
#for trial in range(num_trials):
# columns_train.append('trial_{}_train_loss'.format(trial))
# columns_test.append('trial_{}_test_accur'.format(trial))
# columns_test_no_ccor.append('trial_{}_no_coord_test_accur'.format(trial))
train_dataset = pd.DataFrame()
test_dataset = pd.DataFrame()
test_no_coor_dataset = pd.DataFrame()
#The second dataframe is for the RNN without the coordinates
columns = []
for trial in range(num_trials):
columns.append('trial_{}_train_loss'.format(trial))
columns.append('trial_{}_test_accur'.format(trial))
train_dataset_no_coordinates = pd.DataFrame()
test_dataset_no_coordinates = pd.DataFrame()
mnist = MNIST('/home/labs/ahissarlab/orra/datasets/mnist')
images, labels = mnist.load_training()
def train(net, epochs):
lr = 3e-3
#net = CNN().double()
optimizer = Adam(net.parameters(), lr = lr)
loss_func = nn.CrossEntropyLoss()
if torch.cuda.is_available():
net = net.cuda()
#Create a list to hold the q_seq example, the q_seq always holds the last q_seq
#of the dataframe that we created, if it is the same not good.
q_seq_list = []
train_loss = []
no_traject_test_accuracy = []
test_accur = []
for epoch in range(epochs):
train_dataloader, test_dataloader, ts_train, train_labels, q_sequence = \
create_mnist_dataset(images, labels, res = res, add_seed = num_trajectories)
q_seq_list.append(q_sequence)
batch_loss = []
for batch_idx, (data, traject, targets) in enumerate(train_dataloader):
if torch.cuda.is_available():
data = data.to('cuda', non_blocking=True)
targets = targets.to('cuda', non_blocking = True)
traject = traject.to('cuda', non_blocking = True)
#print(batch_idx, data.shape, targets.shape)
if net.__class__.__name__ == 'RNN_Net':
data = data.unsqueeze(2)
optimizer.zero_grad()
output = net(data.double(), traject.double())
loss = loss_func(output, targets)
loss.backward()
optimizer.step()
batch_loss.append(loss.item())
train_loss.append(np.mean(batch_loss))
if epoch%1 == 0:
correct = 0
no_traject_correct = 0
test_accuracy = []
for batch_idx, (test_data, test_traject, test_targets) in enumerate(test_dataloader):
if torch.cuda.is_available():
test_data = test_data.to('cuda', non_blocking=True)
test_targets = test_targets.to('cuda', non_blocking = True)
test_traject = test_traject.to('cuda', non_blocking = True)
#print(batch_idx, data.shape, targets.shape)
if net.__class__.__name__ == 'RNN_Net':
test_data = test_data.unsqueeze(2)
#Run Regular Test##############################################
test_output = net(test_data,test_traject)
test_pred = test_output.data.max(1, keepdim = True)[1]
correct = test_pred.eq(test_targets.data.view_as(test_pred)).sum()
test_accuracy.append(100.*correct.to('cpu')/len(test_targets))
#Run Test without Trajectories ###############################
no_traject_test_output = net(test_data,test_traject*0)
no_traject_test_pred = no_traject_test_output.data.max(1, keepdim = True)[1]
no_traject_correct = no_traject_test_pred.eq(test_targets.data.view_as(no_traject_test_pred)).sum()
no_traject_test_accuracy.append(100.*no_traject_correct.to('cpu')/len(test_targets))
print('Net',net.__class__.__name__,'Epoch : ',epoch+1, '\t', 'loss :', loss.to('cpu').item(), 'accuracy :',np.mean(test_accuracy) )
test_accur.append(np.mean(test_accuracy))
return train_loss, test_accur , no_traject_test_accuracy, q_seq_list
for trial in range(num_trials):
print("RNN + Trajectories")
net = RNN_Net(traject = True).double()
train_loss, test_accur, no_traject_test_accuracy,q_seq_list = \
train(net, epochs = num_learning_epochs)
train_dataset['trial_{}_train_loss'.format(trial)] = train_loss
test_dataset['trial_{}_test_accur'.format(trial)] = test_accur
test_no_coor_dataset['trial_{}_no_coord_test_accur'.format(trial)] = no_traject_test_accuracy
print("Only RNN")
net_no = RNN_Net(traject = False).double()
train_loss, test_accur, _ , _= \
train(net_no, epochs = num_learning_epochs)
train_dataset_no_coordinates['trial_{}_train_loss'.format(trial)] = train_loss
test_dataset_no_coordinates['trial_{}_test_accur'.format(trial)] = test_accur
######################### Plot and Save ######################################
#Plot
train_dataset['mean'] = train_dataset.mean(numeric_only = True, axis = 1)
train_dataset['confidance-'] = st.t.interval(alpha = 0.95, df = len(train_dataset) - 1, loc = train_dataset.mean(axis = 1), scale = st.sem(train_dataset, axis = 1))[0]
train_dataset['confidance+'] = st.t.interval(alpha = 0.95, df = len(train_dataset) - 1, loc = train_dataset.mean(axis = 1), scale = st.sem(train_dataset, axis = 1))[1]
plt.figure()
x = np.arange(len(train_dataset))
y = train_dataset['mean']
plt.plot(x, y)
plt.fill_between(x, train_dataset['confidance-'] , train_dataset['confidance+'])
plt.savefig('train_accuracy_{}.png'.format(num_trajectories))
test_dataset['mean'] = test_dataset.mean(numeric_only = True, axis = 1)
test_dataset['confidance-'] = st.t.interval(alpha = 0.95, df = len(test_dataset) - 1, loc = test_dataset.mean(axis = 1), scale = st.sem(test_dataset, axis = 1))[0]
test_dataset['confidance+'] = st.t.interval(alpha = 0.95, df = len(test_dataset) - 1, loc = test_dataset.mean(axis = 1), scale = st.sem(test_dataset, axis = 1))[1]
plt.figure()
x = np.arange(len(test_dataset))
y = test_dataset['mean']
plt.plot(x, y, label = 'with coordinates')
plt.fill_between(x, test_dataset['confidance-'] , test_dataset['confidance+'])
test_no_coor_dataset['mean'] = test_no_coor_dataset.mean(numeric_only = True, axis = 1)
test_no_coor_dataset['confidance-'] = st.t.interval(alpha = 0.95, df = len(test_no_coor_dataset) - 1, loc = test_no_coor_dataset.mean(axis = 1), scale = st.sem(test_no_coor_dataset, axis = 1))[0]
test_no_coor_dataset['confidance+'] = st.t.interval(alpha = 0.95, df = len(test_no_coor_dataset) - 1, loc = test_no_coor_dataset.mean(axis = 1), scale = st.sem(test_no_coor_dataset, axis = 1))[1]
plt.figure()
x = np.arange(len(test_no_coor_dataset))
y = test_no_coor_dataset['mean']
plt.plot(x, y, label = 'without coordinates')
plt.fill_between(x, test_no_coor_dataset['confidance-'] , test_no_coor_dataset['confidance+'])
plt.legend
plt.savefig('test_accuracy_{}.png'.format(num_trajectories))
| [
"numpy.mean",
"mnist.MNIST",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.figure",
"torch.cuda.is_available",
"scipy.stats.sem",
"pandas.DataFrame"
] | [((1620, 1634), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1632, 1634), True, 'import pandas as pd\n'), ((1650, 1664), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1662, 1664), True, 'import pandas as pd\n'), ((1688, 1702), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1700, 1702), True, 'import pandas as pd\n'), ((1958, 1972), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1970, 1972), True, 'import pandas as pd\n'), ((2003, 2017), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2015, 2017), True, 'import pandas as pd\n'), ((2029, 2079), 'mnist.MNIST', 'MNIST', (['"""/home/labs/ahissarlab/orra/datasets/mnist"""'], {}), "('/home/labs/ahissarlab/orra/datasets/mnist')\n", (2034, 2079), False, 'from mnist import MNIST\n'), ((6670, 6682), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6680, 6682), True, 'import matplotlib.pyplot as plt\n'), ((6743, 6757), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (6751, 6757), True, 'import matplotlib.pyplot as plt\n'), ((6758, 6837), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', "train_dataset['confidance-']", "train_dataset['confidance+']"], {}), "(x, train_dataset['confidance-'], train_dataset['confidance+'])\n", (6774, 6837), True, 'import matplotlib.pyplot as plt\n'), ((7302, 7314), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7312, 7314), True, 'import matplotlib.pyplot as plt\n'), ((7373, 7413), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""with coordinates"""'}), "(x, y, label='with coordinates')\n", (7381, 7413), True, 'import matplotlib.pyplot as plt\n'), ((7416, 7493), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', "test_dataset['confidance-']", "test_dataset['confidance+']"], {}), "(x, test_dataset['confidance-'], test_dataset['confidance+'])\n", (7432, 7493), True, 'import matplotlib.pyplot as plt\n'), ((7976, 7988), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), 
'()\n', (7986, 7988), True, 'import matplotlib.pyplot as plt\n'), ((8063, 8106), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""without coordinates"""'}), "(x, y, label='without coordinates')\n", (8071, 8106), True, 'import matplotlib.pyplot as plt\n'), ((8109, 8206), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', "test_no_coor_dataset['confidance-']", "test_no_coor_dataset['confidance+']"], {}), "(x, test_no_coor_dataset['confidance-'],\n test_no_coor_dataset['confidance+'])\n", (8125, 8206), True, 'import matplotlib.pyplot as plt\n'), ((2248, 2269), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2267, 2269), True, 'import torch.nn as nn\n'), ((2277, 2302), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2300, 2302), False, 'import torch\n'), ((2935, 2960), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2958, 2960), False, 'import torch\n'), ((3584, 3603), 'numpy.mean', 'np.mean', (['batch_loss'], {}), '(batch_loss)\n', (3591, 3603), True, 'import numpy as np\n'), ((6466, 6495), 'scipy.stats.sem', 'st.sem', (['train_dataset'], {'axis': '(1)'}), '(train_dataset, axis=1)\n', (6472, 6495), True, 'import scipy.stats as st\n'), ((6634, 6663), 'scipy.stats.sem', 'st.sem', (['train_dataset'], {'axis': '(1)'}), '(train_dataset, axis=1)\n', (6640, 6663), True, 'import scipy.stats as st\n'), ((7103, 7131), 'scipy.stats.sem', 'st.sem', (['test_dataset'], {'axis': '(1)'}), '(test_dataset, axis=1)\n', (7109, 7131), True, 'import scipy.stats as st\n'), ((7267, 7295), 'scipy.stats.sem', 'st.sem', (['test_dataset'], {'axis': '(1)'}), '(test_dataset, axis=1)\n', (7273, 7295), True, 'import scipy.stats as st\n'), ((7737, 7773), 'scipy.stats.sem', 'st.sem', (['test_no_coor_dataset'], {'axis': '(1)'}), '(test_no_coor_dataset, axis=1)\n', (7743, 7773), True, 'import scipy.stats as st\n'), ((7933, 7969), 'scipy.stats.sem', 'st.sem', (['test_no_coor_dataset'], {'axis': '(1)'}), 
'(test_no_coor_dataset, axis=1)\n', (7939, 7969), True, 'import scipy.stats as st\n'), ((3838, 3863), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3861, 3863), False, 'import torch\n'), ((5220, 5242), 'numpy.mean', 'np.mean', (['test_accuracy'], {}), '(test_accuracy)\n', (5227, 5242), True, 'import numpy as np\n'), ((5275, 5297), 'numpy.mean', 'np.mean', (['test_accuracy'], {}), '(test_accuracy)\n', (5282, 5297), True, 'import numpy as np\n')] |
import numpy as np
import random
from collections import namedtuple, deque, defaultdict
import matplotlib.pyplot as plt
from mdp import *
import pdb
import util
import json
import pprint
import logging
import utils_nn as utils
BUFFER_SIZE = int(1e4)
BATCH_SIZE = 1
LR = 1e-3
class ReplayBuffer:
def __init__(self, buffer_size, batch_size, seed):
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.seed = random.seed(seed)
def add(self, state, action, reward, next_state, done):
e = (state, action, reward, next_state, done)
self.memory.append(e)
def sample(self):
experiences = random.sample(self.memory, k=self.batch_size)
return experiences
def __len__(self):
return len(self.memory)
# Performs Q-learning. Read util.RLAlgorithm for more information.
# actions: a function that takes a state and returns a list of actions.
# discount: a number between 0 and 1, which determines the discount factor
# featureExtractor: a function that takes a state and action and returns a list of (feature name, feature value) pairs.
# explorationProb: the epsilon value indicating how frequently the policy
# returns a random action
class QLearningAlgorithm(util.RLAlgorithm):
def __init__(self, actions, discount, featureExtractor, mdp, explorationProb=0.2):
self.actions = actions
self.discount = discount
self.featureExtractor = featureExtractor
self.explorationProb = explorationProb
self.weights = defaultdict(float)
self.numIters = 0
self.mdp = mdp
# Return the Q function associated with the weights and features
def getQ(self, state, action):
score = 0
for f, v in self.featureExtractor(state, action, self.mdp):
score += self.weights[f] * v
return score
# This algorithm will produce an action given a state.
# Here we use the epsilon-greedy algorithm: with probability
# |explorationProb|, take a random action.
def getAction(self, state, eps):
self.numIters += 1
#if random.random() < self.explorationProb:
if random.random() < eps: # align qlearning and dqn exploration strategy
return random.choice(self.actions(state))
else:
return max((self.getQ(state, action), action) for action in self.actions(state))[1]
# Call this function to get the step size to update the weights.
def getStepSize(self):
return LR
return 1e-4 / math.sqrt(self.numIters)
# We will call this function with (s, a, r, s'), which you should use to update |weights|.
# Note that if s is a terminal state, then s' will be None. Remember to check for this.
# You should update the weights using self.getStepSize(); use
# self.getQ() to compute the current estimate of the parameters.
def incorporateFeedback(self, state, action, reward, newState, done=False):
if newState is None or done:
error = self.getQ(state, action) - reward
else:
error = self.getQ(state, action) - (reward + self.discount * max([self.getQ(newState, a) for a in self.actions(newState)]))
loss = error
#print("error={}".format(error))
error = min(10, error)
error = max(-10, error)
error *= self.getStepSize()
for f, v in self.featureExtractor(state, action, self.mdp):
self.weights[f] = self.weights[f] - error * v
return loss
def dumpWeights(self):
pprint.pprint(json.loads(json.dumps(self.weights)), weightsFile)
#print(dict(self.weights))
def actFeatureExtractor(state, action, mdp):
features = []
order = 1 # polynomial approx
dmax = 200
vmax = 30
amax = 2
ttcmax = 100
pos, speed, ttc_info = state[1], state[3], mdp._get_smallest_TTC(state)
ttc, nobj = ttc_info
idx = 4+nobj*4
ttcX, ttcY, ttcVx, ttcVy = state[idx:idx+4]
ttcX, ttcY, ttcVx, ttcVy = ttcX/dmax, ttcY/dmax, ttcVx/vmax, ttcVy/vmax
features.append(('bias', 1))
# NB: trying to play with these features. I had to lower donw the learning rate (cf LR)
#for i in range(1,order+1):
# features.append(('ttcX'+str(i), ttcX**i))
# features.append(('ttcY'+str(i), ttcY**i))
# features.append(('ttcVx'+str(i), ttcVx**i))
# features.append(('ttcVy'+str(i), ttcVy**i))
#features.append(('ttcR', 1 - math.exp(-ttc/100.)))
#features.append(('speedR', 1 - abs((speed-20.)/20.)))
# normalize features, otherwise it does not work at all
ttc = min(ttc,ttcmax)
pos, speed, ttc, action = pos/dmax, speed/vmax, ttc/ttcmax, action/amax
for i in range(1,order+1):
#features.append(('pos'+str(i), pos**i))
features.append(('speed'+str(i), speed**i))
features.append(('ttc'+str(i), ttc**i))
features.append(('action'+str(i), action**i))
return features
def qlearning(mdp, n_epochs=20, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
rl = QLearningAlgorithm(mdp.actions, mdp.discount(), actFeatureExtractor, mdp, 0.2)
memory = ReplayBuffer(BUFFER_SIZE, batch_size=BATCH_SIZE, seed=0)
best_score = -math.inf
mean_score = -math.inf
avg_tr_loss = 0
eps = eps_start
iters = 0
for num_epoch in range(n_epochs):
random.shuffle(mdp.train_set)
tr_scores_window = deque(maxlen=100) # last 100 scores
for num_s, s in enumerate(mdp.train()):
score = 0
for t in range(max_t):
iters += 1
#a = agent.act(mdp.reduce_state(s), eps) # a is an index !!!
a = rl.getAction(s, eps)
sp, r = mdp.sampleSuccReward(s, a)
done = mdp.isEnd(sp)[0]
memory.add(s, a, r, sp, done)
if len(memory) > BATCH_SIZE:
samples = memory.sample()
for sample in samples:
state, action, reward, next_state, isDone = sample
l = rl.incorporateFeedback(state, action, reward, next_state, isDone)
else:
l = rl.incorporateFeedback(s, a, r, sp, done)
avg_tr_loss += l
score += r
if done:
break
s = sp
if iters%100 == 99:
logging.info("Epoch no {}: sample {} iter {} avg_tr_loss: {:0.4f} tr_mean_score: {:.2f}".format(num_epoch, num_s, iters, avg_tr_loss/100, mean_score))
avg_tr_loss = 0
tr_scores_window.append(score)
mean_score = np.mean(tr_scores_window)
eps = max(eps_end, eps_decay*eps)
dev_scores_window = deque(maxlen=100) # last 100 scores
for num_s, s in enumerate(mdp.dev()):
score = 0
for t in range(max_t):
#a = agent.act(mdp.reduce_state(s), eps=0.) # a is an index !!!
a = rl.getAction(s, eps)
sp, r = mdp.sampleSuccReward(s, a)
done = mdp.isEnd(sp)[0]
score += r
if done:
break
s = sp
dev_scores_window.append(score)
dev_mean_score = np.mean(dev_scores_window)
logging.info("Epoch no {}: dev_mean_score: {:.2f}".format(num_epoch, dev_mean_score))
if dev_mean_score > best_score:
weightsFile.write("Epoch {} dev_mean_score: {:.2f}\n".format(num_epoch, dev_mean_score))
rl.dumpWeights()
best_score = dev_mean_score
# scores_window = deque(maxlen=100) # last 100 scores
# eps = eps_start
# for i_episode in range(1, n_episodes+1):
# s = mdp.startState()
# score = 0
# for t in range(max_t):
# #a = agent.act(s, eps)
# a = rl.getAction(s, eps)
# #pdb.set_trace()
# sp, r = mdp.sampleSuccReward(s, a)
# done = mdp.isEnd(sp)[0]
# #agent.step(s, a, r, sp, done)
# memory.add(s, a, r, sp, done)
# if len(memory) > BATCH_SIZE:
# samples = memory.sample()
# for sample in samples:
# state, action, reward, next_state, isDone = sample
# rl.incorporateFeedback(state, action, reward, next_state, isDone)
# else:
# rl.incorporateFeedback(s, a, r, sp, done)
# score += r
# if done:
# break
# s = sp
# scores_window.append(score)
# eps = max(eps_end, eps_decay*eps)
# avg_sliding_score = np.mean(scores_window)
# print("Episode {} Average sliding score: {:.2f}".format(i_episode, avg_sliding_score))
# if avg_sliding_score > -10:
# weightsFile.write("Episode {} Average sliding score: {:.2f}\n".format(i_episode, avg_sliding_score))
# rl.dumpWeights()
utils.set_logger('qlearning.log')
weightsFile = open("models/qlearning.weights", "a")
mdp = ActMDP()
qlearning(mdp)
| [
"numpy.mean",
"random.sample",
"collections.deque",
"random.shuffle",
"json.dumps",
"random.seed",
"collections.defaultdict",
"utils_nn.set_logger",
"random.random"
] | [((7767, 7800), 'utils_nn.set_logger', 'utils.set_logger', (['"""qlearning.log"""'], {}), "('qlearning.log')\n", (7783, 7800), True, 'import utils_nn as utils\n'), ((366, 391), 'collections.deque', 'deque', ([], {'maxlen': 'buffer_size'}), '(maxlen=buffer_size)\n', (371, 391), False, 'from collections import namedtuple, deque, defaultdict\n'), ((437, 454), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (448, 454), False, 'import random\n'), ((621, 666), 'random.sample', 'random.sample', (['self.memory'], {'k': 'self.batch_size'}), '(self.memory, k=self.batch_size)\n', (634, 666), False, 'import random\n'), ((1451, 1469), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (1462, 1469), False, 'from collections import namedtuple, deque, defaultdict\n'), ((4901, 4930), 'random.shuffle', 'random.shuffle', (['mdp.train_set'], {}), '(mdp.train_set)\n', (4915, 4930), False, 'import random\n'), ((4952, 4969), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (4957, 4969), False, 'from collections import namedtuple, deque, defaultdict\n'), ((5975, 5992), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (5980, 5992), False, 'from collections import namedtuple, deque, defaultdict\n'), ((6358, 6384), 'numpy.mean', 'np.mean', (['dev_scores_window'], {}), '(dev_scores_window)\n', (6365, 6384), True, 'import numpy as np\n'), ((1996, 2011), 'random.random', 'random.random', ([], {}), '()\n', (2009, 2011), False, 'import random\n'), ((5889, 5914), 'numpy.mean', 'np.mean', (['tr_scores_window'], {}), '(tr_scores_window)\n', (5896, 5914), True, 'import numpy as np\n'), ((3258, 3282), 'json.dumps', 'json.dumps', (['self.weights'], {}), '(self.weights)\n', (3268, 3282), False, 'import json\n')] |
import numpy as np
from aleph.consts import *
from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent
from reamber.osu.OsuMap import OsuMap
LINES = 20
amps, curves = np.random.rand(LINES), np.random.rand(LINES) + 3
def f319(m: OsuMap):
events = [*[SvOsuMeasureLineEvent(
firstOffset=125993, lastOffset=128093,
startX=-10, endX=0,
startY=-1, endY=1,
funcs=[
lambda x, a=a, c=c: a * (-1 / (x + 3 * c) + 1 / c + 1) * (-1 / (x + 11) + 1),
lambda x, a=a, c=c: -a * (-1 / (x + 3 * c) + 1 / c + 1) * (-1 / (x + 11) + 1)
]) for a, c in zip(amps, curves)],
*[SvOsuMeasureLineEvent(
firstOffset=128093, lastOffset=128243,
startX=0, endX=2,
startY=-1, endY=1,
funcs=[
lambda x, a=a, c=c: a / np.power(x + 1, 4),
lambda x, a=a, c=c: -a / np.power(x + 1, 4)
]) for a, c in zip(amps, curves)],
SvOsuMeasureLineEvent(
firstOffset=128243, lastOffset=128393,
startX=0, endX=1,
startY=0, endY=1,
funcs=[
lambda x: 0.5
]),
]
svs, bpms = svOsuMeasureLineMD(events,
scalingFactor=SCALE,
firstOffset=125993,
lastOffset=128393,
paddingSize=PADDING,
endBpm=250)
m.svs.extend(svs)
m.bpms.extend(bpms)
| [
"reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.SvOsuMeasureLineEvent",
"numpy.power",
"numpy.random.rand",
"reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.svOsuMeasureLineMD"
] | [((228, 249), 'numpy.random.rand', 'np.random.rand', (['LINES'], {}), '(LINES)\n', (242, 249), True, 'import numpy as np\n'), ((1349, 1472), 'reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.svOsuMeasureLineMD', 'svOsuMeasureLineMD', (['events'], {'scalingFactor': 'SCALE', 'firstOffset': '(125993)', 'lastOffset': '(128393)', 'paddingSize': 'PADDING', 'endBpm': '(250)'}), '(events, scalingFactor=SCALE, firstOffset=125993,\n lastOffset=128393, paddingSize=PADDING, endBpm=250)\n', (1367, 1472), False, 'from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent\n'), ((251, 272), 'numpy.random.rand', 'np.random.rand', (['LINES'], {}), '(LINES)\n', (265, 272), True, 'import numpy as np\n'), ((1114, 1238), 'reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.SvOsuMeasureLineEvent', 'SvOsuMeasureLineEvent', ([], {'firstOffset': '(128243)', 'lastOffset': '(128393)', 'startX': '(0)', 'endX': '(1)', 'startY': '(0)', 'endY': '(1)', 'funcs': '[lambda x: 0.5]'}), '(firstOffset=128243, lastOffset=128393, startX=0, endX\n =1, startY=0, endY=1, funcs=[lambda x: 0.5])\n', (1135, 1238), False, 'from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent\n'), ((316, 593), 'reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD.SvOsuMeasureLineEvent', 'SvOsuMeasureLineEvent', ([], {'firstOffset': '(125993)', 'lastOffset': '(128093)', 'startX': '(-10)', 'endX': '(0)', 'startY': '(-1)', 'endY': '(1)', 'funcs': '[lambda x, a=a, c=c: a * (-1 / (x + 3 * c) + 1 / c + 1) * (-1 / (x + 11) + \n 1), lambda x, a=a, c=c: -a * (-1 / (x + 3 * c) + 1 / c + 1) * (-1 / (x +\n 11) + 1)]'}), '(firstOffset=125993, lastOffset=128093, startX=-10,\n endX=0, startY=-1, endY=1, funcs=[lambda x, a=a, c=c: a * (-1 / (x + 3 *\n c) + 1 / c + 1) * (-1 / (x + 11) + 1), lambda x, a=a, c=c: -a * (-1 / (\n x + 3 * c) + 1 / c + 1) * (-1 / (x + 11) + 1)])\n', (337, 593), False, 
'from reamber.algorithms.generate.sv.generators.svOsuMeasureLineMD import svOsuMeasureLineMD, SvOsuMeasureLineEvent\n'), ((965, 983), 'numpy.power', 'np.power', (['(x + 1)', '(4)'], {}), '(x + 1, 4)\n', (973, 983), True, 'import numpy as np\n'), ((1032, 1050), 'numpy.power', 'np.power', (['(x + 1)', '(4)'], {}), '(x + 1, 4)\n', (1040, 1050), True, 'import numpy as np\n')] |
import os
import six
import numpy as np
import pandas as pd
from math import pi
from copy import copy
from abc import ABCMeta
from functools import lru_cache
from collections import defaultdict
from scipy.spatial.qhull import ConvexHull
from amlearn.featurize.base import BaseFeaturize
from amlearn.featurize.nearest_neighbor import VoroNN, DistanceNN, BaseNN
from amlearn.utils.verbose import VerboseReporter
from amlearn.utils.data import read_imd, read_lammps_dump, \
get_isometric_lists, list_like
from amlearn.utils.packing import load_radii, pbc_image_nn_coords, \
solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume
try:
from amlearn.featurize.src import voronoi_stats, boop
except Exception:
print("import fortran file voronoi_stats/boop error!\n")
module_dir = os.path.dirname(os.path.abspath(__file__))
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class PackingOfSite(object):
    """Geometric packing analysis around one atomic site.

    Builds the convex hull of a center atom's nearest neighbors and derives
    interstice (unfilled-space) metrics and packing efficiencies from
    triangle areas, solid angles and tetrahedron volumes. Intermediate
    results are cached on the instance as trailing-underscore attributes.
    """
    def __init__(self, pbc, bds, atom_type, coords, neighbors_type,
                 neighbors_coords, radii=None, radius_type="miracle_radius"):
        """
        Args:
            pbc (list like): periodic boundary flags for x/y/z.
            bds (list like): simulation box boundaries for x/y/z.
            atom_type: type (or atomic number) of the center atom, used as
                a string key into the radii table.
            coords (array like): Cartesian coordinates of the center atom.
            neighbors_type (list like): types of the neighbor atoms.
            neighbors_coords (list like): coordinates of the neighbor atoms.
            radii (dict): mapping of atom type (str) to a dict of radii;
                loaded from the packaged table when None.
            radius_type (str): which radius definition to use,
                e.g. "miracle_radius".
        """
        self.pbc = pbc
        self.bds = bds
        self.atom_type = atom_type
        self.coords = coords.astype(float)
        self.neighbors_type = neighbors_type
        self.neighbors_coords = neighbors_coords
        self.radii = load_radii() if radii is None else radii
        self.radius_type = radius_type
    def nn_coords(self):
        """Return neighbor coords mapped to the periodic image nearest to
        the center atom (computed once, then cached)."""
        if not hasattr(self, 'nn_coords_'):
            self.nn_coords_ = [pbc_image_nn_coords(self.coords,
                                                   neighbor_coords,
                                                   self.bds, self.pbc)
                               for neighbor_coords in self.neighbors_coords]
        return self.nn_coords_
    def convex_hull(self):
        """Return the cached scipy ConvexHull of the neighbor coordinates."""
        if not hasattr(self, 'convex_hull_'):
            self.convex_hull_ = ConvexHull(self.nn_coords())
        return self.convex_hull_
    def convex_hull_simplices(self):
        """Return the cached triangular facets (vertex-index triplets) of
        the neighbor convex hull."""
        if not hasattr(self, 'convex_hull_simplices_'):
            self.convex_hull_simplices_ = self.convex_hull().simplices
        return self.convex_hull_simplices_
    def analyze_area_interstice(self):
        """Compute per-facet area interstices of the neighbor convex hull.

        For each triangular facet, the area covered by the three neighbor
        atoms' circular slices (vertex angle / 2 * r^2) is subtracted from
        the triangle area; the fractional remainder (clipped at 0) is the
        area interstice. Results are cached in ``area_list_`` and
        ``area_interstice_list_``.
        """
        area_list = list()
        area_interstice_list = list()
        # Each row selects the vertex at which the angle is measured,
        # followed by the other two facet vertices.
        triplet_array = np.array([[0, 1, 2], [1, 0, 2], [2, 0, 1]])
        for facet_indices in self.convex_hull_simplices():
            packed_area = 0
            facet_coords = np.array(self.nn_coords())[facet_indices]
            for facet_idx, triplet in zip(facet_indices, triplet_array):
                triangle_angle = triangular_angle(*facet_coords[triplet])
                r = self.radii[str(self.neighbors_type[facet_idx])][
                    self.radius_type]
                # circular-sector area of this neighbor inside the triangle
                packed_area += triangle_angle / 2 * pow(r, 2)
            area = triangle_area(*facet_coords)
            area_list.append(area)
            area_interstice = 1 - packed_area/area
            # negative values mean overlapping atoms; clip to 0
            area_interstice_list.append(
                area_interstice if area_interstice > 0 else 0)
        self.area_list_ = area_list
        self.area_interstice_list_ = area_interstice_list
    def get_solid_angle_lists(self):
        """Return, per hull facet, the solid angles subtended at each of
        its three vertices toward the center atom (computed once, cached)."""
        if not hasattr(self, 'solid_angle_lists_'):
            solid_angle_lists = list()
            # Each row: apex vertex first, then the other two vertices.
            triplet_array = np.array([[0, 1, 2], [1, 0, 2], [2, 0, 1]])
            for facet_indices in self.convex_hull_simplices():
                solid_angle_list = list()
                facet_coords = np.array(self.nn_coords())[facet_indices]
                for triplet in triplet_array:
                    solid_angle_ = solid_angle(*facet_coords[triplet],
                                               self.coords)
                    solid_angle_list.append(solid_angle_)
                solid_angle_lists.append(solid_angle_list)
            self.solid_angle_lists_ = solid_angle_lists
        return self.solid_angle_lists_
    def analyze_vol_interstice(self):
        """Compute per-tetrahedron volume interstices.

        For each hull facet, the tetrahedron formed with the center atom is
        analyzed: the volume filled by the neighbors' and the center's
        spherical cones (solid_angle / 3 * r^3) is subtracted from the
        tetrahedron volume; the fractional remainder (clipped at 0) is the
        volume interstice. Results cached in ``volume_list_`` and
        ``volume_interstice_list_``.
        """
        volume_list = list()
        volume_interstice_list = list()
        for facet_indices, solid_angle_list in \
                zip(self.convex_hull_simplices(), self.get_solid_angle_lists()):
            packed_volume = 0
            facet_coords = np.array(self.nn_coords())[facet_indices]
            # calculate neighbors' packed_volume
            for facet_idx, sol_angle in zip(facet_indices, solid_angle_list):
                if sol_angle == 0:
                    continue
                r = self.radii[str(self.neighbors_type[facet_idx])][
                    self.radius_type]
                packed_volume += sol_angle / 3 * pow(r, 3)
            # add center's packed_volume
            center_solid_angle = solid_angle(self.coords, *facet_coords)
            center_r = self.radii[str(self.atom_type)][self.radius_type]
            packed_volume += center_solid_angle / 3 * pow(center_r, 3)
            volume = tetra_volume(self.coords, *facet_coords)
            volume_list.append(volume)
            volume_interstice = 1 - packed_volume/volume
            # negative values mean overlapping atoms; clip to 0
            volume_interstice_list.append(
                volume_interstice if volume_interstice > 0 else 0)
        self.volume_list_ = volume_list
        self.volume_interstice_list_ = volume_interstice_list
    def cluster_packed_volume(self):
        """
        Calculate the cluster volume that is packed with atoms, including the
        full sphere volume of the center atom plus the volume cones (from
        accumulated solid angles) of all the neighbors.
        Returns:
            packed_volume (float): atom-occupied volume of the cluster.
        """
        # accumulate, per neighbor, the total solid angle it subtends
        types_solid_angle = [0] * len(self.neighbors_type)
        for facet_indices, solid_angle_list in \
                zip(self.convex_hull_simplices(), self.get_solid_angle_lists()):
            for facet_idx, solid_angle_ in zip(facet_indices, solid_angle_list):
                types_solid_angle[facet_idx] += solid_angle_
        packed_volume = 4/3 * pi * pow(
            self.radii[str(self.atom_type)][self.radius_type], 3)
        for neighbor_type, type_solid_angle in \
                zip(self.neighbors_type, types_solid_angle):
            if type_solid_angle == 0:
                continue
            packed_volume += type_solid_angle * 1/3 * pow(
                self.radii[str(int(neighbor_type))][self.radius_type], 3)
        return packed_volume
    def cluster_packing_efficiency(self):
        """Return packed volume divided by the hull volume (fraction of the
        cluster actually occupied by atoms)."""
        return self.cluster_packed_volume() / self.convex_hull().volume
    def atomic_packing_efficiency(self):
        """Return the deviation of the center/neighbor radius ratio from the
        ideal ratio for this coordination number (Laws et al. model).

        NOTE(review): ideal_ratio_ values appear to be the tabulated ideal
        radius ratios for CN 3-24 — confirm against the original reference.
        """
        ideal_ratio_ = {3: 0.154701, 4: 0.224745, 5: 0.361654, 6: 0.414214,
                        7: 0.518145, 8: 0.616517, 9: 0.709914, 10: 0.798907,
                        11: 0.884003, 12: 0.902113, 13: 0.976006, 14: 1.04733,
                        15: 1.11632, 16: 1.18318, 17: 1.2481, 18: 1.31123,
                        19: 1.37271, 20: 1.43267, 21: 1.49119, 22: 1.5484,
                        23: 1.60436, 24: 1.65915}
        # mean neighbor radius, weighted by how often each type occurs
        nn_type_dict = defaultdict(int)
        for neighbor_type in self.neighbors_type:
            nn_type_dict[neighbor_type] += 1
        r = 0
        for t, n in nn_type_dict.items():
            r += self.radii[str(t)][self.radius_type] * n
        r = r / len(self.neighbors_type)
        return self.radii[str(self.atom_type)][self.radius_type] / r - \
            ideal_ratio_[len(self.neighbors_type)]
@lru_cache(maxsize=10)
def get_nn_instance(dependent_name, backend, **nn_kwargs):
    """
    Get Nearest Neighbor instance, for most SRO depends on the same Nearest
    Neighbor instance, we cache the most recently used Nearest Neighbor
    instance by lru_cache.
    Args:
        dependent_name (str): "voro"/"voronoi" or "dist"/"distance"
            (already truncated to its first 4 characters by the caller).
        backend (Backend): Amlearn Backend object, to prepare amlearn needed
            paths and define the common amlearn's load/save method.
        nn_kwargs: Nearest Neighbor class's keyword arguments.
    Returns:
        dependent_class (object): Nearest Neighbor instance.
    """
    # Dispatch table instead of an if/elif chain.
    nn_registry = {"voro": VoroNN, "dist": DistanceNN}
    if dependent_name not in nn_registry:
        raise ValueError('dependent name {} is unknown, Possible values '
                         'are {}'.format(dependent_name,
                                         '[voro, voronoi, '
                                         'dist, distance]'))
    return nn_registry[dependent_name](backend=backend, **nn_kwargs)
class BaseSRO(six.with_metaclass(ABCMeta, BaseFeaturize)):
    """
    Base class of Short Range Order(SRO) Featurizer, most SRO Featurizer
    depends on the output of the Nearest Neighbor class, so this base class
    implements dependency checking. For most SRO depends on the same Nearest
    Neighbor instance, we cache the most recently used Nearest Neighbor
    instance by lru_cache.
    Args:
        save (Boolean): save file or not.
        backend (object): Amlearn Backend object, to prepare amlearn needed
            paths and define the common amlearn's load/save method.
        dependent_class (object or str):
            if object, it can be "VoroNN()" or "DistanceNN()";
            if str, it can be "voro"/"voronoi" or "dist"/"distance"
        nn_kwargs: Nearest Neighbor class's keyword arguments.
    """
    def __init__(self, save=True, backend=None, dependent_class=None,
                 verbose=1, output_path=None, **nn_kwargs):
        super(BaseSRO, self).__init__(save=save,
                                      verbose=verbose,
                                      backend=backend,
                                      output_path=output_path)
        # Filled by fit() when the NN dependency has to be computed first.
        self.calculated_X = None
        if dependent_class is None:
            # No dependency given: default to Voronoi column names; the
            # actual NN instance is resolved later by check_dependency().
            self.dependent_class_ = None
            self.dependent_name_ = 'voro'
        elif isinstance(dependent_class, BaseNN):
            # Already-constructed NN featurizer instance.
            self.dependent_class_ = dependent_class
            self.dependent_name_ = dependent_class.__class__.__name__.lower()[:4]
        elif isinstance(dependent_class, str):
            # String spec: first 4 chars map "voronoi"->"voro",
            # "distance"->"dist"; instance is created (and lru-cached).
            self.dependent_name_ = dependent_class[:4]
            self.dependent_class_ = get_nn_instance(
                self.dependent_name_, getattr(self, 'backend', None),
                save=self.save, **nn_kwargs)
        else:
            raise ValueError(
                'dependent_class {} is unknown, Possible values are {} or '
                'voro/dist object.'.format(dependent_class,
                                           '[voro, voronoi, dist, distance]'))
        # Column names of the NN featurizer's output, suffixed with the
        # dependency name ("voro" or "dist").
        self.neighbor_num_col = \
            'neighbor_num_{}'.format(self.dependent_name_)
        self.neighbor_ids_col = \
            'neighbor_ids_{}'.format(self.dependent_name_)
        self.neighbor_dists_col = \
            'neighbor_dists_{}'.format(self.dependent_name_)
        self.neighbor_areas_col = \
            'neighbor_areas_{}'.format(self.dependent_name_)
        self.neighbor_vols_col = \
            'neighbor_vols_{}'.format(self.dependent_name_)
        self.neighbor_edges_col = \
            'neighbor_edges_{}'.format(self.dependent_name_)
    def fit(self, X=None):
        """Check whether X already contains the dependent NN columns; if
        not, run the NN featurizer and cache its output in
        ``self.calculated_X`` for transform() to use."""
        self.dependent_class_ = self.check_dependency(X)
        if self.dependent_class_:
            if self.save:
                self.backend.context.logger_.info(
                    "Input X don't have it's dependent columns, "
                    "now calculate it automatically")
            else:
                print("Input X don't have it's dependent columns, "
                      "now calculate it automatically")
            self.calculated_X = self.dependent_class_.fit_transform(X)
        return self
    @property
    def category(self):
        # Feature-category tag used in output file names.
        return 'sro'
class BaseInterstice(six.with_metaclass(ABCMeta, BaseSRO)):
    """Base class for interstice/packing featurizers that additionally need
    the raw atomic configuration (types, coordinates, box boundaries) on top
    of the Nearest Neighbor output.

    Args:
        type_col (str): column name holding the atom type.
        atomic_number_list (list like): optional mapping from 1-based lammps
            type index to atomic number; applied to the type column in fit().
        neighbor_num_limit (int): max neighbors kept per atom.
        radii (dict): atom-type -> radius table; packaged table when None.
        radius_type (str): which radius definition to use.
    """
    def __init__(self, backend=None, dependent_class="voro", type_col='type',
                 atomic_number_list=None, neighbor_num_limit=80,
                 save=True, radii=None, radius_type="miracle_radius",
                 verbose=1, output_path=None, **nn_kwargs):
        super(BaseInterstice, self).__init__(
            save=save, backend=backend, dependent_class=dependent_class,
            output_path=output_path, **nn_kwargs)
        self.type_col = type_col
        self.atomic_number_list = atomic_number_list
        self.neighbor_num_limit = neighbor_num_limit
        self.radii = load_radii() if radii is None else radii
        self.radius_type = radius_type
        self.verbose = verbose
    def fit(self, X=None, lammps_df=None, bds=None, lammps_path=None):
        """
        Args:
            X (DataFrame): X can be a DataFrame which composed of partial
                columns of Nearest Neighbor class's output; or X can be the
                input of Nearest Neighbor class, which should contains
                ['type', 'x', 'y', 'z'...] columns, we will automatic call
                Nearest Neighbor class to calculate X's output by self.fit()
                method, then feed it as input to this transform() method.
            lammps_df (DataFrame): Constructed from the output of lammps, which
                common columns is ['type', 'x', 'y', 'z'...] columns.
            bds (list like): X, y, z boundaries.
            lammps_path (DataFrame): If lammps_df is None, then we automatically
                construct the DataFrame from lammps output path.
        Returns:
            self (object): Interstice or Packing instance.
        """
        # Prefer an explicit lammps_df; fall back to parsing the dump file.
        if lammps_df is None and lammps_path is not None \
                and os.path.exists(lammps_path):
            self.lammps_df, self.bds = read_lammps_dump(lammps_path)
        else:
            # shallow copy so later type remapping doesn't mutate the caller's df
            self.lammps_df = copy(lammps_df)
            self.bds = bds
        if self.atomic_number_list is not None:
            # remap 1-based lammps type index -> atomic number
            self.lammps_df[self.type_col] = self.lammps_df[self.type_col].apply(
                lambda x: self.atomic_number_list[x-1])
        self.dependent_class_ = self.check_dependency(X)
        if self.dependent_class_:
            self.calculated_X = self.dependent_class_.fit_transform(X)
            # attach type/coords columns alongside the NN output
            self.calculated_X = self.calculated_X.join(self.lammps_df)
        return self
    @property
    def category(self):
        # Feature-category tag used in output file names.
        return 'interstice_sro'
class DistanceInterstice(BaseInterstice):
    """Distance-interstice featurizer.

    For each atom, computes for every neighbor the relative gap
    ``dist / (r_center + r_neighbor) - 1`` (0 means touching spheres) and
    summarizes these gaps with the statistics in ``stat_ops``.
    """
    def __init__(self, backend=None, dependent_class="voro", type_col='type',
                 atomic_number_list=None, neighbor_num_limit=80,
                 save=True, radii=None, radius_type="miracle_radius",
                 verbose=1, output_path=None, output_file_prefix=None,
                 stat_ops='all', **nn_kwargs):
        super(DistanceInterstice, self).__init__(
            save=save, backend=backend, dependent_class=dependent_class,
            type_col=type_col, atomic_number_list=atomic_number_list,
            neighbor_num_limit=neighbor_num_limit,
            radii=radii, radius_type = radius_type,
            verbose = verbose, output_path=output_path, **nn_kwargs)
        self.output_file_prefix = output_file_prefix \
            if output_file_prefix is not None \
            else 'feature_{}_{}_{}_distance'.format(
                self.category, self.dependent_name_,
                self.radius_type.replace('_radius', '') if '_radius' in self.radius_type else self.radius_type)
        # 'all' expands to the five standard summary statistics
        self.stat_ops = stat_ops if stat_ops != 'all' \
            else ['sum', 'mean', 'std', 'min', 'max']
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_ids_col,
                                self.neighbor_dists_col]
    def transform(self, X):
        """
        Args:
            X (DataFrame): X can be a DataFrame which composed of partial
                columns of Nearest Neighbor class's output; or X can be the
                input of Nearest Neighbor class, which should contains
                ['type', 'x', 'y', 'z'...] columns, we will automatic call
                Nearest Neighbor class to calculate X's output by self.fit()
                method, then feed it as input to this transform() method.
        Returns:
            dist_interstice_df (DataFrame): Distance interstice DataFrame, which
                index is same as X's index, columns is
                [neighbor_dists_interstice_voro] or
                [neighbor_dists_interstice_dist] dependent on dependent_class.
        """
        X = X.join(self.lammps_df) \
            if self.calculated_X is None else self.calculated_X
        # define print verbose
        if self.verbose > 0 and self.save:
            vr = VerboseReporter(self.backend, total_stage=1,
                                 verbose=self.verbose, max_verbose_mod=10000)
            vr.init(total_epoch=len(X), start_epoch=0,
                    init_msg='Calculating DistanceInterstice features.',
                    epoch_name='Atoms', stage=1)
        feature_lists = list()
        for idx, row in X.iterrows():
            neighbor_dist_interstice_list = list()
            for neighbor_id, neighbor_dist in zip(row[self.neighbor_ids_col],
                                                  row[self.neighbor_dists_col]):
                # ids <= 0 are padding entries in the isometric neighbor lists
                if neighbor_id > 0:
                    # relative gap: dist / (r_center + r_neighbor) - 1
                    neighbor_dist_interstice_list.append(
                        neighbor_dist / (
                                self.radii[str(int(X.loc[idx][self.type_col]))][
                                    self.radius_type] +
                                self.radii[
                                    str(int(X.loc[neighbor_id][self.type_col]))][
                                    self.radius_type]) - 1)
                else:
                    continue
            feature_lists.append(calc_stats(neighbor_dist_interstice_list,
                                            self.stat_ops))
            if self.verbose > 0 and self.save:
                vr.update(idx - 1)
        dist_interstice_df = \
            pd.DataFrame(feature_lists, columns=self.get_feature_names(),
                         index=X.index)
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=dist_interstice_df, name=self.output_file_prefix)
        return dist_interstice_df
    def get_feature_names(self):
        # one column per statistic, e.g. "Dist_interstice_mean_voro"
        return ['Dist_interstice_{}_{}'.format(
            stat, self.dependent_name_) for stat in self.stat_ops]
class VolumeAreaInterstice(BaseInterstice):
    """Volume and area interstice featurizer (Voronoi-based only).

    For each atom, builds a PackingOfSite from its neighbors and summarizes
    per-tetrahedron volume interstices and per-facet area interstices with
    the statistics in ``stat_ops``. Atoms with fewer than 4 valid neighbors
    get all-zero features (no 3D hull can be built).
    """
    def __init__(self, pbc=None, backend=None, dependent_class="voro",
                 coords_cols=None, type_col='type',
                 atomic_number_list=None,
                 neighbor_num_limit=80, save=True,
                 radii=None, radius_type="miracle_radius",
                 calc_volume_area='all', verbose=1,
                 volume_types=None, area_types=None,
                 output_path=None, output_file_prefix=None,
                 calc_indices='all', stat_ops='all', **nn_kwargs):
        """
        Args:
            calc_volume_area (str): 'volume', 'area' or 'all' — which family
                of features to compute.
            calc_indices ('all' or list like): subset of X's index to
                featurize; 'all' means every row.
            volume_types (list like): Can be one or several of the arrays
                ["volume_interstice",
                 "fractional_volume_interstice_tetrahedra",
                 "fractional_volume_interstice_tetrahedra_avg",
                 "fractional_volume_interstice_center_v"];
                default is : ["fractional_volume_interstice_tetrahedra"]
            area_types (list like): Can be one or several of the arrays
                ["area_interstice",
                 "fractional_area_interstice_triangle",
                 "fractional_area_interstice_triangle_avg",
                 "fractional_area_interstice_center_slice_a"]
                default is : ["fractional_area_interstice_triangle"]
        """
        # volume/area interstices require Voronoi facets, hence voro-only
        assert dependent_class == "voro" or dependent_class == "voronoi"
        super(VolumeAreaInterstice, self).__init__(
            save=save, backend=backend, dependent_class=dependent_class,
            type_col=type_col, atomic_number_list=atomic_number_list,
            neighbor_num_limit=neighbor_num_limit,
            radii=radii, radius_type = radius_type,
            verbose = verbose, output_path=output_path, **nn_kwargs)
        self.pbc = pbc if pbc is not None else [1, 1, 1]
        self.calc_volume_area = calc_volume_area
        self.coords_cols = coords_cols \
            if coords_cols is not None else ['x', 'y', 'z']
        self.area_list = list()
        self.area_interstice_list = list()
        self.volume_list = list()
        self.volume_interstice_list = list()
        self.calc_indices = calc_indices
        # 'all' expands to the five standard summary statistics
        self.stat_ops = stat_ops if stat_ops != 'all' \
            else ['sum', 'mean', 'std', 'min', 'max']
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_ids_col]
        # normalize scalar/None specs into lists
        self.volume_types = \
            volume_types if isinstance(volume_types, list_like()) \
            else [volume_types] if volume_types is not None \
            else ['fractional_volume_interstice_tetrahedra']
        self.area_types = \
            area_types if isinstance(area_types, list_like()) \
            else [area_types] if area_types is not None \
            else ['fractional_area_interstice_triangle']
        self.output_file_prefix = output_file_prefix \
            if output_file_prefix is not None \
            else 'feature_{}_{}_{}_volume_area'.format(
                self.category, self.dependent_name_,
                self.radius_type.replace('_radius', '') if '_radius' in self.radius_type else self.radius_type)
    def transform(self, X):
        """
        Args:
            X (DataFrame): X can be a DataFrame which composed of partial
                columns of Nearest Neighbor class's output; or X can be the
                input of Nearest Neighbor class, which should contains
                ['type', 'x', 'y', 'z'...] columns, we will automatic call
                Nearest Neighbor class to calculate X's output by self.fit()
                method, then feed it as input to this transform() method.
        Returns:
            volume_area_interstice_df (DataFrame): Volume/Area interstice
                DataFrame, which index is same as X's index, see
                get_feature_names() method for column names.
        """
        X = X.join(self.lammps_df) if self.calculated_X is None \
            else self.calculated_X
        # define print verbose
        if self.verbose > 0 and self.save:
            vr = VerboseReporter(self.backend, total_stage=1,
                                 verbose=self.verbose, max_verbose_mod=10000)
            vr.init(total_epoch=len(X), start_epoch=0,
                    init_msg='Calculating VolumeAreaInterstice features.',
                    epoch_name='Atoms', stage=1)
        if self.calc_indices == 'all':
            self.calc_indices = list(X.index)
        feature_lists = list()
        for idx, row in X.iterrows():
            if idx not in self.calc_indices:
                continue
            # collect this atom's valid (non-padding) neighbors
            neighbor_type = list()
            neighbor_coords = list()
            for neighbor_id in row[self.neighbor_ids_col]:
                if neighbor_id > 0:
                    neighbor_type.append(X.loc[neighbor_id][self.type_col])
                    neighbor_coords.append(
                        X.loc[neighbor_id][self.coords_cols].astype(float))
                else:
                    continue
            pos_ = PackingOfSite(self.pbc, self.bds, row[self.type_col],
                                 row[self.coords_cols].values.astype(float),
                                 neighbor_type, neighbor_coords,
                                 radii=self.radii, radius_type=self.radius_type)
            if len(neighbor_type) < 4:
                # fewer than 4 neighbors: no 3D hull, emit zeros
                feature_lists.append([0] * len(self.get_feature_names()))
            else:
                feature_list = list()
                if self.calc_volume_area == 'volume' or \
                        self.calc_volume_area == 'all':
                    pos_.analyze_vol_interstice()
                    volume_interstice_list = pos_.volume_interstice_list_
                    volume_list = pos_.volume_list_
                    volume_total = pos_.convex_hull().volume
                    # interstice volumes in absolute units
                    volume_interstice_original_array = \
                        np.array(volume_interstice_list)*np.array(volume_list)
                    # sphere volume of the center atom
                    center_volume = 4/3 * pi * pow(
                        pos_.radii[str(pos_.atom_type)][pos_.radius_type], 3)
                    for volume_type in self.volume_types:
                        # fractional volume_interstices in relative to the
                        # tetrahedra volume
                        if volume_type == \
                                "fractional_volume_interstice_tetrahedra":
                            feature_list.extend(
                                calc_stats(volume_interstice_list,
                                           self.stat_ops))
                        # original volume_interstices (in the units of volume)
                        elif volume_type == "volume_interstice":
                            feature_list.extend(
                                calc_stats(volume_interstice_original_array,
                                           self.stat_ops))
                        # fractional volume_interstices in relative to the
                        # entire volume
                        elif volume_type == \
                                "fractional_volume_interstice_tetrahedra_avg":
                            feature_list.extend(
                                calc_stats(volume_interstice_original_array /
                                           volume_total * len(volume_list),
                                           self.stat_ops))
                        # fractional volume_interstices in relative to the
                        # center atom volume
                        elif volume_type == \
                                "fractional_volume_interstice_center_v":
                            feature_list.extend(
                                calc_stats(volume_interstice_original_array /
                                           center_volume, self.stat_ops))
                if self.calc_volume_area == 'area' or \
                        self.calc_volume_area == 'all':
                    pos_.analyze_area_interstice()
                    area_interstice_list = pos_.area_interstice_list_
                    area_list = pos_.area_list_
                    area_total = pos_.convex_hull().area
                    # interstice areas in absolute units
                    area_interstice_original_array = \
                        np.array(area_interstice_list) * np.array(area_list)
                    # great-circle slice area of the center atom
                    center_slice_area = pi * pow(
                        pos_.radii[str(pos_.atom_type)][pos_.radius_type], 2)
                    for area_type in self.area_types:
                        # fractional area_interstices in relative to the
                        # tetrahedra area
                        if area_type == "fractional_area_interstice_triangle":
                            feature_list.extend(
                                calc_stats(area_interstice_list, self.stat_ops))
                        # original area_interstices (in the units of area)
                        if area_type == "area_interstice":
                            feature_list.extend(
                                calc_stats(area_interstice_original_array,
                                           self.stat_ops))
                        # fractional area_interstices in relative to the
                        # entire area
                        if area_type == \
                                "fractional_area_interstice_triangle_avg":
                            feature_list.extend(
                                calc_stats(area_interstice_original_array /
                                           area_total * len(area_list),
                                           self.stat_ops))
                        # fractional area_interstices in relative to the center
                        # atom volume
                        if area_type == \
                                "fractional_area_interstice_center_slice_a":
                            feature_list.extend(
                                calc_stats(area_interstice_original_array /
                                           center_slice_area, self.stat_ops))
                feature_lists.append(feature_list)
            if self.verbose > 0 and self.save:
                vr.update(idx - 1)
        volume_area_interstice_df = \
            pd.DataFrame(feature_lists, index=self.calc_indices,
                         columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=volume_area_interstice_df,
                name=self.output_file_prefix)
        return volume_area_interstice_df
    def get_feature_names(self):
        """Column names: one per (feature type, statistic) pair; the type
        prefix collapses to a generic name when only one type is requested."""
        feature_names = list()
        feature_prefixs = list()
        if self.calc_volume_area == 'volume' or self.calc_volume_area == 'all':
            volume_type_names = ['Volume_interstice'] \
                if len(self.volume_types) == 1 else self.volume_types
            feature_prefixs += volume_type_names
        if self.calc_volume_area == 'area' or self.calc_volume_area == 'all':
            volume_type_names = ['Area_interstice'] \
                if len(self.area_types) == 1 else self.area_types
            feature_prefixs += volume_type_names
        feature_names += ['{}_{}_{}'.format(feature_prefix, stat,
                                            self.dependent_name_)
                          for feature_prefix in feature_prefixs
                          for stat in self.stat_ops]
        return feature_names
class ClusterPackingEfficiency(BaseInterstice):
    """
    <NAME>. et al. Atomic-Scale Mechanisms of the Glass-Forming Ability
    in Metallic Glasses. Phys. Rev. Lett. 109, 105502 (2012).
    The authors also term this metric as "Atomic Packing Efficiency" in the
    original paper. Here we name it as "Cluster Packing Efficiency" to
    distinguish this with that proposed in Laws, K. J. et al. Nat. Commun.
    6, 8123 (2015).

    The feature is PackingOfSite.cluster_packing_efficiency(): the fraction
    of each atom's neighbor convex hull that is occupied by atom spheres.
    """
    def __init__(self, pbc=None, backend=None, dependent_class="voro",
                 coords_cols=None, type_col='type',
                 atomic_number_list=None,
                 neighbor_num_limit=80, save=True,
                 radii=None, radius_type="miracle_radius",
                 verbose=1, output_path=None, output_file_prefix=None,
                 **nn_kwargs):
        # requires Voronoi neighbors
        assert dependent_class == "voro" or dependent_class == "voronoi"
        super(ClusterPackingEfficiency, self).__init__(
            save=save, backend=backend, dependent_class=dependent_class,
            type_col=type_col, atomic_number_list=atomic_number_list,
            neighbor_num_limit=neighbor_num_limit, radii=radii,
            radius_type = radius_type, verbose = verbose,
            output_path=output_path, **nn_kwargs)
        self.pbc = pbc if pbc is not None else [1, 1, 1]
        self.coords_cols = coords_cols \
            if coords_cols is not None else ['x', 'y', 'z']
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_ids_col]
        self.output_file_prefix = output_file_prefix \
            if output_file_prefix is not None \
            else 'feature_{}_{}_{}_cpe'.format(
                self.category.replace('interstice_', ''), self.dependent_name_,
                self.radius_type.replace('_radius', '') if '_radius' in self.radius_type else self.radius_type)
    def transform(self, X):
        """
        Args:
            X (DataFrame): X can be a DataFrame which composed of partial
                columns of Nearest Neighbor class's output; or X can be the
                input of Nearest Neighbor class, which should contains
                ['type', 'x', 'y', 'z'...] columns, we will automatic call
                Nearest Neighbor class to calculate X's output by self.fit()
                method, then feed it as input to this transform() method.
        Returns:
            cluster_packing_efficiency_df (DataFrame): Cluster Packing
                Efficiency_df DataFrame, which index is same as X's index,
                see get_feature_names() method for column names.
        """
        X = X.join(self.lammps_df) \
            if self.calculated_X is None else self.calculated_X
        # define print verbose
        if self.verbose > 0 and self.save:
            vr = VerboseReporter(self.backend, total_stage=1,
                                 verbose=self.verbose, max_verbose_mod=10000)
            vr.init(total_epoch=len(X), start_epoch=0,
                    init_msg='Calculating Cluster Packing Efficiency features.',
                    epoch_name='Atoms', stage=1)
        feature_lists = list()
        for idx, row in X.iterrows():
            # collect this atom's valid (non-padding) neighbors
            neighbor_type = list()
            neighbor_coords = list()
            for neighbor_id in row[self.neighbor_ids_col]:
                if neighbor_id > 0:
                    neighbor_type.append(X.loc[neighbor_id][self.type_col])
                    neighbor_coords.append(X.loc[neighbor_id][self.coords_cols])
                else:
                    continue
            pos_ = PackingOfSite(self.pbc, self.bds,
                                 row[self.type_col], row[self.coords_cols],
                                 neighbor_type, neighbor_coords,
                                 radii=self.radii, radius_type=self.radius_type)
            if len(neighbor_type) < 4:
                # fewer than 4 neighbors: no 3D hull, emit zeros
                feature_lists.append([0] * len(self.get_feature_names()))
            else:
                feature_lists.append([pos_.cluster_packing_efficiency()])
            if self.verbose > 0 and self.save:
                vr.update(idx - 1)
        cluster_packing_efficiency_df = pd.DataFrame(
            feature_lists, index=X.index, columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=cluster_packing_efficiency_df,
                name=self.output_file_prefix)
        return cluster_packing_efficiency_df
    def get_feature_names(self):
        # e.g. "Cluster_packing_efficiency_miracle_voro"
        feature_names = ['Cluster_packing_efficiency_{}_{}'.format(
            self.radius_type.replace("_radius", ""), self.dependent_name_)]
        return feature_names
class AtomicPackingEfficiency(BaseInterstice):
    """
    Laws, <NAME>., <NAME>. & <NAME>. A predictive structural model for
    bulk metallic glasses. Nat. Commun. 6, 8123 (2015).

    The feature is PackingOfSite.atomic_packing_efficiency(): the deviation
    of the center/neighbor radius ratio from the ideal ratio for the site's
    coordination number.
    """
    def __init__(self, pbc=None, backend=None, dependent_class="voro",
                 coords_cols=None, type_col='type',
                 atomic_number_list=None,
                 neighbor_num_limit=80, save=True,
                 radii=None, radius_type="miracle_radius",
                 verbose=1, output_path=None, output_file_prefix=None,
                 **nn_kwargs):
        # requires Voronoi neighbors
        assert dependent_class == "voro" or dependent_class == "voronoi"
        super(AtomicPackingEfficiency, self).__init__(
            save=save, backend=backend, dependent_class=dependent_class,
            type_col=type_col, atomic_number_list=atomic_number_list,
            neighbor_num_limit=neighbor_num_limit, radii=radii,
            radius_type = radius_type, verbose = verbose,
            output_path=output_path, **nn_kwargs)
        self.pbc = pbc if pbc is not None else [1, 1, 1]
        self.coords_cols = coords_cols \
            if coords_cols is not None else ['x', 'y', 'z']
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_ids_col]
        self.output_file_prefix = output_file_prefix \
            if output_file_prefix is not None \
            else 'feature_{}_{}_{}_ape'.format(
                self.category.replace('interstice_', ''), self.dependent_name_,
                self.radius_type.replace('_radius', '') if '_radius' in self.radius_type else self.radius_type)
    def transform(self, X):
        """
        Args:
            X (DataFrame): X can be a DataFrame which composed of partial
                columns of Nearest Neighbor class's output; or X can be the
                input of Nearest Neighbor class, which should contains
                ['type', 'x', 'y', 'z'...] columns, we will automatic call
                Nearest Neighbor class to calculate X's output by self.fit()
                method, then feed it as input to this transform() method.
        Returns:
            atomic_packing_efficiency_df (DataFrame): Atomic Packing Efficiency
                DataFrame, which index is same as X's index, see
                get_feature_names() method for column names.
        """
        X = X.join(self.lammps_df) \
            if self.calculated_X is None else self.calculated_X
        # define print verbose
        if self.verbose > 0 and self.save:
            vr = VerboseReporter(self.backend, total_stage=1,
                                 verbose=self.verbose, max_verbose_mod=10000)
            vr.init(total_epoch=len(X), start_epoch=0,
                    init_msg='Calculating Atomic Packing Efficiency features.',
                    epoch_name='Atoms', stage=1)
        feature_lists = list()
        for idx, row in X.iterrows():
            # collect this atom's valid (non-padding) neighbors
            neighbor_type = list()
            neighbor_coords = list()
            for neighbor_id in row[self.neighbor_ids_col]:
                if neighbor_id > 0:
                    neighbor_type.append(X.loc[neighbor_id][self.type_col])
                    neighbor_coords.append(X.loc[neighbor_id][self.coords_cols])
                else:
                    continue
            pos_ = PackingOfSite(self.pbc, self.bds,
                                 row[self.type_col], row[self.coords_cols],
                                 neighbor_type, neighbor_coords,
                                 radii=self.radii, radius_type=self.radius_type)
            if len(neighbor_type) < 4:
                # fewer than 4 neighbors: no 3D hull, emit zeros
                feature_lists.append([0] * len(self.get_feature_names()))
            else:
                feature_lists.append([pos_.atomic_packing_efficiency()])
            if self.verbose > 0 and self.save:
                vr.update(idx - 1)
        atomic_packing_efficiency_df = pd.DataFrame(
            feature_lists, index=X.index, columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=atomic_packing_efficiency_df,
                name=self.output_file_prefix)
        return atomic_packing_efficiency_df
    def get_feature_names(self):
        # e.g. "Atomic_packing_efficiency_miracle_voro"
        feature_names = ['Atomic_packing_efficiency_{}_{}'.format(
            self.radius_type.replace("_radius", ""), self.dependent_name_)]
        return feature_names
class CN(BaseSRO):
    """Coordination number (CN) featurizer.

    Exposes the neighbor count column produced by the dependent Nearest
    Neighbor featurizer ("voro" or "dist") as a single feature.
    """

    def __init__(self, backend=None, dependent_class="voro", save=True,
                 output_path=None, output_file_prefix=None, **nn_kwargs):
        super(CN, self).__init__(save=save, backend=backend,
                                 dependent_class=dependent_class,
                                 output_path=output_path, **nn_kwargs)
        # only the neighbor-count column is needed from the NN output
        self.dependent_cols_ = [self.neighbor_num_col]
        if output_file_prefix is None:
            output_file_prefix = 'feature_{}_{}_cn'.format(
                self.category, self.dependent_name_)
        self.output_file_prefix = output_file_prefix

    def transform(self, X=None):
        """Return a one-column DataFrame with each atom's coordination
        number; optionally persist it through the backend."""
        source_df = self.calculated_X if self.calculated_X is not None else X
        cn_df = pd.DataFrame(source_df[self.dependent_cols_].values,
                             index=source_df.index,
                             columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=cn_df, name=self.output_file_prefix)
        return cn_df

    def get_feature_names(self):
        """Return the single feature-column name, e.g. "CN_voro"."""
        return ['CN_{}'.format(self.dependent_name_)]
class VoroIndex(BaseSRO):
    """Voronoi index featurizer.

    Counts, per atom, how many Voronoi facets have each number of edges in
    [edge_min, edge_max], yielding the classic <n3, n4, n5, n6, n7> index.
    The counting itself is done by the fortran routine
    ``voronoi_stats.voronoi_index``.
    """

    def __init__(self, backend=None, dependent_class="voro",
                 neighbor_num_limit=80, include_beyond_edge_max=True,
                 save=True, edge_min=3, edge_max=7, output_path=None,
                 output_file_prefix=None, **nn_kwargs):
        # Voronoi facets are required, so only voro-style dependencies work.
        assert dependent_class in ("voro", "voronoi") or \
            isinstance(dependent_class, VoroNN)
        super(VoroIndex, self).__init__(save=save, backend=backend,
                                        dependent_class=dependent_class,
                                        output_path=output_path, **nn_kwargs)
        self.edge_min = edge_min
        self.edge_max = edge_max
        self.neighbor_num_limit = neighbor_num_limit
        # whether facets with more than edge_max edges count into the last bin
        self.include_beyond_edge_max = include_beyond_edge_max
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_edges_col]
        if output_file_prefix is None:
            output_file_prefix = 'feature_{}_{}_voronoi_index'.format(
                self.category, self.dependent_name_)
        self.output_file_prefix = output_file_prefix

    def transform(self, X=None):
        """Return a DataFrame of Voronoi indices (one column per edge count
        in [edge_min, edge_max]); optionally persist it via the backend."""
        source_df = self.calculated_X if self.calculated_X is not None else X
        n_atoms = len(source_df)
        n_edge_bins = self.edge_max - self.edge_min + 1
        # pad the ragged per-atom edge lists to a fixed width for fortran
        padded_edges = get_isometric_lists(
            source_df[self.neighbor_edges_col].values,
            limit_width=self.neighbor_num_limit)
        counts = np.zeros((n_atoms, n_edge_bins))
        counts = voronoi_stats.voronoi_index(
            counts, source_df[self.neighbor_num_col].values, padded_edges,
            self.edge_min, self.edge_max, self.include_beyond_edge_max,
            n_atoms=n_atoms, neighbor_num_limit=self.neighbor_num_limit)
        voro_index_df = pd.DataFrame(counts, index=source_df.index,
                                     columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=voro_index_df, name=self.output_file_prefix)
        return voro_index_df

    def get_feature_names(self):
        """Return column names, e.g. "Voronoi_idx_5_voro"."""
        names = []
        for edge in range(self.edge_min, self.edge_max + 1):
            names.append('Voronoi_idx_{}_{}'.format(edge, self.dependent_name_))
        return names
class CharacterMotif(BaseSRO):
    """Character-motif featurizer.

    Matches each atom's Voronoi index <n3..n7> against target motifs
    (by default <0,0,12,0,0> and <0,0,12,4,0>) via the fortran routine
    ``voronoi_stats.character_motif``, emitting one-hot motif flags plus
    Frank-Kasper related flags and a combined "is_<0,0,12,0/4,0>" column.
    """
    def __init__(self, backend=None, dependent_class="voro",
                 neighbor_num_limit=80, include_beyond_edge_max=True,
                 edge_min=3, target_voro_idx=None, frank_kasper=1,
                 save=True, output_path=None, output_file_prefix=None,
                 **nn_kwargs):
        assert dependent_class == "voro" or dependent_class == "voronoi"
        super(CharacterMotif, self).__init__(save=save,
                                             backend=backend,
                                             dependent_class=dependent_class,
                                             output_path=output_path,
                                             **nn_kwargs)
        self.neighbor_num_limit = neighbor_num_limit
        self.include_beyond_edge_max = include_beyond_edge_max
        # Bug fix: a user-supplied target_voro_idx was previously ignored
        # (the attribute was only assigned in the None branch), which raised
        # AttributeError later in transform(). Now any provided value is
        # normalized to a longdouble ndarray, matching the default's dtype.
        if target_voro_idx is None:
            self.target_voro_idx = np.array([[0, 0, 12, 0, 0],
                                             [0, 0, 12, 4, 0]],
                                            dtype=np.longdouble)
        else:
            self.target_voro_idx = np.array(target_voro_idx,
                                            dtype=np.longdouble)
        self.frank_kasper = frank_kasper
        self.edge_min = edge_min
        self.dependent_cols_ = ['Voronoi_idx_{}_voro'.format(idx)
                                for idx in range(3, 8)]
        self.output_file_prefix = output_file_prefix \
            if output_file_prefix is not None \
            else 'feature_{}_voro_character_motif'.format(self.category)
    def fit(self, X=None):
        """Ensure the Voronoi-index columns exist; compute them via a
        non-saving VoroIndex run when X lacks them."""
        self.dependent_class_ = self.check_dependency(X)
        # This class is only dependent on 'Voronoi_indices_voro' col, so if
        # X don't have this col, this method will calculate it automatically.
        if self.dependent_class_ is not None:
            # NOTE(review): passes self.dependent_class (presumably set by
            # BaseFeaturize/check_dependency) rather than self.dependent_class_
            # — confirm this attribute exists on the base class.
            voro_index = \
                VoroIndex(neighbor_num_limit=self.neighbor_num_limit,
                          include_beyond_edge_max=self.include_beyond_edge_max,
                          dependent_class=self.dependent_class, save=False,
                          backend=getattr(self, 'backend', None))
            self.calculated_X = voro_index.fit_transform(X)
        return self
    def transform(self, X=None):
        """Return a DataFrame of motif one-hot flags (see
        get_feature_names()); optionally persist it via the backend."""
        X = X if self.calculated_X is None else self.calculated_X
        # pad the Voronoi indices to the motif width expected by fortran
        voro_idx_lists = get_isometric_lists(
            X[self.dependent_cols_].values, len(self.target_voro_idx[0]))
        motif_one_hot = np.zeros((len(X),
                                  len(self.target_voro_idx) + self.frank_kasper))
        motif_one_hot = \
            voronoi_stats.character_motif(motif_one_hot, voro_idx_lists,
                                          self.edge_min, self.target_voro_idx,
                                          self.frank_kasper, n_atoms=len(X))
        motif_one_hot_array = np.array(motif_one_hot)
        # combined flag: site matches either of the first two motifs
        is_120_124 = motif_one_hot_array[:, 0] | motif_one_hot_array[:, 1]
        motif_one_hot_array = np.append(motif_one_hot_array,
                                        np.array([is_120_124]).T, axis=1)
        character_motif_df = pd.DataFrame(motif_one_hot_array, index=X.index,
                                          columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=character_motif_df, name=self.output_file_prefix)
        return character_motif_df
    def get_feature_names(self):
        """Column names: the two canonical motif flags, one column per extra
        target motif (rows beyond the first two), then the Frank-Kasper and
        combined flags."""
        feature_names = ['is_<0,0,12,0,0>_voro', 'is_<0,0,12,4,0>_voro'] + \
                        ["_".join(map(str, v)) + "_voro"
                         for v in self.target_voro_idx[2:]] + \
                        ['is_polytetrahedral_voro', 'is_<0,0,12,0/4,0>_voro']
        return feature_names
class IFoldSymmetry(BaseSRO):
    """i-fold symmetry indices computed from Voronoi facet edge counts.

    One feature column per edge count in ``[edge_min, edge_max]``; the
    actual statistic is evaluated by the compiled
    ``voronoi_stats.i_fold_symmetry`` routine.
    """

    def __init__(self, backend=None, dependent_class="voro",
                 neighbor_num_limit=80, include_beyond_edge_max=True,
                 edge_min=3, edge_max=7, save=True, output_path=None,
                 output_file_prefix=None, **nn_kwargs):
        assert dependent_class in ("voro", "voronoi")
        super(IFoldSymmetry, self).__init__(save=save,
                                            backend=backend,
                                            dependent_class=dependent_class,
                                            output_path=output_path,
                                            **nn_kwargs)
        self.neighbor_num_limit = neighbor_num_limit
        self.include_beyond_edge_max = include_beyond_edge_max
        self.edge_min = edge_min
        self.edge_max = edge_max
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_edges_col]
        if output_file_prefix is None:
            output_file_prefix = \
                'feature_{}_voro_i_fold_symmetry'.format(self.category)
        self.output_file_prefix = output_file_prefix

    def transform(self, X=None):
        """Return a DataFrame of i-fold symmetry indices, one row per atom."""
        if self.calculated_X is not None:
            X = self.calculated_X
        n_atoms = len(X)
        padded_edges = get_isometric_lists(
            X[self.neighbor_edges_col].values,
            limit_width=self.neighbor_num_limit)
        n_cols = self.edge_max - self.edge_min + 1
        symm_array = voronoi_stats.i_fold_symmetry(
            np.zeros((n_atoms, n_cols)), X[self.neighbor_num_col].values,
            padded_edges, self.edge_min, self.edge_max,
            self.include_beyond_edge_max, n_atoms=n_atoms,
            neighbor_num_limit=self.neighbor_num_limit)
        i_symm_df = pd.DataFrame(symm_array, index=X.index,
                                 columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=i_symm_df, name=self.output_file_prefix)
        return i_symm_df

    def get_feature_names(self):
        """Column names: ``{i}_fold_symm_idx_voro`` per edge count."""
        return ['{}_fold_symm_idx_voro'.format(edge)
                for edge in range(self.edge_min, self.edge_max + 1)]
class AreaWtIFoldSymmetry(BaseSRO):
    """Facet-area-weighted i-fold symmetry indices.

    Same layout as :class:`IFoldSymmetry`, but each facet is weighted by
    its area inside ``voronoi_stats.area_wt_i_fold_symmetry``.
    """

    def __init__(self, backend=None, dependent_class="voro",
                 neighbor_num_limit=80, include_beyond_edge_max=True,
                 edge_min=3, edge_max=7, save=True, output_path=None,
                 output_file_prefix=None, **nn_kwargs):
        assert dependent_class in ("voro", "voronoi")
        super(AreaWtIFoldSymmetry, self).__init__(
            save=save, backend=backend, dependent_class=dependent_class,
            output_path=output_path, **nn_kwargs)
        self.neighbor_num_limit = neighbor_num_limit
        self.include_beyond_edge_max = include_beyond_edge_max
        self.edge_min = edge_min
        self.edge_max = edge_max
        self.dependent_cols_ = [self.neighbor_num_col,
                                self.neighbor_edges_col,
                                self.neighbor_areas_col]
        if output_file_prefix is None:
            output_file_prefix = \
                'feature_{}_{}_area_wt_i_fold_symmetry'.format(
                    self.category, self.dependent_name_)
        self.output_file_prefix = output_file_prefix

    def transform(self, X=None):
        """Return the area-weighted symmetry DataFrame, one row per atom."""
        if self.calculated_X is not None:
            X = self.calculated_X
        n_atoms = len(X)
        padded_edges = get_isometric_lists(
            X[self.neighbor_edges_col].values,
            limit_width=self.neighbor_num_limit)
        # Areas are promoted to longdouble for the compiled routine.
        padded_areas = get_isometric_lists(
            X[self.neighbor_areas_col].values,
            limit_width=self.neighbor_num_limit).astype(np.longdouble)
        n_cols = self.edge_max - self.edge_min + 1
        weighted = voronoi_stats.area_wt_i_fold_symmetry(
            np.zeros((n_atoms, n_cols)), X[self.neighbor_num_col].values,
            padded_edges, padded_areas, self.edge_min, self.edge_max,
            self.include_beyond_edge_max, n_atoms=n_atoms,
            neighbor_num_limit=self.neighbor_num_limit)
        area_wt_i_symm_df = pd.DataFrame(weighted, index=X.index,
                                         columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=area_wt_i_symm_df, name=self.output_file_prefix)
        return area_wt_i_symm_df

    def get_feature_names(self):
        """Column names: ``Area_wt_{i}_fold_symm_idx_voro`` per edge count."""
        return ['Area_wt_{}_fold_symm_idx_voro'.format(edge)
                for edge in range(self.edge_min, self.edge_max + 1)]
class VolWtIFoldSymmetry(BaseSRO):
    """Sub-polyhedron-volume-weighted i-fold symmetry indices.

    Mirrors :class:`AreaWtIFoldSymmetry`, weighting by neighbor volumes
    via ``voronoi_stats.vol_wt_i_fold_symmetry``.
    """

    def __init__(self, backend=None, dependent_class="voro",
                 neighbor_num_limit=80, include_beyond_edge_max=True,
                 edge_min=3, edge_max=7, save=True, output_path=None,
                 output_file_prefix=None, **nn_kwargs):
        assert dependent_class in ("voro", "voronoi")
        super(VolWtIFoldSymmetry, self).__init__(
            save=save, backend=backend, dependent_class=dependent_class,
            output_path=output_path, **nn_kwargs)
        self.neighbor_num_limit = neighbor_num_limit
        self.include_beyond_edge_max = include_beyond_edge_max
        self.edge_min = edge_min
        self.edge_max = edge_max
        self.dependent_cols_ = [self.neighbor_num_col,
                                self.neighbor_edges_col,
                                self.neighbor_vols_col]
        if output_file_prefix is None:
            output_file_prefix = 'feature_{}_{}_vol_wt_i_fold_symmetry'.format(
                self.category, self.dependent_name_)
        self.output_file_prefix = output_file_prefix

    def transform(self, X=None):
        """Return the volume-weighted symmetry DataFrame, one row per atom."""
        if self.calculated_X is not None:
            X = self.calculated_X
        n_atoms = len(X)
        padded_edges = get_isometric_lists(
            X[self.neighbor_edges_col].values,
            limit_width=self.neighbor_num_limit)
        # Volumes are promoted to longdouble for the compiled routine.
        padded_vols = get_isometric_lists(
            X[self.neighbor_vols_col].values,
            limit_width=self.neighbor_num_limit).astype(np.longdouble)
        n_cols = self.edge_max - self.edge_min + 1
        weighted = voronoi_stats.vol_wt_i_fold_symmetry(
            np.zeros((n_atoms, n_cols)), X[self.neighbor_num_col].values,
            padded_edges, padded_vols, self.edge_min, self.edge_max,
            self.include_beyond_edge_max, n_atoms=n_atoms,
            neighbor_num_limit=self.neighbor_num_limit)
        vol_wt_i_symm_df = pd.DataFrame(weighted, index=X.index,
                                        columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=vol_wt_i_symm_df, name=self.output_file_prefix)
        return vol_wt_i_symm_df

    def get_feature_names(self):
        """Column names: ``Vol_wt_{i}_fold_symm_idx_voro`` per edge count."""
        return ['Vol_wt_{}_fold_symm_idx_voro'.format(edge)
                for edge in range(self.edge_min, self.edge_max + 1)]
class VoroAreaStats(BaseSRO):
    """Summary statistics of Voronoi facet areas per atom.

    Produces the facet-area sum plus mean/std/min/max columns, computed by
    the compiled ``voronoi_stats.voronoi_area_stats`` routine.
    """

    def __init__(self, backend=None, dependent_class="voro",
                 neighbor_num_limit=80, save=True, output_path=None,
                 output_file_prefix=None, **nn_kwargs):
        assert dependent_class in ("voro", "voronoi")
        super(VoroAreaStats, self).__init__(save=save,
                                            backend=backend,
                                            dependent_class=dependent_class,
                                            output_path=output_path,
                                            **nn_kwargs)
        self.neighbor_num_limit = neighbor_num_limit
        self.stats = ['mean', 'std', 'min', 'max']
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_areas_col]
        if output_file_prefix is None:
            output_file_prefix = 'feature_{}_{}_area_stats'.format(
                self.category, self.dependent_name_)
        self.output_file_prefix = output_file_prefix

    def transform(self, X=None):
        """Return the facet-area statistics DataFrame, one row per atom."""
        if self.calculated_X is not None:
            X = self.calculated_X
        n_atoms = len(X)
        padded_areas = get_isometric_lists(
            X[self.neighbor_areas_col].values,
            limit_width=self.neighbor_num_limit).astype(np.longdouble)
        # One output column for the sum plus one per statistic.
        stats_array = voronoi_stats.voronoi_area_stats(
            np.zeros((n_atoms, len(self.stats) + 1)),
            X[self.neighbor_num_col].values, padded_areas, n_atoms=n_atoms,
            neighbor_num_limit=self.neighbor_num_limit)
        area_stats_df = pd.DataFrame(stats_array, index=X.index,
                                     columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=area_stats_df, name=self.output_file_prefix)
        return area_stats_df

    def get_feature_names(self):
        """Column names: facet-area sum followed by one name per statistic."""
        names = ['Facet_area_sum_voro']
        names += ['Facet_area_{}_voro'.format(stat) for stat in self.stats]
        return names
class VoroAreaStatsSeparate(BaseSRO):
    """Facet-area statistics split out by facet edge count.

    For each edge count in ``[edge_min, edge_max]`` emits sum/mean/std/
    min/max of the areas of facets with that many edges.
    """

    def __init__(self, backend=None, dependent_class="voro",
                 neighbor_num_limit=80, include_beyond_edge_max=True,
                 edge_min=3, edge_max=7, save=True, output_path=None,
                 output_file_prefix=None, **nn_kwargs):
        assert dependent_class in ("voro", "voronoi")
        super(VoroAreaStatsSeparate, self).__init__(
            save=save, backend=backend, dependent_class=dependent_class,
            output_path=output_path, **nn_kwargs)
        self.neighbor_num_limit = neighbor_num_limit
        self.edge_min = edge_min
        self.edge_max = edge_max
        self.edge_num = edge_max - edge_min + 1
        self.include_beyond_edge_max = include_beyond_edge_max
        self.stats = ['sum', 'mean', 'std', 'min', 'max']
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_edges_col,
                                self.neighbor_areas_col]
        if output_file_prefix is None:
            output_file_prefix = 'feature_{}_{}_area_stats_separate'.format(
                self.category, self.dependent_name_)
        self.output_file_prefix = output_file_prefix

    def transform(self, X=None):
        """Return the per-edge-count area statistics, one row per atom."""
        if self.calculated_X is not None:
            X = self.calculated_X
        n_atoms = len(X)
        padded_edges = get_isometric_lists(
            X[self.neighbor_edges_col].values,
            limit_width=self.neighbor_num_limit)
        padded_areas = get_isometric_lists(
            X[self.neighbor_areas_col].values,
            limit_width=self.neighbor_num_limit).astype(np.longdouble)
        # len(stats) columns for every edge count.
        stats_array = voronoi_stats.voronoi_area_stats_separate(
            np.zeros((n_atoms, self.edge_num * len(self.stats))),
            X[self.neighbor_num_col].values, padded_edges, padded_areas,
            self.edge_min, self.edge_max, self.include_beyond_edge_max,
            n_atoms=n_atoms, neighbor_num_limit=self.neighbor_num_limit)
        area_stats_separate_df = pd.DataFrame(
            stats_array, index=X.index, columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=area_stats_separate_df, name=self.output_file_prefix)
        return area_stats_separate_df

    def get_feature_names(self):
        """Column names: ``{i}_edged_area_{stat}_voro`` in edge-major order."""
        return ['{}_edged_area_{}_voro'.format(edge, stat)
                for edge in range(self.edge_min, self.edge_max + 1)
                for stat in self.stats]
class VoroVolStats(BaseSRO):
    """Summary statistics of Voronoi sub-polyhedron volumes per atom.

    Produces the volume sum plus mean/std/min/max columns, computed by
    the compiled ``voronoi_stats.voronoi_vol_stats`` routine.
    """

    def __init__(self, backend=None, dependent_class="voro",
                 neighbor_num_limit=80, save=True, output_path=None,
                 output_file_prefix=None, **nn_kwargs):
        assert dependent_class in ("voro", "voronoi")
        super(VoroVolStats, self).__init__(
            save=save, backend=backend, dependent_class=dependent_class,
            output_path=output_path, **nn_kwargs)
        self.neighbor_num_limit = neighbor_num_limit
        self.stats = ['mean', 'std', 'min', 'max']
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_vols_col]
        if output_file_prefix is None:
            output_file_prefix = 'feature_{}_{}_vol_stats'.format(
                self.category, self.dependent_name_)
        self.output_file_prefix = output_file_prefix

    def transform(self, X=None):
        """Return the sub-polyhedron volume statistics, one row per atom."""
        if self.calculated_X is not None:
            X = self.calculated_X
        n_atoms = len(X)
        padded_vols = get_isometric_lists(
            X[self.neighbor_vols_col].values,
            limit_width=self.neighbor_num_limit).astype(np.longdouble)
        # One output column for the sum plus one per statistic.
        stats_array = voronoi_stats.voronoi_vol_stats(
            np.zeros((n_atoms, len(self.stats) + 1)),
            X[self.neighbor_num_col].values, padded_vols, n_atoms=n_atoms,
            neighbor_num_limit=self.neighbor_num_limit)
        vol_stats_df = pd.DataFrame(stats_array, index=X.index,
                                    columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=vol_stats_df, name=self.output_file_prefix)
        return vol_stats_df

    def get_feature_names(self):
        """Column names: volume sum followed by one name per statistic."""
        names = ['Subpolyhedra_vol_sum_voro']
        names += ['Subpolyhedra_vol_{}_voro'.format(stat)
                  for stat in self.stats]
        return names
class VoroVolStatsSeparate(BaseSRO):
    """Sub-polyhedron volume statistics split out by facet edge count.

    For each edge count in ``[edge_min, edge_max]`` emits sum/mean/std/
    min/max of the volumes associated with facets of that edge count.
    """

    def __init__(self, backend=None, dependent_class="voro",
                 neighbor_num_limit=80, include_beyond_edge_max=True,
                 edge_min=3, edge_max=7, save=True, output_path=None,
                 output_file_prefix=None, **nn_kwargs):
        assert dependent_class in ("voro", "voronoi")
        super(VoroVolStatsSeparate, self).__init__(
            save=save, backend=backend, dependent_class=dependent_class,
            output_path=output_path, **nn_kwargs)
        self.edge_min = edge_min
        self.edge_max = edge_max
        self.edge_num = edge_max - edge_min + 1
        self.neighbor_num_limit = neighbor_num_limit
        self.include_beyond_edge_max = include_beyond_edge_max
        self.stats = ['sum', 'mean', 'std', 'min', 'max']
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_edges_col,
                                self.neighbor_vols_col]
        if output_file_prefix is None:
            output_file_prefix = 'feature_{}_{}_vol_stats_separate'.format(
                self.category, self.dependent_name_)
        self.output_file_prefix = output_file_prefix

    def transform(self, X=None):
        """Return the per-edge-count volume statistics, one row per atom."""
        if self.calculated_X is not None:
            X = self.calculated_X
        n_atoms = len(X)
        padded_edges = get_isometric_lists(
            X[self.neighbor_edges_col].values,
            limit_width=self.neighbor_num_limit)
        padded_vols = get_isometric_lists(
            X[self.neighbor_vols_col].values,
            limit_width=self.neighbor_num_limit).astype(np.longdouble)
        # len(stats) columns for every edge count.
        stats_array = voronoi_stats.voronoi_vol_stats_separate(
            np.zeros((n_atoms, self.edge_num * len(self.stats))),
            X[self.neighbor_num_col].values, padded_edges, padded_vols,
            self.edge_min, self.edge_max, self.include_beyond_edge_max,
            n_atoms=n_atoms, neighbor_num_limit=self.neighbor_num_limit)
        vol_stats_separate_df = pd.DataFrame(
            stats_array, index=X.index, columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=vol_stats_separate_df, name=self.output_file_prefix)
        return vol_stats_separate_df

    def get_feature_names(self):
        """Column names: ``{i}_edged_vol_{stat}_voro`` in edge-major order."""
        return ['{}_edged_vol_{}_voro'.format(edge, stat)
                for edge in range(self.edge_min, self.edge_max + 1)
                for stat in self.stats]
class DistStats(BaseSRO):
    """Summary statistics of neighbor distances per atom.

    Works for either dependent class (no voro-only assertion here);
    ``dist_type`` only labels the output columns.
    """

    def __init__(self, backend=None, dependent_class="voro",
                 dist_type='distance', neighbor_num_limit=80, save=True,
                 output_path=None, output_file_prefix=None, **nn_kwargs):
        super(DistStats, self).__init__(save=save, backend=backend,
                                        dependent_class=dependent_class,
                                        output_path=output_path,
                                        **nn_kwargs)
        self.dist_type = dist_type
        self.neighbor_num_limit = neighbor_num_limit
        self.stats = ['sum', 'mean', 'std', 'min', 'max']
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_dists_col]
        if output_file_prefix is None:
            output_file_prefix = 'feature_{}_{}_{}_stats'.format(
                self.category, self.dependent_name_, self.dist_type)
        self.output_file_prefix = output_file_prefix

    def transform(self, X=None):
        """Return the neighbor-distance statistics, one row per atom."""
        if self.calculated_X is not None:
            X = self.calculated_X
        n_atoms = len(X)
        padded_dists = get_isometric_lists(
            X[self.neighbor_dists_col].values,
            limit_width=self.neighbor_num_limit)
        stats_array = voronoi_stats.voronoi_distance_stats(
            np.zeros((n_atoms, len(self.stats))),
            X[self.neighbor_num_col].values, padded_dists,
            n_atoms=n_atoms, neighbor_num_limit=self.neighbor_num_limit)
        dist_stats_df = pd.DataFrame(stats_array, index=X.index,
                                     columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=dist_stats_df, name=self.output_file_prefix)
        return dist_stats_df

    def get_feature_names(self):
        """Column names: ``{dist_type}_{stat}_{dependent_name}``."""
        return ['{}_{}_{}'.format(self.dist_type, stat, self.dependent_name_)
                for stat in self.stats]

    @property
    def double_dependency(self):
        # Distance stats need only the single chosen nearest-neighbor class.
        return False
class BOOP(BaseSRO):
    """Bond-orientational order parameters (Q_l, W_l and coarse-grained
    variants) for l in {4, 6, 8, 10}.

    Atom coordinates and box bounds come either from ``coords_path`` (read
    with ``read_imd``) or directly from ``atom_coords``/``bds``.
    """

    def __init__(self, backend=None, dependent_class="voro", coords_path=None,
                 atom_coords=None, bds=None, pbc=None, low_order=1,
                 higher_order=1, coarse_lower_order=1, coarse_higher_order=1,
                 neighbor_num_limit=80, save=True, output_path=None,
                 output_file_prefix=None, **nn_kwargs):
        super(BOOP, self).__init__(save=save, backend=backend,
                                   dependent_class=dependent_class,
                                   output_path=output_path,
                                   **nn_kwargs)
        self.low_order = low_order
        self.higher_order = higher_order
        self.coarse_lower_order = coarse_lower_order
        self.coarse_higher_order = coarse_higher_order
        # Prefer the coordinate file when it exists; otherwise fall back to
        # the explicitly supplied arrays.
        if coords_path is not None and os.path.exists(coords_path):
            _, _, self.atom_coords, self.bds = read_imd(coords_path)
        else:
            self.atom_coords = atom_coords
            self.bds = bds
        if self.atom_coords is None or self.bds is None:
            raise ValueError("Please make sure atom_coords and bds are not "
                             "None or coords_path is not None")
        self.pbc = pbc if pbc else [1, 1, 1]
        self.neighbor_num_limit = neighbor_num_limit
        self.bq_tags = ['4', '6', '8', '10']
        self.dependent_cols_ = [self.neighbor_num_col, self.neighbor_ids_col]
        if output_file_prefix is None:
            output_file_prefix = 'feature_{}_{}_boop'.format(
                self.category, self.dependent_name_)
        self.output_file_prefix = output_file_prefix

    def transform(self, X=None):
        """Return Q/W and coarse-grained Q/W columns, one row per atom."""
        if self.calculated_X is not None:
            X = self.calculated_X
        n_atoms = len(X)
        padded_ids = get_isometric_lists(
            X[self.neighbor_ids_col].values,
            limit_width=self.neighbor_num_limit)
        # Output buffers for the compiled routine, one per order family.
        Ql, Wlbar, coarse_Ql, coarse_Wlbar = [
            np.zeros((n_atoms, 4), dtype=np.longdouble) for _ in range(4)]
        Ql, Wlbar, coarse_Ql, coarse_Wlbar = \
            boop.calculate_boop(
                self.atom_coords.astype(np.longdouble),
                self.pbc, np.array(self.bds, dtype=np.longdouble),
                X[self.neighbor_num_col].values, padded_ids,
                self.low_order, self.higher_order, self.coarse_lower_order,
                self.coarse_higher_order, Ql, Wlbar, coarse_Ql, coarse_Wlbar,
                n_atoms=n_atoms, n_neighbor_limit=self.neighbor_num_limit)
        combined = np.hstack([Ql, Wlbar, coarse_Ql, coarse_Wlbar])
        boop_df = pd.DataFrame(combined, index=X.index,
                               columns=self.get_feature_names())
        if self.save:
            self.backend.save_featurizer_as_dataframe(
                output_df=boop_df, name=self.output_file_prefix)
        return boop_df

    def get_feature_names(self):
        """Column names: q_l, w_l, then coarse-grained q_l and w_l."""
        names = []
        for template in ('q_{}_{}', 'w_{}_{}',
                         'Coarse_grained_q_{}_{}', 'Coarse_grained_w_{}_{}'):
            names.extend(template.format(num, self.dependent_name_)
                         for num in self.bq_tags)
        return names
| [
"amlearn.utils.data.get_isometric_lists",
"amlearn.utils.packing.tetra_volume",
"amlearn.utils.data.read_imd",
"numpy.array",
"copy.copy",
"os.path.exists",
"amlearn.utils.packing.pbc_image_nn_coords",
"six.with_metaclass",
"amlearn.utils.data.read_lammps_dump",
"amlearn.utils.data.list_like",
"... | [((7291, 7312), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (7300, 7312), False, 'from functools import lru_cache\n'), ((8428, 8470), 'six.with_metaclass', 'six.with_metaclass', (['ABCMeta', 'BaseFeaturize'], {}), '(ABCMeta, BaseFeaturize)\n', (8446, 8470), False, 'import six\n'), ((11668, 11704), 'six.with_metaclass', 'six.with_metaclass', (['ABCMeta', 'BaseSRO'], {}), '(ABCMeta, BaseSRO)\n', (11686, 11704), False, 'import six\n'), ((823, 848), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (838, 848), False, 'import os\n'), ((2277, 2320), 'numpy.array', 'np.array', (['[[0, 1, 2], [1, 0, 2], [2, 0, 1]]'], {}), '([[0, 1, 2], [1, 0, 2], [2, 0, 1]])\n', (2285, 2320), True, 'import numpy as np\n'), ((6895, 6911), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (6906, 6911), False, 'from collections import defaultdict\n'), ((7984, 8020), 'amlearn.featurize.nearest_neighbor.VoroNN', 'VoroNN', ([], {'backend': 'backend'}), '(backend=backend, **nn_kwargs)\n', (7990, 8020), False, 'from amlearn.featurize.nearest_neighbor import VoroNN, DistanceNN, BaseNN\n'), ((41136, 41232), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_edges_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_edges_col].values, limit_width=self.\n neighbor_num_limit)\n', (41155, 41232), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((44850, 44873), 'numpy.array', 'np.array', (['motif_one_hot'], {}), '(motif_one_hot)\n', (44858, 44873), True, 'import numpy as np\n'), ((47059, 47155), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_edges_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_edges_col].values, limit_width=self.\n neighbor_num_limit)\n', (47078, 47155), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, 
get_isometric_lists, list_like\n'), ((49286, 49382), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_edges_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_edges_col].values, limit_width=self.\n neighbor_num_limit)\n', (49305, 49382), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((51781, 51877), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_edges_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_edges_col].values, limit_width=self.\n neighbor_num_limit)\n', (51800, 51877), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((56635, 56731), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_edges_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_edges_col].values, limit_width=self.\n neighbor_num_limit)\n', (56654, 56731), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((61468, 61564), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_edges_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_edges_col].values, limit_width=self.\n neighbor_num_limit)\n', (61487, 61564), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((63924, 64020), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_dists_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_dists_col].values, limit_width=self.\n neighbor_num_limit)\n', (63943, 64020), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((66757, 66851), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_ids_col].values'], 
{'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_ids_col].values, limit_width=self.\n neighbor_num_limit)\n', (66776, 66851), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((66886, 66929), 'numpy.zeros', 'np.zeros', (['(n_atoms, 4)'], {'dtype': 'np.longdouble'}), '((n_atoms, 4), dtype=np.longdouble)\n', (66894, 66929), True, 'import numpy as np\n'), ((66946, 66989), 'numpy.zeros', 'np.zeros', (['(n_atoms, 4)'], {'dtype': 'np.longdouble'}), '((n_atoms, 4), dtype=np.longdouble)\n', (66954, 66989), True, 'import numpy as np\n'), ((67010, 67053), 'numpy.zeros', 'np.zeros', (['(n_atoms, 4)'], {'dtype': 'np.longdouble'}), '((n_atoms, 4), dtype=np.longdouble)\n', (67018, 67053), True, 'import numpy as np\n'), ((67077, 67120), 'numpy.zeros', 'np.zeros', (['(n_atoms, 4)'], {'dtype': 'np.longdouble'}), '((n_atoms, 4), dtype=np.longdouble)\n', (67085, 67120), True, 'import numpy as np\n'), ((67637, 67665), 'numpy.append', 'np.append', (['Ql', 'Wlbar'], {'axis': '(1)'}), '(Ql, Wlbar, axis=1)\n', (67646, 67665), True, 'import numpy as np\n'), ((67689, 67731), 'numpy.append', 'np.append', (['concat_array', 'coarse_Ql'], {'axis': '(1)'}), '(concat_array, coarse_Ql, axis=1)\n', (67698, 67731), True, 'import numpy as np\n'), ((67755, 67800), 'numpy.append', 'np.append', (['concat_array', 'coarse_Wlbar'], {'axis': '(1)'}), '(concat_array, coarse_Wlbar, axis=1)\n', (67764, 67800), True, 'import numpy as np\n'), ((1311, 1323), 'amlearn.utils.packing.load_radii', 'load_radii', ([], {}), '()\n', (1321, 1323), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((2814, 2842), 'amlearn.utils.packing.triangle_area', 'triangle_area', (['*facet_coords'], {}), '(*facet_coords)\n', (2827, 2842), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, 
tetra_volume\n'), ((3286, 3329), 'numpy.array', 'np.array', (['[[0, 1, 2], [1, 0, 2], [2, 0, 1]]'], {}), '([[0, 1, 2], [1, 0, 2], [2, 0, 1]])\n', (3294, 3329), True, 'import numpy as np\n'), ((4667, 4706), 'amlearn.utils.packing.solid_angle', 'solid_angle', (['self.coords', '*facet_coords'], {}), '(self.coords, *facet_coords)\n', (4678, 4706), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((4873, 4913), 'amlearn.utils.packing.tetra_volume', 'tetra_volume', (['self.coords', '*facet_coords'], {}), '(self.coords, *facet_coords)\n', (4885, 4913), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((8082, 8122), 'amlearn.featurize.nearest_neighbor.DistanceNN', 'DistanceNN', ([], {'backend': 'backend'}), '(backend=backend, **nn_kwargs)\n', (8092, 8122), False, 'from amlearn.featurize.nearest_neighbor import VoroNN, DistanceNN, BaseNN\n'), ((12309, 12321), 'amlearn.utils.packing.load_radii', 'load_radii', ([], {}), '()\n', (12319, 12321), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((13477, 13504), 'os.path.exists', 'os.path.exists', (['lammps_path'], {}), '(lammps_path)\n', (13491, 13504), False, 'import os\n'), ((13545, 13574), 'amlearn.utils.data.read_lammps_dump', 'read_lammps_dump', (['lammps_path'], {}), '(lammps_path)\n', (13561, 13574), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((13618, 13633), 'copy.copy', 'copy', (['lammps_df'], {}), '(lammps_df)\n', (13622, 13633), False, 'from copy import copy\n'), ((16461, 16554), 'amlearn.utils.verbose.VerboseReporter', 'VerboseReporter', (['self.backend'], {'total_stage': '(1)', 'verbose': 'self.verbose', 'max_verbose_mod': '(10000)'}), '(self.backend, 
total_stage=1, verbose=self.verbose,\n max_verbose_mod=10000)\n', (16476, 16554), False, 'from amlearn.utils.verbose import VerboseReporter\n'), ((22266, 22359), 'amlearn.utils.verbose.VerboseReporter', 'VerboseReporter', (['self.backend'], {'total_stage': '(1)', 'verbose': 'self.verbose', 'max_verbose_mod': '(10000)'}), '(self.backend, total_stage=1, verbose=self.verbose,\n max_verbose_mod=10000)\n', (22281, 22359), False, 'from amlearn.utils.verbose import VerboseReporter\n'), ((32416, 32509), 'amlearn.utils.verbose.VerboseReporter', 'VerboseReporter', (['self.backend'], {'total_stage': '(1)', 'verbose': 'self.verbose', 'max_verbose_mod': '(10000)'}), '(self.backend, total_stage=1, verbose=self.verbose,\n max_verbose_mod=10000)\n', (32431, 32509), False, 'from amlearn.utils.verbose import VerboseReporter\n'), ((36813, 36906), 'amlearn.utils.verbose.VerboseReporter', 'VerboseReporter', (['self.backend'], {'total_stage': '(1)', 'verbose': 'self.verbose', 'max_verbose_mod': '(10000)'}), '(self.backend, total_stage=1, verbose=self.verbose,\n max_verbose_mod=10000)\n', (36828, 36906), False, 'from amlearn.utils.verbose import VerboseReporter\n'), ((43006, 43073), 'numpy.array', 'np.array', (['[[0, 0, 12, 0, 0], [0, 0, 12, 4, 0]]'], {'dtype': 'np.longdouble'}), '([[0, 0, 12, 0, 0], [0, 0, 12, 4, 0]], dtype=np.longdouble)\n', (43014, 43073), True, 'import numpy as np\n'), ((65780, 65807), 'os.path.exists', 'os.path.exists', (['coords_path'], {}), '(coords_path)\n', (65794, 65807), False, 'import os\n'), ((65856, 65877), 'amlearn.utils.data.read_imd', 'read_imd', (['coords_path'], {}), '(coords_path)\n', (65864, 65877), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((67283, 67322), 'numpy.array', 'np.array', (['self.bds'], {'dtype': 'np.longdouble'}), '(self.bds, dtype=np.longdouble)\n', (67291, 67322), True, 'import numpy as np\n'), ((1492, 1561), 'amlearn.utils.packing.pbc_image_nn_coords', 'pbc_image_nn_coords', 
(['self.coords', 'neighbor_coords', 'self.bds', 'self.pbc'], {}), '(self.coords, neighbor_coords, self.bds, self.pbc)\n', (1511, 1561), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((2584, 2624), 'amlearn.utils.packing.triangular_angle', 'triangular_angle', (['*facet_coords[triplet]'], {}), '(*facet_coords[triplet])\n', (2600, 2624), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((17565, 17621), 'amlearn.utils.packing.calc_stats', 'calc_stats', (['neighbor_dist_interstice_list', 'self.stat_ops'], {}), '(neighbor_dist_interstice_list, self.stat_ops)\n', (17575, 17621), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((20656, 20667), 'amlearn.utils.data.list_like', 'list_like', ([], {}), '()\n', (20665, 20667), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((20879, 20890), 'amlearn.utils.data.list_like', 'list_like', ([], {}), '()\n', (20888, 20890), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((45050, 45072), 'numpy.array', 'np.array', (['[is_120_124]'], {}), '([is_120_124])\n', (45058, 45072), True, 'import numpy as np\n'), ((49440, 49536), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_areas_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_areas_col].values, limit_width=self.\n neighbor_num_limit)\n', (49459, 49536), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((51934, 52029), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_vols_col].values'], {'limit_width': 
'self.neighbor_num_limit'}), '(X[self.neighbor_vols_col].values, limit_width=self.\n neighbor_num_limit)\n', (51953, 52029), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((54226, 54322), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_areas_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_areas_col].values, limit_width=self.\n neighbor_num_limit)\n', (54245, 54322), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((56773, 56869), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_areas_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_areas_col].values, limit_width=self.\n neighbor_num_limit)\n', (56792, 56869), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((59020, 59115), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_vols_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_vols_col].values, limit_width=self.\n neighbor_num_limit)\n', (59039, 59115), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((61605, 61700), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['X[self.neighbor_vols_col].values'], {'limit_width': 'self.neighbor_num_limit'}), '(X[self.neighbor_vols_col].values, limit_width=self.\n neighbor_num_limit)\n', (61624, 61700), False, 'from amlearn.utils.data import read_imd, read_lammps_dump, get_isometric_lists, list_like\n'), ((3589, 3637), 'amlearn.utils.packing.solid_angle', 'solid_angle', (['*facet_coords[triplet]', 'self.coords'], {}), '(*facet_coords[triplet], self.coords)\n', (3600, 3637), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, 
tetra_volume\n'), ((24104, 24136), 'numpy.array', 'np.array', (['volume_interstice_list'], {}), '(volume_interstice_list)\n', (24112, 24136), True, 'import numpy as np\n'), ((24137, 24158), 'numpy.array', 'np.array', (['volume_list'], {}), '(volume_list)\n', (24145, 24158), True, 'import numpy as np\n'), ((26450, 26480), 'numpy.array', 'np.array', (['area_interstice_list'], {}), '(area_interstice_list)\n', (26458, 26480), True, 'import numpy as np\n'), ((26483, 26502), 'numpy.array', 'np.array', (['area_list'], {}), '(area_list)\n', (26491, 26502), True, 'import numpy as np\n'), ((24667, 24716), 'amlearn.utils.packing.calc_stats', 'calc_stats', (['volume_interstice_list', 'self.stat_ops'], {}), '(volume_interstice_list, self.stat_ops)\n', (24677, 24716), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((26961, 27008), 'amlearn.utils.packing.calc_stats', 'calc_stats', (['area_interstice_list', 'self.stat_ops'], {}), '(area_interstice_list, self.stat_ops)\n', (26971, 27008), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((27225, 27282), 'amlearn.utils.packing.calc_stats', 'calc_stats', (['area_interstice_original_array', 'self.stat_ops'], {}), '(area_interstice_original_array, self.stat_ops)\n', (27235, 27282), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((28129, 28206), 'amlearn.utils.packing.calc_stats', 'calc_stats', (['(area_interstice_original_array / center_slice_area)', 'self.stat_ops'], {}), '(area_interstice_original_array / center_slice_area, self.stat_ops)\n', (28139, 28206), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((24986, 25045), 
'amlearn.utils.packing.calc_stats', 'calc_stats', (['volume_interstice_original_array', 'self.stat_ops'], {}), '(volume_interstice_original_array, self.stat_ops)\n', (24996, 25045), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n'), ((25912, 25987), 'amlearn.utils.packing.calc_stats', 'calc_stats', (['(volume_interstice_original_array / center_volume)', 'self.stat_ops'], {}), '(volume_interstice_original_array / center_volume, self.stat_ops)\n', (25922, 25987), False, 'from amlearn.utils.packing import load_radii, pbc_image_nn_coords, solid_angle, triangular_angle, calc_stats, triangle_area, tetra_volume\n')] |
import quandl
import pandas as pd
import numpy as np
import datetime
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing, cross_validation
# Pull the full Amazon price history from Quandl and keep only adjusted close.
prices = quandl.get("WIKI/AMZN")
prices = prices[['Adj. Close']]

horizon = int(30)  # number of trading days ahead to predict
# Label column: adjusted close shifted `horizon` rows up, so each row's target
# is the price `horizon` days later (last `horizon` rows get NaN labels).
prices['Prediction'] = prices[['Adj. Close']].shift(-horizon)

# Feature matrix: standardized adjusted-close values.
features = np.array(prices.drop(['Prediction'], 1))
features = preprocessing.scale(features)
future_features = features[-horizon:]  # most recent rows — no labels yet, used for forecasting
features = features[:-horizon]         # drop the unlabeled tail from training data

targets = np.array(prices['Prediction'])
targets = targets[:-horizon]

train_x, test_x, train_y, test_y = cross_validation.train_test_split(features, targets, test_size = 0.2)

# Training
model = LinearRegression()
model.fit(train_x, train_y)

# Testing
confidence = model.score(test_x, test_y)
print("confidence: ", confidence)

forecast_prediction = model.predict(future_features)
print(forecast_prediction)
| [
"numpy.array",
"quandl.get",
"sklearn.cross_validation.train_test_split",
"sklearn.linear_model.LinearRegression",
"sklearn.preprocessing.scale"
] | [((178, 201), 'quandl.get', 'quandl.get', (['"""WIKI/AMZN"""'], {}), "('WIKI/AMZN')\n", (188, 201), False, 'import quandl\n'), ((462, 484), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['X'], {}), '(X)\n', (481, 484), False, 'from sklearn import preprocessing, cross_validation\n'), ((602, 628), 'numpy.array', 'np.array', (["df['Prediction']"], {}), "(df['Prediction'])\n", (610, 628), True, 'import numpy as np\n'), ((688, 742), 'sklearn.cross_validation.train_test_split', 'cross_validation.train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (721, 742), False, 'from sklearn import preprocessing, cross_validation\n'), ((763, 781), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (779, 781), False, 'from sklearn.linear_model import LinearRegression\n')] |
from glob import glob
import os
from typing import List
from PIL import Image, ImageFilter
import numpy as np
import csv
import random
import shutil
from joblib import Parallel, delayed
import cv2
from preprocess.util import (
ImageInfo,
cal_new_size,
hex_to_rgb,
noisy,
printStats,
random_blur,
random_phase,
random_quality,
)
def main(
    input_dataset_path, output_dataset_path, augmentation, min_size, max_size, threads
):
    """Rebuild the output dataset from every simulation annotation file.

    Collects all ``*.txt`` annotation files under
    ``<input>/Simulation*/<camera>/``, recreates a clean output directory tree
    with ``train``/``val``/``test`` subfolders, then processes every file in
    parallel via :func:`Process` and prints summary statistics.

    Parameters
    ----------
    input_dataset_path : str
        Root directory of the raw simulation data.
    output_dataset_path : str
        Destination root; it is deleted and recreated if it already exists.
    augmentation : bool
        Forwarded to :func:`Process` — whether to emit augmented copies.
    min_size, max_size : int
        Image size limits forwarded to :func:`Process`.
    threads : int
        Number of parallel joblib workers.
    """
    output = output_dataset_path
    # Pick from all simulations, all cameras, all txt annotation files.
    files = sorted(glob(os.path.join(input_dataset_path, "Simulation*", "*", "*.txt")))
    # Start from a clean output tree: wipe any previous run, then create the
    # root plus the three phase subdirectories.
    if os.path.exists(output):
        shutil.rmtree(output)
    os.makedirs(output)
    for phase in ("train", "val", "test"):
        os.makedirs(os.path.join(output, phase), exist_ok=True)
    print(f"{len(files)} images found")
    infos = Parallel(n_jobs=threads, verbose=10)(
        delayed(Process)(files[i], i, output, augmentation, min_size, max_size)
        for i in range(len(files))
    )
    printStats(infos)
printStats(infos)
def generate_data(im_path, min_size, max_size):
    """Load one image and its validated point annotations, resized to the size limits.

    A point from the ``.txt`` annotation file is kept only when the companion
    segmentation mask (``*_mask.bmp``) shows that point's colour at the
    (clamped) coordinate, i.e. the annotation actually lies on its object.

    Returns
    -------
    tuple
        ``(image, points)`` — the (possibly resized) RGB PIL image and an
        ``(N, 2)`` float array of coordinates scaled by the same resize ratio.
    """
    image = Image.open(im_path).convert("RGB")
    width, height = image.size
    annotation_path = im_path.replace(".bmp", ".txt")
    mask = Image.open(annotation_path.replace(".txt", "_mask.bmp")).convert("RGB")
    kept = []
    with open(annotation_path) as handle:
        for x, y, c in csv.reader(handle, delimiter=";"):
            r, g, b = hex_to_rgb(c)
            # Clamp annotations that fall just outside the right/bottom edge.
            px = min(width - 1, float(x))
            py = min(height - 1, float(y))
            if mask.getpixel((px, py)) == (r, g, b):
                kept.append([px, py])
    points = np.array(kept) if kept else np.empty((0, 2))
    height, width, ratio = cal_new_size(height, width, min_size, max_size)
    if ratio != 1.0:
        image = image.resize((width, height), Image.BICUBIC)
        points = points * ratio
    return image, points
def Process(txt_path, i, output, augmentation, min_size, max_size) -> List[ImageInfo]:
    """Convert one annotated simulation frame into dataset sample(s).

    Loads the ``.bmp`` image paired with *txt_path*, saves it (with its point
    annotations as an ``.npy`` file) into a phase directory chosen by
    ``random_phase()``, and — when *augmentation* is enabled — additionally
    writes a noisy and a Gaussian-blurred variant into ``train``.

    Parameters
    ----------
    txt_path : str
        Path of the annotation text file for one frame.
    i : int
        Index used to name the output files (``img_{i}.*``).
    output : str
        Root output directory containing ``train``/``val``/``test``.
    augmentation : bool
        Whether to emit the two augmented copies.
    min_size, max_size : int
        Size limits forwarded to ``generate_data``.

    Returns
    -------
    List[ImageInfo]
        One entry per saved image (tripled when augmentation ran); empty when
        the image or its segmentation mask is missing.
    """
    standard_path = txt_path.replace(".txt", ".bmp")
    segmentation_path = txt_path.replace(".txt", "_mask.bmp")
    infos = []
    # Skip frames with a missing image or missing segmentation mask.
    if os.path.exists(standard_path) and os.path.exists(segmentation_path):
        im, points = generate_data(standard_path, min_size, max_size)
        infos.append(ImageInfo(im.width, im.height, len(points)))
        # NOTE(review): random_phase() presumably picks train/val/test at
        # random, so the split is decided per image — confirm in preprocess.util.
        phase = random_phase()
        im.save(os.path.join(output, phase, f"img_{i}.jpg"), quality=random_quality())
        np.save(os.path.join(output, phase, f"img_{i}.npy"), points)
        if augmentation:
            # Augmented variants always go to the training split.
            phase = "train"
            noisy_standard = noisy(im)
            noisy_standard.save(
                os.path.join(output, phase, f"img_{i}_noise.jpg"),
                quality=random_quality(),
            )
            np.save(os.path.join(output, phase, f"img_{i}_noise.npy"), points)
            blur_size = random_blur()
            blurred_standard = Image.fromarray(
                cv2.GaussianBlur(
                    np.array(im), (blur_size, blur_size), np.random.randint(1, 4)
                )
            )
            blurred_standard.save(
                os.path.join(output, phase, f"img_{i}_blur.jpg"),
                quality=random_quality(),
            )
            np.save(os.path.join(output, phase, f"img_{i}_blur.npy"), points)
            # Same ImageInfo describes all three saved variants.
            infos = infos * 3
    return infos
| [
"os.path.exists",
"PIL.Image.open",
"preprocess.util.random_phase",
"preprocess.util.hex_to_rgb",
"os.path.join",
"joblib.delayed",
"joblib.Parallel",
"numpy.array",
"preprocess.util.printStats",
"preprocess.util.random_blur",
"preprocess.util.cal_new_size",
"numpy.empty",
"numpy.random.rand... | [((670, 692), 'os.path.exists', 'os.path.exists', (['output'], {}), '(output)\n', (684, 692), False, 'import os\n'), ((1340, 1357), 'preprocess.util.printStats', 'printStats', (['infos'], {}), '(infos)\n', (1350, 1357), False, 'from preprocess.util import ImageInfo, cal_new_size, hex_to_rgb, noisy, printStats, random_blur, random_phase, random_quality\n'), ((2137, 2181), 'preprocess.util.cal_new_size', 'cal_new_size', (['im_h', 'im_w', 'min_size', 'max_size'], {}), '(im_h, im_w, min_size, max_size)\n', (2149, 2181), False, 'from preprocess.util import ImageInfo, cal_new_size, hex_to_rgb, noisy, printStats, random_blur, random_phase, random_quality\n'), ((702, 723), 'shutil.rmtree', 'shutil.rmtree', (['output'], {}), '(output)\n', (715, 723), False, 'import shutil\n'), ((1177, 1213), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'threads', 'verbose': '(10)'}), '(n_jobs=threads, verbose=10)\n', (1185, 1213), False, 'from joblib import Parallel, delayed\n'), ((1719, 1753), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""";"""'}), "(csvfile, delimiter=';')\n", (1729, 1753), False, 'import csv\n'), ((2054, 2070), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (2062, 2070), True, 'import numpy as np\n'), ((2098, 2114), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (2106, 2114), True, 'import numpy as np\n'), ((2530, 2559), 'os.path.exists', 'os.path.exists', (['standard_path'], {}), '(standard_path)\n', (2544, 2559), False, 'import os\n'), ((2564, 2597), 'os.path.exists', 'os.path.exists', (['segmentation_path'], {}), '(segmentation_path)\n', (2578, 2597), False, 'import os\n'), ((2752, 2766), 'preprocess.util.random_phase', 'random_phase', ([], {}), '()\n', (2764, 2766), False, 'from preprocess.util import ImageInfo, cal_new_size, hex_to_rgb, noisy, printStats, random_blur, random_phase, random_quality\n'), ((741, 761), 'os.path.join', 'os.path.join', (['output'], {}), '(output)\n', (753, 761), False, 'import 
os\n'), ((790, 810), 'os.path.join', 'os.path.join', (['output'], {}), '(output)\n', (802, 810), False, 'import os\n'), ((838, 867), 'os.path.join', 'os.path.join', (['output', '"""train"""'], {}), "(output, 'train')\n", (850, 867), False, 'import os\n'), ((887, 916), 'os.path.join', 'os.path.join', (['output', '"""train"""'], {}), "(output, 'train')\n", (899, 916), False, 'import os\n'), ((944, 971), 'os.path.join', 'os.path.join', (['output', '"""val"""'], {}), "(output, 'val')\n", (956, 971), False, 'import os\n'), ((991, 1018), 'os.path.join', 'os.path.join', (['output', '"""val"""'], {}), "(output, 'val')\n", (1003, 1018), False, 'import os\n'), ((1046, 1074), 'os.path.join', 'os.path.join', (['output', '"""test"""'], {}), "(output, 'test')\n", (1058, 1074), False, 'import os\n'), ((1094, 1122), 'os.path.join', 'os.path.join', (['output', '"""test"""'], {}), "(output, 'test')\n", (1106, 1122), False, 'import os\n'), ((1417, 1436), 'PIL.Image.open', 'Image.open', (['im_path'], {}), '(im_path)\n', (1427, 1436), False, 'from PIL import Image, ImageFilter\n'), ((1621, 1650), 'PIL.Image.open', 'Image.open', (['segmentation_path'], {}), '(segmentation_path)\n', (1631, 1650), False, 'from PIL import Image, ImageFilter\n'), ((1807, 1820), 'preprocess.util.hex_to_rgb', 'hex_to_rgb', (['c'], {}), '(c)\n', (1817, 1820), False, 'from preprocess.util import ImageInfo, cal_new_size, hex_to_rgb, noisy, printStats, random_blur, random_phase, random_quality\n'), ((2784, 2827), 'os.path.join', 'os.path.join', (['output', 'phase', 'f"""img_{i}.jpg"""'], {}), "(output, phase, f'img_{i}.jpg')\n", (2796, 2827), False, 'import os\n'), ((2871, 2914), 'os.path.join', 'os.path.join', (['output', 'phase', 'f"""img_{i}.npy"""'], {}), "(output, phase, f'img_{i}.npy')\n", (2883, 2914), False, 'import os\n'), ((3007, 3016), 'preprocess.util.noisy', 'noisy', (['im'], {}), '(im)\n', (3012, 3016), False, 'from preprocess.util import ImageInfo, cal_new_size, hex_to_rgb, noisy, printStats, 
random_blur, random_phase, random_quality\n'), ((3277, 3290), 'preprocess.util.random_blur', 'random_blur', ([], {}), '()\n', (3288, 3290), False, 'from preprocess.util import ImageInfo, cal_new_size, hex_to_rgb, noisy, printStats, random_blur, random_phase, random_quality\n'), ((610, 658), 'os.path.join', 'os.path.join', (['input', '"""Simulation*"""', '"""*"""', '"""*.txt"""'], {}), "(input, 'Simulation*', '*', '*.txt')\n", (622, 658), False, 'import os\n'), ((1223, 1239), 'joblib.delayed', 'delayed', (['Process'], {}), '(Process)\n', (1230, 1239), False, 'from joblib import Parallel, delayed\n'), ((2837, 2853), 'preprocess.util.random_quality', 'random_quality', ([], {}), '()\n', (2851, 2853), False, 'from preprocess.util import ImageInfo, cal_new_size, hex_to_rgb, noisy, printStats, random_blur, random_phase, random_quality\n'), ((3066, 3115), 'os.path.join', 'os.path.join', (['output', 'phase', 'f"""img_{i}_noise.jpg"""'], {}), "(output, phase, f'img_{i}_noise.jpg')\n", (3078, 3115), False, 'import os\n'), ((3193, 3242), 'os.path.join', 'os.path.join', (['output', 'phase', 'f"""img_{i}_noise.npy"""'], {}), "(output, phase, f'img_{i}_noise.npy')\n", (3205, 3242), False, 'import os\n'), ((3538, 3586), 'os.path.join', 'os.path.join', (['output', 'phase', 'f"""img_{i}_blur.jpg"""'], {}), "(output, phase, f'img_{i}_blur.jpg')\n", (3550, 3586), False, 'import os\n'), ((3664, 3712), 'os.path.join', 'os.path.join', (['output', 'phase', 'f"""img_{i}_blur.npy"""'], {}), "(output, phase, f'img_{i}_blur.npy')\n", (3676, 3712), False, 'import os\n'), ((3141, 3157), 'preprocess.util.random_quality', 'random_quality', ([], {}), '()\n', (3155, 3157), False, 'from preprocess.util import ImageInfo, cal_new_size, hex_to_rgb, noisy, printStats, random_blur, random_phase, random_quality\n'), ((3393, 3405), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (3401, 3405), True, 'import numpy as np\n'), ((3431, 3454), 'numpy.random.randint', 'np.random.randint', (['(1)', '(4)'], {}), 
'(1, 4)\n', (3448, 3454), True, 'import numpy as np\n'), ((3612, 3628), 'preprocess.util.random_quality', 'random_quality', ([], {}), '()\n', (3626, 3628), False, 'from preprocess.util import ImageInfo, cal_new_size, hex_to_rgb, noisy, printStats, random_blur, random_phase, random_quality\n')] |
'''
Author: <NAME>
Date: 2021-07-08 10:50:22
LastEditTime: 2021-07-08 14:10:04
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: /genetic-drawing/mask.py
'''
import cv2
import numpy as np
def main(image_path="03.jpg", mask_dir="masks-03"):
    """Generate a coarse-to-fine set of edge-dilation masks for one image.

    Parameters
    ----------
    image_path : str
        Input image path (default keeps the original hard-coded ``"03.jpg"``).
    mask_dir : str
        Existing directory the mask images are written into (default keeps the
        original ``"masks-03"``).
    """
    # 1. Load the image and convert it to grayscale.
    img_src = cv2.imread(image_path)
    img_gray = cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
    # 2. Edge detection followed by binarization.
    img_canny = cv2.Canny(img_gray, 40, 140)
    _, img_bin = cv2.threshold(img_canny, 127, 255, cv2.THRESH_BINARY)
    # A large dilation kernel merges nearby edges into one blob.
    img_binmax = cv2.dilate(img_bin, np.ones((50, 50), np.uint8))
    # 3. Connected-component analysis: fill the first contour solid white
    #    to produce the coarsest mask (mask-0).
    contours, hierarchy = cv2.findContours(img_binmax, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    color = [255, 255, 255]
    img_result0 = cv2.fillPoly(img_binmax.copy(), [np.array(contours[0])], color)
    cv2.imwrite("{}/mask-0.jpg".format(mask_dir), img_result0)
    # Progressively smaller dilation kernels give finer masks 1..4.
    for idx, k in enumerate((30, 10, 5, 3), start=1):
        img_bin_i = cv2.dilate(img_bin, np.ones((k, k), np.uint8))
        cv2.imwrite("{}/mask-{}.jpg".format(mask_dir, idx), img_bin_i)


if __name__ == '__main__':
    main()
"cv2.imwrite",
"numpy.ones",
"cv2.threshold",
"numpy.array",
"cv2.cvtColor",
"cv2.findContours",
"cv2.dilate",
"cv2.Canny",
"cv2.imread"
] | [((261, 281), 'cv2.imread', 'cv2.imread', (['"""03.jpg"""'], {}), "('03.jpg')\n", (271, 281), False, 'import cv2\n'), ((314, 355), 'cv2.cvtColor', 'cv2.cvtColor', (['img_src', 'cv2.COLOR_BGR2GRAY'], {}), '(img_src, cv2.COLOR_BGR2GRAY)\n', (326, 355), False, 'import cv2\n'), ((370, 398), 'cv2.Canny', 'cv2.Canny', (['img_gray', '(40)', '(140)'], {}), '(img_gray, 40, 140)\n', (379, 398), False, 'import cv2\n'), ((416, 469), 'cv2.threshold', 'cv2.threshold', (['img_canny', '(127)', '(255)', 'cv2.THRESH_BINARY'], {}), '(img_canny, 127, 255, cv2.THRESH_BINARY)\n', (429, 469), False, 'import cv2\n'), ((483, 510), 'numpy.ones', 'np.ones', (['(50, 50)', 'np.uint8'], {}), '((50, 50), np.uint8)\n', (490, 510), True, 'import numpy as np\n'), ((526, 553), 'cv2.dilate', 'cv2.dilate', (['img_bin', 'kernel'], {}), '(img_bin, kernel)\n', (536, 553), False, 'import cv2\n'), ((594, 662), 'cv2.findContours', 'cv2.findContours', (['img_binmax', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(img_binmax, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (610, 662), False, 'import cv2\n'), ((845, 892), 'cv2.imwrite', 'cv2.imwrite', (['"""masks-03/mask-0.jpg"""', 'img_result0'], {}), "('masks-03/mask-0.jpg', img_result0)\n", (856, 892), False, 'import cv2\n'), ((987, 1012), 'numpy.ones', 'np.ones', (['(k, k)', 'np.uint8'], {}), '((k, k), np.uint8)\n', (994, 1012), True, 'import numpy as np\n'), ((1031, 1058), 'cv2.dilate', 'cv2.dilate', (['img_bin', 'kernel'], {}), '(img_bin, kernel)\n', (1041, 1058), False, 'import cv2\n'), ((810, 831), 'numpy.array', 'np.array', (['contours[0]'], {}), '(contours[0])\n', (818, 831), True, 'import numpy as np\n')] |
import pandas
from lux.vis.VisList import VisList
from lux.vis.Vis import Vis
from lux.core.frame import LuxDataFrame
from lux.executor.Executor import Executor
from lux.utils import utils
class PandasExecutor(Executor):
    '''
    Given a Vis objects with complete specifications, fetch and process data using Pandas dataframe operations.
    '''
    def __init__(self):
        self.name = "PandasExecutor"
    def __repr__(self):
        return f"<PandasExecutor>"
    @staticmethod
    def execute(vislist:VisList, ldf:LuxDataFrame):
        '''
        Given a VisList, fetch the data required to render the vis.
        1) Apply filters
        2) Retrieve relevant attribute
        3) Perform vis-related processing (aggregation, binning)
        4) return a DataFrame with relevant results
        Parameters
        ----------
        vislist: list[lux.Vis]
            vis list that contains lux.Vis objects for visualization.
        ldf : lux.core.frame
            LuxDataFrame with specified intent.
        Returns
        -------
        None
        '''
        for vis in vislist:
            vis._vis_data = ldf # The vis data starts off being the same as the content of the original dataframe
            filter_executed = PandasExecutor.execute_filter(vis)
            # Select relevant data based on attribute information
            attributes = set([])
            for clause in vis._inferred_intent:
                if (clause.attribute):
                    if (clause.attribute!="Record"):
                        attributes.add(clause.attribute)
            # Cap the working set at 10k rows; fixed seed keeps sampling deterministic
            if len(vis.data) > 10000:
                vis._vis_data = vis.data[list(attributes)].sample(n = 10000, random_state = 1)
            else:
                vis._vis_data = vis.data[list(attributes)]
            # Post-processing step is chosen by the mark type
            if (vis.mark =="bar" or vis.mark =="line"):
                PandasExecutor.execute_aggregate(vis,isFiltered = filter_executed)
            elif (vis.mark =="histogram"):
                PandasExecutor.execute_binning(vis)
    @staticmethod
    def execute_aggregate(vis: Vis,isFiltered = True):
        '''
        Aggregate data points on an axis for bar or line charts
        Parameters
        ----------
        vis: lux.Vis
            lux.Vis object that represents a visualization
        ldf : lux.core.frame
            LuxDataFrame with specified intent.
        Returns
        -------
        None
        '''
        import numpy as np
        import pandas as pd
        x_attr = vis.get_attr_by_channel("x")[0]
        y_attr = vis.get_attr_by_channel("y")[0]
        has_color = False
        groupby_attr =""
        measure_attr =""
        if (x_attr.aggregation is None or y_attr.aggregation is None):
            return
        # Whichever axis carries an aggregation function is the measure; the other axis is the group-by key
        if (y_attr.aggregation!=""):
            groupby_attr = x_attr
            measure_attr = y_attr
            agg_func = y_attr.aggregation
        if (x_attr.aggregation!=""):
            groupby_attr = y_attr
            measure_attr = x_attr
            agg_func = x_attr.aggregation
        # NOTE(review): attr_unique_vals is only bound when the group-by attribute
        # has precomputed unique values; the `isFiltered or ...` branch below
        # assumes it exists — confirm unique_values always covers group-by attrs.
        if (groupby_attr.attribute in vis.data.unique_values.keys()):
            attr_unique_vals = vis.data.unique_values[groupby_attr.attribute]
        #checks if color is specified in the Vis
        if len(vis.get_attr_by_channel("color")) == 1:
            color_attr = vis.get_attr_by_channel("color")[0]
            color_attr_vals = vis.data.unique_values[color_attr.attribute]
            color_cardinality = len(color_attr_vals)
            #NOTE: might want to have a check somewhere to not use categorical variables with greater than some number of categories as a Color variable----------------
            has_color = True
        else:
            color_cardinality = 1
        if (measure_attr!=""):
            if (measure_attr.attribute=="Record"):
                # Counting records: group and count rows rather than aggregating a measure column
                vis._vis_data = vis.data.reset_index()
                #if color is specified, need to group by groupby_attr and color_attr
                if has_color:
                    vis._vis_data = vis.data.groupby([groupby_attr.attribute, color_attr.attribute]).count().reset_index()
                    vis._vis_data = vis.data.rename(columns={"index":"Record"})
                    vis._vis_data = vis.data[[groupby_attr.attribute,color_attr.attribute,"Record"]]
                else:
                    vis._vis_data = vis.data.groupby(groupby_attr.attribute).count().reset_index()
                    vis._vis_data = vis.data.rename(columns={"index":"Record"})
                    vis._vis_data = vis.data[[groupby_attr.attribute,"Record"]]
            else:
                #if color is specified, need to group by groupby_attr and color_attr
                if has_color:
                    groupby_result = vis.data.groupby([groupby_attr.attribute, color_attr.attribute])
                else:
                    groupby_result = vis.data.groupby(groupby_attr.attribute)
                groupby_result = groupby_result.agg(agg_func)
                intermediate = groupby_result.reset_index()
                # __finalize__ propagates metadata from the original frame onto the result
                vis._vis_data = intermediate.__finalize__(vis.data)
            result_vals = list(vis.data[groupby_attr.attribute])
            #create existing group by attribute combinations if color is specified
            #this is needed to check what combinations of group_by_attr and color_attr values have a non-zero number of elements in them
            if has_color:
                res_color_combi_vals = []
                result_color_vals = list(vis.data[color_attr.attribute])
                for i in range(0, len(result_vals)):
                    res_color_combi_vals.append([result_vals[i], result_color_vals[i]])
            # For filtered aggregation that have missing groupby-attribute values, set these aggregated value as 0, since no datapoints
            if (isFiltered or has_color and attr_unique_vals):
                N_unique_vals = len(attr_unique_vals)
                if (len(result_vals) != N_unique_vals*color_cardinality):
                    columns = vis.data.columns
                    if has_color:
                        # Right-merge against the full (group-by x color) cross product so missing combinations appear as NaN rows
                        df = pd.DataFrame({columns[0]: attr_unique_vals*color_cardinality, columns[1]: pd.Series(color_attr_vals).repeat(N_unique_vals)})
                        vis._vis_data = vis.data.merge(df, on=[columns[0],columns[1]], how='right', suffixes=['', '_right'])
                        for col in columns[2:]:
                            vis.data[col] = vis.data[col].fillna(0) #Triggers __setitem__
                        assert len(list(vis.data[groupby_attr.attribute])) == N_unique_vals*len(color_attr_vals), f"Aggregated data missing values compared to original range of values of `{groupby_attr.attribute, color_attr.attribute}`."
                        vis._vis_data = vis.data.iloc[:,:3] # Keep only the three relevant columns not the *_right columns resulting from merge
                    else:
                        df = pd.DataFrame({columns[0]: attr_unique_vals})
                        vis._vis_data = vis.data.merge(df, on=columns[0], how='right', suffixes=['', '_right'])
                        for col in columns[1:]:
                            vis.data[col] = vis.data[col].fillna(0)
                        assert len(list(vis.data[groupby_attr.attribute])) == N_unique_vals, f"Aggregated data missing values compared to original range of values of `{groupby_attr.attribute}`."
            vis._vis_data = vis.data.sort_values(by=groupby_attr.attribute, ascending=True)
            vis._vis_data = vis.data.reset_index()
            vis._vis_data = vis.data.drop(columns="index")
    @staticmethod
    def execute_binning(vis: Vis):
        '''
        Binning of data points for generating histograms
        Parameters
        ----------
        vis: lux.Vis
            lux.Vis object that represents a visualization
        ldf : lux.core.frame
            LuxDataFrame with specified intent.
        Returns
        -------
        None
        '''
        import numpy as np
        import pandas as pd # is this import going to be conflicting with LuxDf?
        bin_attribute = list(filter(lambda x: x.bin_size!=0,vis._inferred_intent))[0]
        # Skip binning entirely when the column is all-NaN
        if not np.isnan(vis.data[bin_attribute.attribute]).all():
            series = vis.data[bin_attribute.attribute].dropna() # np.histogram breaks if array contain NaN
            #TODO:binning runs for name attribte. Name attribute has datatype quantitative which is wrong.
            counts,bin_edges = np.histogram(series,bins=bin_attribute.bin_size)
            #bin_edges of size N+1, so need to compute bin_center as the bin location
            bin_center = np.mean(np.vstack([bin_edges[0:-1],bin_edges[1:]]), axis=0)
            # TODO: Should vis.data be a LuxDataFrame or a Pandas DataFrame?
            vis._vis_data = pd.DataFrame(np.array([bin_center,counts]).T,columns=[bin_attribute.attribute, "Number of Records"])
    @staticmethod
    def execute_filter(vis: Vis):
        '''
        Apply every filter clause of the vis intent to vis.data in place.
        Returns True if at least one filter was applied, False otherwise.
        '''
        assert vis.data is not None, "execute_filter assumes input vis.data is populated (if not, populate with LuxDataFrame values)"
        filters = utils.get_filter_specs(vis._inferred_intent)
        if (filters):
            # TODO: Need to handle OR logic
            for filter in filters: # NOTE(review): `filter` shadows the builtin inside this loop
                vis._vis_data = PandasExecutor.apply_filter(vis.data, filter.attribute, filter.filter_op, filter.value)
            return True
        else:
            return False
    @staticmethod
    def apply_filter(df: pandas.DataFrame, attribute:str, op: str, val: object) -> pandas.DataFrame:
        """
        Helper function for applying filter to a dataframe
        Parameters
        ----------
        df : pandas.DataFrame
            Dataframe to filter on
        attribute : str
            Filter attribute
        op : str
            Filter operation, '=', '<', '>', '<=', '>=', '!='
        val : object
            Filter value
        Returns
        -------
        df: pandas.DataFrame
            Dataframe resulting from the filter operation
        """
        if (op == '='):
            return df[df[attribute] == val]
        elif (op == '<'):
            return df[df[attribute] < val]
        elif (op == '>'):
            return df[df[attribute] > val]
        elif (op == '<='):
            return df[df[attribute] <= val]
        elif (op == '>='):
            return df[df[attribute] >= val]
        elif (op == '!='):
            return df[df[attribute] != val]
        # Unrecognized operator: return the dataframe unfiltered
        return df
"pandas.Series",
"numpy.histogram",
"numpy.array",
"lux.utils.utils.get_filter_specs",
"numpy.isnan",
"numpy.vstack",
"pandas.DataFrame"
] | [((9142, 9186), 'lux.utils.utils.get_filter_specs', 'utils.get_filter_specs', (['vis._inferred_intent'], {}), '(vis._inferred_intent)\n', (9164, 9186), False, 'from lux.utils import utils\n'), ((8511, 8560), 'numpy.histogram', 'np.histogram', (['series'], {'bins': 'bin_attribute.bin_size'}), '(series, bins=bin_attribute.bin_size)\n', (8523, 8560), True, 'import numpy as np\n'), ((8679, 8722), 'numpy.vstack', 'np.vstack', (['[bin_edges[0:-1], bin_edges[1:]]'], {}), '([bin_edges[0:-1], bin_edges[1:]])\n', (8688, 8722), True, 'import numpy as np\n'), ((8215, 8258), 'numpy.isnan', 'np.isnan', (['vis.data[bin_attribute.attribute]'], {}), '(vis.data[bin_attribute.attribute])\n', (8223, 8258), True, 'import numpy as np\n'), ((8849, 8879), 'numpy.array', 'np.array', (['[bin_center, counts]'], {}), '([bin_center, counts])\n', (8857, 8879), True, 'import numpy as np\n'), ((6932, 6976), 'pandas.DataFrame', 'pd.DataFrame', (['{columns[0]: attr_unique_vals}'], {}), '({columns[0]: attr_unique_vals})\n', (6944, 6976), True, 'import pandas as pd\n'), ((6181, 6207), 'pandas.Series', 'pd.Series', (['color_attr_vals'], {}), '(color_attr_vals)\n', (6190, 6207), True, 'import pandas as pd\n')] |
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
import numpy as np
import pandas as pd
from sklearn.utils.testing import assert_array_equal
from sktime.classifiers.example_classifiers import TSExampleClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sktime.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, make_scorer
from sktime.datasets import load_gunpoint
# Module-level fixtures: load the GunPoint train/test splits and duplicate the
# series column so the classifiers under test see a two-column DataFrame.
Xsf_train, y_train = load_gunpoint(return_X_y=True)
Xdf_train = pd.DataFrame({'ts': Xsf_train, 'ts_copy': Xsf_train})
Xsf_test, y_test = load_gunpoint("TEST", return_X_y=True)
Xdf_test = pd.DataFrame({'ts': Xsf_test, 'ts_copy': Xsf_test})
def test_xdataframe_TSExampleClassifier():
    """Fitting on the two-column DataFrame should classify every test case as class 2."""
    forest = RandomForestClassifier(random_state=123, n_estimators=10)
    clf = TSExampleClassifier(func=np.mean, columns=Xdf_train.columns, estimator=forest)
    clf.fit(Xdf_train, y_train)
    expected = np.ones(y_test.shape[0]) * 2
    assert_array_equal(clf.predict(Xdf_test), expected)
def test_set_get_param():
    """Nested estimator parameters must round-trip through set_params/get_params."""
    clf = TSExampleClassifier(
        func=np.mean,
        columns=Xdf_train.columns,
        estimator=RandomForestClassifier(random_state=123, n_estimators=10),
    )
    clf.set_params(estimator__random_state=42)
    assert clf.get_params()['estimator__random_state'] == 42
def test_grid_search_cv():
    """GridSearchCV with an explicit scorer should reproduce the base model's predictions."""
    clf = TSExampleClassifier(
        func=np.mean,
        columns=Xdf_train.columns,
        estimator=LogisticRegression(fit_intercept=True, solver='lbfgs'),
    )
    clf.fit(Xdf_train, y_train)
    baseline = clf.predict(Xdf_train)
    # give (deep) parameter tuning details
    param_grid = {'estimator__fit_intercept': (True, False)}
    # as we are not using a mixin, we need an external scorer
    search = GridSearchCV(clf, param_grid, scoring=make_scorer(accuracy_score), cv=5)
    search.fit(Xdf_train, y_train)
    assert_array_equal(baseline, search.predict(Xdf_train))
def test_grid_search_cv_default_scorer():
    """GridSearchCV with its default scorer should reproduce the base model's predictions."""
    clf = TSExampleClassifier(
        func=np.mean,
        columns=Xdf_train.columns,
        estimator=LogisticRegression(fit_intercept=True, solver='lbfgs'),
    )
    clf.fit(Xdf_train, y_train)
    baseline = clf.predict(Xdf_train)
    # give (deep) parameter tuning details; no external scorer this time
    param_grid = {'estimator__fit_intercept': (True, False)}
    search = GridSearchCV(clf, param_grid, cv=5)
    search.fit(Xdf_train, y_train)
    assert_array_equal(baseline, search.predict(Xdf_train))
| [
"sklearn.utils.testing.assert_array_equal",
"numpy.ones",
"sklearn.metrics.make_scorer",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.linear_model.LogisticRegression",
"sktime.model_selection.GridSearchCV",
"sktime.datasets.load_gunpoint",
"pandas.DataFrame"
] | [((524, 554), 'sktime.datasets.load_gunpoint', 'load_gunpoint', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (537, 554), False, 'from sktime.datasets import load_gunpoint\n'), ((567, 620), 'pandas.DataFrame', 'pd.DataFrame', (["{'ts': Xsf_train, 'ts_copy': Xsf_train}"], {}), "({'ts': Xsf_train, 'ts_copy': Xsf_train})\n", (579, 620), True, 'import pandas as pd\n'), ((640, 678), 'sktime.datasets.load_gunpoint', 'load_gunpoint', (['"""TEST"""'], {'return_X_y': '(True)'}), "('TEST', return_X_y=True)\n", (653, 678), False, 'from sktime.datasets import load_gunpoint\n'), ((690, 741), 'pandas.DataFrame', 'pd.DataFrame', (["{'ts': Xsf_test, 'ts_copy': Xsf_test}"], {}), "({'ts': Xsf_test, 'ts_copy': Xsf_test})\n", (702, 741), True, 'import pandas as pd\n'), ((1918, 1945), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (1929, 1945), False, 'from sklearn.metrics import accuracy_score, make_scorer\n'), ((1991, 2053), 'sktime.model_selection.GridSearchCV', 'GridSearchCV', (['model', 'parameters'], {'scoring': 'external_scorer', 'cv': '(5)'}), '(model, parameters, scoring=external_scorer, cv=5)\n', (2003, 2053), False, 'from sktime.model_selection import GridSearchCV\n'), ((2101, 2134), 'sklearn.utils.testing.assert_array_equal', 'assert_array_equal', (['expected', 'got'], {}), '(expected, got)\n', (2119, 2134), False, 'from sklearn.utils.testing import assert_array_equal\n'), ((2694, 2731), 'sktime.model_selection.GridSearchCV', 'GridSearchCV', (['model', 'parameters'], {'cv': '(5)'}), '(model, parameters, cv=5)\n', (2706, 2731), False, 'from sktime.model_selection import GridSearchCV\n'), ((2779, 2812), 'sklearn.utils.testing.assert_array_equal', 'assert_array_equal', (['expected', 'got'], {}), '(expected, got)\n', (2797, 2812), False, 'from sklearn.utils.testing import assert_array_equal\n'), ((895, 952), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(123)', 
'n_estimators': '(10)'}), '(random_state=123, n_estimators=10)\n', (917, 952), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1022, 1046), 'numpy.ones', 'np.ones', (['y_test.shape[0]'], {}), '(y_test.shape[0])\n', (1029, 1046), True, 'import numpy as np\n'), ((1188, 1245), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(123)', 'n_estimators': '(10)'}), '(random_state=123, n_estimators=10)\n', (1210, 1245), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1560, 1614), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'fit_intercept': '(True)', 'solver': '"""lbfgs"""'}), "(fit_intercept=True, solver='lbfgs')\n", (1578, 1614), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2351, 2405), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'fit_intercept': '(True)', 'solver': '"""lbfgs"""'}), "(fit_intercept=True, solver='lbfgs')\n", (2369, 2405), False, 'from sklearn.linear_model import LogisticRegression\n')] |
import numpy as np
from evtol import eVTOL
import matplotlib.pyplot as plt
# OTHER STUFF STORED HERE FOR NOW
def unique(list1):
    """Return the distinct elements of *list1* and the indices of their first occurrence.

    Parameters
    ----------
    list1 : list
        Input sequence. Elements are compared with ``in``/``==``, so they do
        not need to be hashable.

    Returns
    -------
    tuple
        ``(unique_list, unique_indices)`` where ``unique_list`` preserves the
        order of first appearance and ``unique_indices[j]`` is the position in
        *list1* where ``unique_list[j]`` first occurred.
    """
    unique_list = []
    unique_indices = []
    # enumerate replaces the original hand-maintained counter variable
    for index, value in enumerate(list1):
        if value not in unique_list:
            unique_list.append(value)
            unique_indices.append(index)
    return unique_list, unique_indices
def add_legend(ax):
    """Attach a legend to *ax* with duplicate labels collapsed to one entry each."""
    handles, labels = ax.get_legend_handles_labels()
    labels_unique, first_indices = unique(labels)
    handles_unique = [handles[idx] for idx in first_indices]
    ax.legend(handles_unique, labels_unique)
if __name__ == "__main__":
    # Simulate a full eVTOL mission (taxi -> climb -> cruise -> descent ->
    # landing), then plot trajectory, predicted range, and battery SOC.
    # Model inputs
    m = 2200 #kg
    n = 6
    r = 0.65 # meters
    x_init = [0,0, 100]  # initial [x, y, altitude] in meters
    heading = 0
    # Model initialize
    evtol = eVTOL(m, soc_init=100, soc_limit=20, mission=[], energy_density=260)
    evtol.initialize_state(x_init, heading)
    # print(evtol.calculate_power(mode='hover'))
    # print(evtol.calculate_power(mode='cruise'))
    #
    # print(evtol.battery.get_energy_remaining())
    mission = []
    # Mission profile: each fly() call is (duration in seconds, flight mode).
    evtol.fly(30, 'taxi')
    evtol.fly(5, 'hover')
    # (z_alt_now - z_alt_des) / evtol.ROC_climb
    evtol.fly(45, 'vertical climb')
    # evtol.fly(15, 'vertical climb')
    evtol.fly(45, 'transition forward')
    evtol.fly(105, 'climb')
    # evtol.fly(1500-105, 'cruise')
    # Cruise is split into three equal legs.
    cruise_time = 1500-105
    evtol.fly(cruise_time*0.333, 'cruise')
    evtol.fly(cruise_time*0.333, 'cruise')
    evtol.fly(cruise_time*0.333, 'cruise')
    evtol.fly(105, 'descent')
    # evtol.fly(15, 'vertical descent')
    evtol.fly(45, 'transition reverse')
    evtol.fly(45, 'vertical descent')
    evtol.fly(5, 'hover')
    evtol.fly(30, 'taxi')
    final_range_remaining = evtol.calculate_range()
    print(final_range_remaining)
    print(evtol.mission)
    print('Total Energy Used in kWh: {}'.format(evtol.battery.get_energy_used()))
    print('Predicted Range Remaining in km: {}'.format(evtol.calculate_range()))
    #TODO: ARE WE IGNORING THE ENERGY BELOW 20% OR NOT??? I THOUGHT I WAS!?
    # Mission log entry layout (presumably):
    # (tuple([mode, round(time, 2), round(power, 0),
    #         round(soc_remaining, 2), round(range_remaining, 2)]))
    fig = plt.figure(10)
    # Unpack the mission log into per-segment series.
    times = [x[1] for x in evtol.mission]
    socs = [x[3] for x in evtol.mission]
    states = [x[5] for x in evtol.mission]
    xs = [x[0] for x in states]
    ys = [x[1] for x in states]
    zs = [x[2] for x in states]
    phases = [x[0] for x in evtol.mission]
    plt.plot(xs, ys)
    fig2 = plt.figure(11)
    plt.plot(xs,zs)
    predicted_ranges = [p[4] for p in evtol.mission]
    # Build per-second series by linearly interpolating between the
    # segment endpoints logged in the mission.
    ts=[0]
    xs_new = [x_init[0]]
    ys_new = [x_init[1]]
    zs_new = [x_init[2]]
    plot_ranges = [0]
    index = 0
    # Prepend the initial state so segment i interpolates [i] -> [i+1].
    predicted_ranges.insert(0,0)
    socs.insert(0,100)
    xs.insert(0,x_init[0])
    ys.insert(0,x_init[1])
    zs.insert(0,x_init[2])
    plot_socs = [100]
    plot_phases = ['taxi']
    for time in times:
        new_times = np.linspace(ts[-1], ts[-1]+time,time+1)
        for i in new_times:
            ts.append(i)
        # new_xs = [xs[index]] * len(new_times)
        # new_ys = [ys[index]] * len(new_times)
        # new_zs = [zs[index]] * len(new_times)
        ranges = np.linspace(predicted_ranges[index], predicted_ranges[index+1], len(new_times))
        # ranges = [predicted_ranges[index+1]] * len(new_times)
        #TODO: SMOOTHING USING NP.LINSPACE
        new_xs = np.linspace(xs[index], xs[index+1], len(new_times))
        new_ys = np.linspace(ys[index], ys[index+1], len(new_times))
        new_zs = np.linspace(zs[index], zs[index+1], len(new_times))
        new_socs = np.linspace(socs[index], socs[index+1], len(new_times))
        for j in new_xs:
            xs_new.append(j)
        for j in new_ys:
            ys_new.append(j)
        for j in new_zs:
            zs_new.append(j)
        for j in ranges:
            plot_ranges.append(j)
        for j in new_socs:
            plot_socs.append(j)
            plot_phases.append(phases[index])
        index+=1
    # One distinct color per flight phase, used for segment-wise plotting.
    colors = {'taxi':'red', 'hover':'blue', 'vertical climb':'green', 'transition forward':'orange', 'climb':'purple', 'cruise':'yellow', 'descent':'pink', 'transition reverse':'black', 'vertical descent':'brown'}
    fig3 = plt.figure(12)
    plt.plot(ts, xs_new)
    plt.title("X Position over Flight")
    plt.xlabel("Time")
    fig4 = plt.figure(16)
    plt.plot(ts, ys_new)
    plt.title("Y Position over Flight")
    plt.xlabel("Time")
    fig6, ax6 = plt.subplots()
    # NOTE(review): this figure plots plot_ranges but is titled/labelled as
    # Altitude; the commented plt.plot(ts, zs_new) suggests altitude was
    # intended here — confirm which series should be drawn.
    for i in range(len(ts)-1):
        plt.plot([ts[i],ts[i+1]], [plot_ranges[i],plot_ranges[i+1]], color=colors[plot_phases[i]], label=str(plot_phases[i]))
    # plt.plot(ts, zs_new)
    plt.title("Altitude over Flight")
    plt.xlabel("Time (s)")
    plt.ylabel("Altitude (m)")
    add_legend(ax6)
    fig5, ax5 = plt.subplots()
    plt.plot(ts, plot_ranges)
    plt.title("Predicted Range over Flight")
    plt.xlabel("Time")
    fig7, ax7 = plt.subplots()
    # SOC drawn segment-by-segment so each phase gets its own color.
    for i in range(len(ts)-1):
        plt.plot([ts[i],ts[i+1]], [plot_socs[i],plot_socs[i+1]], color=colors[plot_phases[i]], label=str(plot_phases[i]))
    # plt.plot(ts, zs_new)
    # plt.plot(ts, plot_socs)
    plt.title("SOC over Flight")
    plt.xlabel("Time (s)")
    plt.ylabel("SOC (%)")
    add_legend(ax7)
    plt.show()
| [
"matplotlib.pyplot.ylabel",
"evtol.eVTOL",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((821, 889), 'evtol.eVTOL', 'eVTOL', (['m'], {'soc_init': '(100)', 'soc_limit': '(20)', 'mission': '[]', 'energy_density': '(260)'}), '(m, soc_init=100, soc_limit=20, mission=[], energy_density=260)\n', (826, 889), False, 'from evtol import eVTOL\n'), ((2241, 2255), 'matplotlib.pyplot.figure', 'plt.figure', (['(10)'], {}), '(10)\n', (2251, 2255), True, 'import matplotlib.pyplot as plt\n'), ((2526, 2542), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys'], {}), '(xs, ys)\n', (2534, 2542), True, 'import matplotlib.pyplot as plt\n'), ((2555, 2569), 'matplotlib.pyplot.figure', 'plt.figure', (['(11)'], {}), '(11)\n', (2565, 2569), True, 'import matplotlib.pyplot as plt\n'), ((2574, 2590), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'zs'], {}), '(xs, zs)\n', (2582, 2590), True, 'import matplotlib.pyplot as plt\n'), ((4295, 4309), 'matplotlib.pyplot.figure', 'plt.figure', (['(12)'], {}), '(12)\n', (4305, 4309), True, 'import matplotlib.pyplot as plt\n'), ((4314, 4334), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'xs_new'], {}), '(ts, xs_new)\n', (4322, 4334), True, 'import matplotlib.pyplot as plt\n'), ((4339, 4374), 'matplotlib.pyplot.title', 'plt.title', (['"""X Position over Flight"""'], {}), "('X Position over Flight')\n", (4348, 4374), True, 'import matplotlib.pyplot as plt\n'), ((4379, 4397), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (4389, 4397), True, 'import matplotlib.pyplot as plt\n'), ((4409, 4423), 'matplotlib.pyplot.figure', 'plt.figure', (['(16)'], {}), '(16)\n', (4419, 4423), True, 'import matplotlib.pyplot as plt\n'), ((4428, 4448), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'ys_new'], {}), '(ts, ys_new)\n', (4436, 4448), True, 'import matplotlib.pyplot as plt\n'), ((4453, 4488), 'matplotlib.pyplot.title', 'plt.title', (['"""Y Position over Flight"""'], {}), "('Y Position over Flight')\n", (4462, 4488), True, 'import matplotlib.pyplot as plt\n'), ((4493, 4511), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Time"""'], {}), "('Time')\n", (4503, 4511), True, 'import matplotlib.pyplot as plt\n'), ((4528, 4542), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4540, 4542), True, 'import matplotlib.pyplot as plt\n'), ((4731, 4764), 'matplotlib.pyplot.title', 'plt.title', (['"""Altitude over Flight"""'], {}), "('Altitude over Flight')\n", (4740, 4764), True, 'import matplotlib.pyplot as plt\n'), ((4769, 4791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (4779, 4791), True, 'import matplotlib.pyplot as plt\n'), ((4796, 4822), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altitude (m)"""'], {}), "('Altitude (m)')\n", (4806, 4822), True, 'import matplotlib.pyplot as plt\n'), ((4859, 4873), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4871, 4873), True, 'import matplotlib.pyplot as plt\n'), ((4878, 4903), 'matplotlib.pyplot.plot', 'plt.plot', (['ts', 'plot_ranges'], {}), '(ts, plot_ranges)\n', (4886, 4903), True, 'import matplotlib.pyplot as plt\n'), ((4908, 4948), 'matplotlib.pyplot.title', 'plt.title', (['"""Predicted Range over Flight"""'], {}), "('Predicted Range over Flight')\n", (4917, 4948), True, 'import matplotlib.pyplot as plt\n'), ((4953, 4971), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (4963, 4971), True, 'import matplotlib.pyplot as plt\n'), ((4988, 5002), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5000, 5002), True, 'import matplotlib.pyplot as plt\n'), ((5217, 5245), 'matplotlib.pyplot.title', 'plt.title', (['"""SOC over Flight"""'], {}), "('SOC over Flight')\n", (5226, 5245), True, 'import matplotlib.pyplot as plt\n'), ((5250, 5272), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (5260, 5272), True, 'import matplotlib.pyplot as plt\n'), ((5277, 5298), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SOC (%)"""'], {}), "('SOC (%)')\n", (5287, 5298), True, 'import matplotlib.pyplot as 
plt\n'), ((5324, 5334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5332, 5334), True, 'import matplotlib.pyplot as plt\n'), ((3000, 3044), 'numpy.linspace', 'np.linspace', (['ts[-1]', '(ts[-1] + time)', '(time + 1)'], {}), '(ts[-1], ts[-1] + time, time + 1)\n', (3011, 3044), True, 'import numpy as np\n')] |
from joblib import Parallel, delayed
import numpy as np
from pyriemann.classification import MDM
from pyriemann.utils.distance import distance
from pyriemann.utils.geodesic import geodesic
from pyriemann.utils.mean import mean_covariance
class MDWM(MDM):
    """Minimum Distance to Weighted Mean classifier.

    Extends pyriemann's MDM: per-class centroids are the geodesic blend
    (controlled by ``L``) of the target-session class means and the
    weighted source/domain class means.
    """
    def __init__(self, L=0, **kwargs):
        """Init.

        Parameters
        ----------
        L : float in [0, 1]
            Blending weight between target means (L=0 end) and domain
            means along the geodesic; passed to ``geodesic`` in ``fit``.
        **kwargs :
            Forwarded to the MDM constructor (metric, n_jobs, ...).
        """
        if L < 0. or L > 1.0:
            raise ValueError("L should be chosen between 0.0 and 1.0")
        self.L = L
        super().__init__(**kwargs)
    def fit(self, X, y, X_domain, y_domain, sample_weight=None):
        """Fit (estimates) the centroids.
        Parameters
        ----------
        X : ndarray, shape (n_trials, n_channels, n_channels)
            ndarray of SPD matrices (target session).
        y : ndarray shape (n_trials, 1)
            labels corresponding to each trial.
        X_domain : ndarray, shape (n_domain_trials, n_channels, n_channels)
            ndarray of SPD matrices from the source domain.
        y_domain : ndarray shape (n_domain_trials, 1)
            labels of the domain trials; must cover the same classes as y.
        sample_weight : None | ndarray shape (n_domain_trials, 1)
            the weights of each sample from the domain. if None, each sample
            is treated with equal weights.
        Returns
        -------
        self : MDM instance
            The MDM instance.
        """
        self.classes_ = np.unique(y)
        # TODO: add a check that y and y_domain
        # contain the same classes
        if sample_weight is None:
            sample_weight = np.ones(X_domain.shape[0])
        if self.n_jobs == 1:
            # Sequential path: one mean per class for target and domain.
            self.target_means_ = [
                mean_covariance(X[y == label], metric=self.metric_mean)
                # sample_weight=sample_weight_target[y == l])
                for label in self.classes_
            ]
            self.domain_means_ = [
                mean_covariance(
                    X_domain[y_domain == label],
                    metric=self.metric_mean,
                    sample_weight=sample_weight[y_domain == label],
                )
                for label in self.classes_
            ]
        else:
            # Parallel path: same computation fanned out over joblib workers.
            self.target_means_ = Parallel(n_jobs=self.n_jobs)(
                delayed(mean_covariance)(X[y == label],
                                         metric=self.metric_mean)
                for label in self.classes_
            )  # sample_weight=sample_weight_target[y == l])
            self.domain_means_ = Parallel(n_jobs=self.n_jobs)(
                delayed(mean_covariance)(
                    X_domain[y_domain == label],
                    metric=self.metric_mean,
                    sample_weight=sample_weight[y_domain == label],
                )
                for label in self.classes_
            )
        # Blend target and domain class means along the Riemannian geodesic;
        # self.L == 0 keeps the target mean, 1 keeps the domain mean.
        self.class_center_ = [
            geodesic(self.target_means_[i], self.domain_means_[i],
                     self.L, self.metric)
            for i, _ in enumerate(self.classes_)
        ]
        return self
    def _predict_distances(self, covtest):
        """Helper to predict the distance. equivalent to transform."""
        Nc = len(self.class_center_)
        if self.n_jobs == 1:
            dist = [
                distance(covtest, self.class_center_[m], self.metric_dist)
                for m in range(Nc)
            ]
        else:
            dist = Parallel(n_jobs=self.n_jobs)(
                delayed(distance)(covtest, self.class_center_[m],
                                  self.metric_dist)
                for m in range(Nc)
            )
        # One distance column per class centroid.
        dist = np.concatenate(dist, axis=1)
        return dist
    def predict(self, covtest):
        """get the predictions.
        Parameters
        ----------
        X : ndarray, shape (n_trials, n_channels, n_channels)
            ndarray of SPD matrices.
        Returns
        -------
        pred : ndarray of int, shape (n_trials, 1)
            the prediction for each trials according to the closest centroid.
        """
        dist = self._predict_distances(covtest)
        return self.classes_[dist.argmin(axis=1)]
    def transform(self, X):
        """get the distance to each centroid.
        Parameters
        ----------
        X : ndarray, shape (n_trials, n_channels, n_channels)
            ndarray of SPD matrices.
        Returns
        -------
        dist : ndarray, shape (n_trials, n_classes)
            the distance to each centroid according to the metric.
        """
        return self._predict_distances(X)
| [
"numpy.ones",
"numpy.unique",
"pyriemann.utils.distance.distance",
"joblib.Parallel",
"numpy.concatenate",
"pyriemann.utils.mean.mean_covariance",
"pyriemann.utils.geodesic.geodesic",
"joblib.delayed"
] | [((1113, 1125), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1122, 1125), True, 'import numpy as np\n'), ((3318, 3346), 'numpy.concatenate', 'np.concatenate', (['dist'], {'axis': '(1)'}), '(dist, axis=1)\n', (3332, 3346), True, 'import numpy as np\n'), ((1290, 1316), 'numpy.ones', 'np.ones', (['X_domain.shape[0]'], {}), '(X_domain.shape[0])\n', (1297, 1316), True, 'import numpy as np\n'), ((2568, 2643), 'pyriemann.utils.geodesic.geodesic', 'geodesic', (['self.target_means_[i]', 'self.domain_means_[i]', 'self.L', 'self.metric'], {}), '(self.target_means_[i], self.domain_means_[i], self.L, self.metric)\n', (2576, 2643), False, 'from pyriemann.utils.geodesic import geodesic\n'), ((1398, 1453), 'pyriemann.utils.mean.mean_covariance', 'mean_covariance', (['X[y == label]'], {'metric': 'self.metric_mean'}), '(X[y == label], metric=self.metric_mean)\n', (1413, 1453), False, 'from pyriemann.utils.mean import mean_covariance\n'), ((1625, 1746), 'pyriemann.utils.mean.mean_covariance', 'mean_covariance', (['X_domain[y_domain == label]'], {'metric': 'self.metric_mean', 'sample_weight': 'sample_weight[y_domain == label]'}), '(X_domain[y_domain == label], metric=self.metric_mean,\n sample_weight=sample_weight[y_domain == label])\n', (1640, 1746), False, 'from pyriemann.utils.mean import mean_covariance\n'), ((1926, 1954), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (1934, 1954), False, 'from joblib import Parallel, delayed\n'), ((2215, 2243), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), '(n_jobs=self.n_jobs)\n', (2223, 2243), False, 'from joblib import Parallel, delayed\n'), ((2964, 3022), 'pyriemann.utils.distance.distance', 'distance', (['covtest', 'self.class_center_[m]', 'self.metric_dist'], {}), '(covtest, self.class_center_[m], self.metric_dist)\n', (2972, 3022), False, 'from pyriemann.utils.distance import distance\n'), ((3105, 3133), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs'}), 
'(n_jobs=self.n_jobs)\n', (3113, 3133), False, 'from joblib import Parallel, delayed\n'), ((1972, 1996), 'joblib.delayed', 'delayed', (['mean_covariance'], {}), '(mean_covariance)\n', (1979, 1996), False, 'from joblib import Parallel, delayed\n'), ((2261, 2285), 'joblib.delayed', 'delayed', (['mean_covariance'], {}), '(mean_covariance)\n', (2268, 2285), False, 'from joblib import Parallel, delayed\n'), ((3151, 3168), 'joblib.delayed', 'delayed', (['distance'], {}), '(distance)\n', (3158, 3168), False, 'from joblib import Parallel, delayed\n')] |
import inviwopy
from inviwopy.glm import *
import numpy as np
import math
# Escape-time rendering of a multibrot set (z -> z^po + c) into the
# Inviwo-provided image buffer. Runs inside Inviwo's Python processor.
# input variables
# img - memory for the final image
# p - the processor
# Map pixel coordinates to the complex plane using the processor's bounds.
rAxis = np.linspace(p.realBounds.value[0],p.realBounds.value[1],img.data.shape[0])
iAxis = np.linspace(p.imaginaryBound.value[0],p.imaginaryBound.value[1],img.data.shape[1])
po = p.power.value  # exponent of the iteration (2 = classic Mandelbrot)
its = p.iterations.value  # maximum escape-time iterations per pixel
for (index,v) in np.ndenumerate(img.data):
    C = Z = complex( rAxis[index[0]] , iAxis[index[1]] );
    for i in range(0,its):
        if(abs(Z)>2):
            # |Z| > 2 guarantees divergence; shade by (log of) escape time.
            img.data[index[0],index[1]] = math.log(1+i);
            break;
Z = np.power(Z,po) + C; | [
"numpy.linspace",
"numpy.power",
"numpy.ndenumerate",
"math.log"
] | [((158, 234), 'numpy.linspace', 'np.linspace', (['p.realBounds.value[0]', 'p.realBounds.value[1]', 'img.data.shape[0]'], {}), '(p.realBounds.value[0], p.realBounds.value[1], img.data.shape[0])\n', (169, 234), True, 'import numpy as np\n'), ((241, 330), 'numpy.linspace', 'np.linspace', (['p.imaginaryBound.value[0]', 'p.imaginaryBound.value[1]', 'img.data.shape[1]'], {}), '(p.imaginaryBound.value[0], p.imaginaryBound.value[1], img.data.\n shape[1])\n', (252, 330), True, 'import numpy as np\n'), ((388, 412), 'numpy.ndenumerate', 'np.ndenumerate', (['img.data'], {}), '(img.data)\n', (402, 412), True, 'import numpy as np\n'), ((566, 581), 'math.log', 'math.log', (['(1 + i)'], {}), '(1 + i)\n', (574, 581), False, 'import math\n'), ((613, 628), 'numpy.power', 'np.power', (['Z', 'po'], {}), '(Z, po)\n', (621, 628), True, 'import numpy as np\n')] |
from math import sqrt
import numpy as np
class KNearestNeighborsClassifier:
    """
    A minimal K-Nearest Neighbors classifier.

    n_neighbors: int, default=5
        Number of neighbors consulted when predicting a class label.
    """

    def __init__(self, n_neighbors=5):
        """Create an unfitted classifier."""
        self.neighbors = n_neighbors
        self.X = None
        self.y = None

    def fit(self, X_train, y_train):
        """
        Memorize the training data (KNN is a lazy learner).
        X_train may be a multidimensional array; y_train is a
        one-dimensional array of matching length.

        X: numpy array, training data
        y: numpy array, target values
        """
        self.X = X_train
        self.y = y_train

    def predict(self, X_test):
        """
        Predict the class label for every row of X_test.

        X_test: numpy array
        Returns a numpy array of predicted labels.
        """
        return np.array([
            self._make_prediction(row, self.X, self.y, self.neighbors)
            for row in X_test
        ])

    def _eucl_dist(self, test_v, train_v):
        """Euclidean distance between a test vector and a train vector."""
        squared = sum((test_v[i] - train_v[i]) ** 2 for i in range(len(test_v)))
        return sqrt(squared)

    def _get_neighbors(self, test_v, train_v, y_train, n_neighbors):
        """
        Return the n_neighbors closest (vector, label, distance)
        triples, nearest first.
        """
        scored = [
            (train_v[i], y_train[i], self._eucl_dist(test_v, train_v[i]))
            for i in range(len(train_v))
        ]
        # Stable sort on distance only, then keep the closest k.
        scored.sort(key=lambda triple: triple[2])
        return scored[:n_neighbors]

    def _make_prediction(self, test_v, train_v, y_train, n_neighbors):
        """Majority vote among the labels of the nearest neighbors."""
        triples = self._get_neighbors(test_v, train_v, y_train, n_neighbors)
        output_class = [label for _, label, _ in triples]
        return max(set(output_class), key=output_class.count)
| [
"numpy.array",
"math.sqrt"
] | [((1118, 1139), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (1126, 1139), True, 'import numpy as np\n'), ((1413, 1423), 'math.sqrt', 'sqrt', (['dist'], {}), '(dist)\n', (1417, 1423), False, 'from math import sqrt\n')] |
import copy
import numpy as np
from collections import OrderedDict
import torch
from torch import optim
import torch.nn.functional as F
from torch.distributions import Categorical
from torchmeta.utils.gradient_based import gradient_update_parameters
import lio.model.meta_actor_net as meta_actor_net
import lio.model.actor_net as actor_net
from lio.model.meta_actor_net import MetaNet_PG
from lio.model.actor_net import Reward_net
import lio.utils.util as util
from lio.utils.util import grad_graph
from lio.utils.util import check_reward_net_grad
from lio.utils.util import Adam_Optim
gamma = 0.99  # discount factor used when accumulating returns
eps = torch.finfo(torch.float32).eps  # float32 machine epsilon; avoids division by zero when normalizing returns
class Actor(object):
    """Policy-gradient agent that can also give rewards to other agents.

    Holds two networks: a meta policy network (action selection, updated
    via manually-applied gradients so updates stay differentiable) and a
    reward network (learned reward giving, updated with a standard Adam
    optimizer).
    """
    def __init__(self, agent_id, l_obs, n_agent, n_action=3, l_act=3, l1=64, l2=32, lr=0.01):
        # agent_id: index of this agent among n_agent agents
        # l_obs: observation length; l_act: one-hot action length
        # l1, l2: hidden layer sizes; lr: policy learning rate
        self.id = agent_id
        self.n_action = n_action
        self.l_act = l_act
        self.l_obs = l_obs
        self.n_agent = n_agent
        self.lr = lr
        self.l1 = l1
        self.l2 = l2
        self._obs = []
        self._action = []
        self._action_hot = []
        self._reward_from = []
        self.policy_net = MetaNet_PG(self.l_obs, self.n_action, l1, l2)
        self.reward_net = Reward_net(self.l_obs, self.l_act, n_agent, l1, 16)
        self.policy_net.apply(meta_actor_net.weight_init_meta)
        self.reward_net.apply(actor_net.weight_init)
        # Candidate parameters produced by an inner-loop policy update;
        # applied to policy_net only by update_to_new_params().
        self.new_params = None
        self.optimizer_p = Adam_Optim(self.policy_net, lr=self.lr)
        self.optimizer_r = optim.Adam(self.reward_net.parameters(), lr=1e-2)
    def reset_state(self):
        """Clear per-episode buffers and any pending parameter update."""
        self._obs = []
        self._action = []
        self._action_hot = []
        self._reward_from = []
        self.new_params = []
    def set_obs(self, obs):
        self._obs = obs
    def get_obs(self):
        return self._obs
    def action_sampling(self, epsilon):
        """Sample an action from an epsilon-smoothed softmax policy.

        epsilon mixes the softmax probabilities with a uniform
        distribution, guaranteeing exploration.
        """
        with torch.no_grad():
            obs = torch.Tensor(self._obs)
            # If new_params is set, the policy is evaluated with those
            # (not-yet-committed) parameters.
            logits = self.policy_net(obs, self.new_params)
            probs = F.softmax(logits, dim=0)
            probs_hat = (1 - epsilon) * probs + epsilon / self.n_action
            m = Categorical(probs_hat)
            try:
                action_prim = m.sample()
            except RuntimeError:
                # NOTE(review): if sample() raises, action_prim stays
                # unbound and the indexing below raises NameError — the
                # failure should probably re-raise after printing.
                print("logits = ", logits, "\nprobs = ", probs, "\nprobs_hat = ", probs_hat)
        action_hot = np.zeros(self.l_act)
        action_hot[action_prim] = 1
        self._action = action_prim
        self._action_hot = action_hot
    def get_action(self):
        return self._action
    def get_action_hot(self):
        return self._action_hot
    def give_reward(self, list_action):
        """Compute the reward vector this agent gives, from its own
        observation and the other agents' one-hot actions."""
        action_other = copy.copy(list_action)
        del action_other[self.id]
        action_other = torch.Tensor(action_other).view(-1)
        obs = torch.Tensor(self._obs).view(-1)
        # Detach: the reward-net input must not backprop into the policy.
        input = torch.cat([obs, action_other]).detach()
        reward = self.reward_net(input)
        reward = reward * 1
        return reward
    def set_reward_given(self, reward_given):
        self._reward_from = reward_given
    def get_reward_from(self):
        return self._reward_from
    def update_policy(self, trajectorys, lr=0.1):
        """Inner-loop policy update via plain gradient descent (util.gd).

        Produces self.new_params; the live network is unchanged until
        update_to_new_params() is called.
        """
        self.policy_net.zero_grad()
        states = trajectorys[self.id].get_state()
        actions = trajectorys[self.id].get_action()
        returns_env = trajectorys[self.id].get_returns_env()
        returns_from = trajectorys[self.id].get_returns_from()
        returns_env = torch.Tensor(returns_env)
        returns_from = torch.Tensor(returns_from)
        # Total return = environment return + returns received from peers.
        returns = torch.add(returns_env, returns_from)
        # returns = returns_env
        # Compute policy loss
        logits = self.policy_net(states)
        prob = F.softmax(logits, dim=-1)
        log_prob = F.log_softmax(logits, dim=-1)
        # compute policy loss
        loss_entropy = - log_prob * prob
        loss_entropy = loss_entropy.mean()
        log_prob_act = torch.stack([log_prob[i][actions[i]] for i in range(len(actions))], dim=0)
        # REINFORCE objective plus a small entropy bonus.
        loss_policy = - torch.dot(returns, log_prob_act).view(1) / len(prob)
        loss = loss_policy + 0.01 * loss_entropy
        self.new_params = util.gd(self.policy_net, loss, lr=lr)
    def update_policy_op(self, trajectorys):
        """Policy update using the custom Adam optimizer (Adam_Optim),
        with discounted and normalized environment returns."""
        self.policy_net.zero_grad()
        states = trajectorys[self.id].get_state()
        actions = trajectorys[self.id].get_action()
        rewards_env = trajectorys[self.id].get_reward_env()
        rewards_from = trajectorys[self.id].get_reward_from()
        loss_policy = []
        loss_entropy = []
        returns_env = []
        returns_from = []
        # Compute policy loss
        logits = self.policy_net(states)
        prob = F.softmax(logits, dim=-1)
        log_prob = F.log_softmax(logits, dim=-1)
        # Discounted returns, computed backwards through the episode.
        R = 0
        for r in rewards_env[::-1]:
            R = r + gamma * R
            returns_env.insert(0, R)
        R = 0
        for r in rewards_from[::-1]:
            R = r + gamma * R
            returns_from.insert(0, R)
        returns_env = torch.Tensor(returns_env).detach()
        # returns_from = torch.cat(returns_from, dim=0)
        # returns = returns_env + returns_from
        returns = returns_env
        # Normalize returns for variance reduction (guard the 1-step case).
        if len(returns) != 1:
            returns = (returns - returns.mean()) / (returns.std() + eps)
        else:
            returns -= returns.mean()
        # compute policy loss
        loss_entropy_p = - log_prob * prob
        loss_entropy = loss_entropy_p.mean()
        log_prob_act = torch.stack([log_prob[i][actions[i]] for i in range(len(actions))], dim=0)
        loss_policy = - torch.dot(returns, log_prob_act).view(1) / len(prob)
        loss = loss_policy + 0.001 * loss_entropy
        self.new_params = self.optimizer_p.update(self.policy_net, loss, retain_graph=True)
    def update_to_new_params(self):
        """Commit the pending inner-loop parameters into policy_net."""
        if self.new_params is None:
            raise ValueError("The policy has not been updated. "
                             "Please check that you had use 'update_policy()' before.")
        with torch.no_grad():
            # new_params is an (ordered) mapping name -> tensor; copy each
            # tensor into the matching live parameter in order.
            for p, new_p in zip(self.policy_net.parameters(), self.new_params.items()):
                p.copy_(new_p[1])
        self.new_params = None
    def update_rewards_giving(self, trajectorys, trajectorys_new, log_prob_act_other):
        """Update the reward network so that rewards given to other agents
        improve this agent's own (post-update) environment return.

        trajectorys: trajectories collected before the policy update
        trajectorys_new: trajectories collected after the policy update
        log_prob_act_other: per-agent log-probs of the actions taken,
            differentiable w.r.t. this agent's reward network
        """
        self.optimizer_r.zero_grad()
        loss_policy = torch.Tensor([0])
        loss_cost = None
        states = [trajectory.get_state() for trajectory in trajectorys]
        actions_hot = [trajectory.get_action_hot() for trajectory in trajectorys]
        n_step = len(states[0])
        # compute loss of cost
        n_agent = self.n_agent - 1
        actions_other = copy.copy(actions_hot)
        del actions_other[self.id]
        # Reshape from per-agent lists to per-timestep flattened vectors.
        actions_other = [[actions_other[j][i] for j in range(n_agent)] for i in range(n_step)]
        actions_other = [torch.Tensor(actions_other[i]).view(-1) for i in range(n_step)]
        obs = states[self.id]
        feeds_r = [torch.cat([obs[i], actions_other[i]]) for i in range(n_step)]
        feeds_r = torch.stack(feeds_r).float()
        rewards_giving = self.reward_net(feeds_r)
        rewards_giving_total = torch.zeros(n_step)
        # Discounted total of rewards handed to the OTHER agents (self's
        # own column is masked out).
        for i in range(n_step):
            rewards_giving_total[i] = (gamma ** i) * (rewards_giving[i][torch.arange(rewards_giving.size(1)) != self.id].sum())
        loss_cost = rewards_giving_total.sum()
        # compute policy loss
        rewards_env_new = [trajectory.get_reward_env() for trajectory in trajectorys_new]
        returns_env = []
        R = 0
        reward_env = rewards_env_new[self.id]
        for r in reward_env[::-1]:
            R = r + gamma * R
            returns_env.insert(0, R)
        returns_env = torch.Tensor(returns_env).float()
        if len(returns_env) != 1:
            returns_env = (returns_env - returns_env.mean()) / (returns_env.std() + eps)
        else:
            returns_env = returns_env - returns_env.mean() + eps
        # Credit flows through the other agents' log-probs, which depend
        # on the rewards this agent's reward_net produced.
        for idx in range(self.n_agent):
            if idx != self.id:
                loss_term = log_prob_act_other[idx]
                loss_policy -= torch.dot(returns_env.detach(), loss_term).view(-1)
        # loss = loss_policy + 0.0001 * loss_cost
        loss = loss_policy
        loss.backward()
        # print('for agent ', self.id, ', the gradient of its parameters is as following:')
        # for name, parms in self.reward_net.named_parameters():
        #     if name == "l3.bias":
        #         print('-->name:', name, '-->grad_requirs:', parms.requires_grad,
        #               ' \n-->grad_value:', parms.grad)
        # print('----------------------------------------------------\n\n')
        self.optimizer_r.step()
| [
"torch.nn.functional.softmax",
"torch.distributions.Categorical",
"torch.stack",
"torch.Tensor",
"lio.utils.util.Adam_Optim",
"torch.no_grad",
"lio.model.actor_net.Reward_net",
"torch.add",
"lio.utils.util.gd",
"torch.finfo",
"torch.nn.functional.log_softmax",
"numpy.zeros",
"torch.zeros",
... | [((609, 635), 'torch.finfo', 'torch.finfo', (['torch.float32'], {}), '(torch.float32)\n', (620, 635), False, 'import torch\n'), ((1104, 1149), 'lio.model.meta_actor_net.MetaNet_PG', 'MetaNet_PG', (['self.l_obs', 'self.n_action', 'l1', 'l2'], {}), '(self.l_obs, self.n_action, l1, l2)\n', (1114, 1149), False, 'from lio.model.meta_actor_net import MetaNet_PG\n'), ((1176, 1227), 'lio.model.actor_net.Reward_net', 'Reward_net', (['self.l_obs', 'self.l_act', 'n_agent', 'l1', '(16)'], {}), '(self.l_obs, self.l_act, n_agent, l1, 16)\n', (1186, 1227), False, 'from lio.model.actor_net import Reward_net\n'), ((1403, 1442), 'lio.utils.util.Adam_Optim', 'Adam_Optim', (['self.policy_net'], {'lr': 'self.lr'}), '(self.policy_net, lr=self.lr)\n', (1413, 1442), False, 'from lio.utils.util import Adam_Optim\n'), ((2650, 2672), 'copy.copy', 'copy.copy', (['list_action'], {}), '(list_action)\n', (2659, 2672), False, 'import copy\n'), ((3450, 3475), 'torch.Tensor', 'torch.Tensor', (['returns_env'], {}), '(returns_env)\n', (3462, 3475), False, 'import torch\n'), ((3499, 3525), 'torch.Tensor', 'torch.Tensor', (['returns_from'], {}), '(returns_from)\n', (3511, 3525), False, 'import torch\n'), ((3544, 3580), 'torch.add', 'torch.add', (['returns_env', 'returns_from'], {}), '(returns_env, returns_from)\n', (3553, 3580), False, 'import torch\n'), ((3700, 3725), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (3709, 3725), True, 'import torch.nn.functional as F\n'), ((3745, 3774), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (3758, 3774), True, 'import torch.nn.functional as F\n'), ((4142, 4179), 'lio.utils.util.gd', 'util.gd', (['self.policy_net', 'loss'], {'lr': 'lr'}), '(self.policy_net, loss, lr=lr)\n', (4149, 4179), True, 'import lio.utils.util as util\n'), ((4677, 4702), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (4686, 
4702), True, 'import torch.nn.functional as F\n'), ((4722, 4751), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (4735, 4751), True, 'import torch.nn.functional as F\n'), ((6331, 6348), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (6343, 6348), False, 'import torch\n'), ((6652, 6674), 'copy.copy', 'copy.copy', (['actions_hot'], {}), '(actions_hot)\n', (6661, 6674), False, 'import copy\n'), ((7133, 7152), 'torch.zeros', 'torch.zeros', (['n_step'], {}), '(n_step)\n', (7144, 7152), False, 'import torch\n'), ((1843, 1858), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1856, 1858), False, 'import torch\n'), ((1878, 1901), 'torch.Tensor', 'torch.Tensor', (['self._obs'], {}), '(self._obs)\n', (1890, 1901), False, 'import torch\n'), ((1981, 2005), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(0)'}), '(logits, dim=0)\n', (1990, 2005), True, 'import torch.nn.functional as F\n'), ((2094, 2116), 'torch.distributions.Categorical', 'Categorical', (['probs_hat'], {}), '(probs_hat)\n', (2105, 2116), False, 'from torch.distributions import Categorical\n'), ((2326, 2346), 'numpy.zeros', 'np.zeros', (['self.l_act'], {}), '(self.l_act)\n', (2334, 2346), True, 'import numpy as np\n'), ((6013, 6028), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6026, 6028), False, 'import torch\n'), ((6943, 6980), 'torch.cat', 'torch.cat', (['[obs[i], actions_other[i]]'], {}), '([obs[i], actions_other[i]])\n', (6952, 6980), False, 'import torch\n'), ((2730, 2756), 'torch.Tensor', 'torch.Tensor', (['action_other'], {}), '(action_other)\n', (2742, 2756), False, 'import torch\n'), ((2780, 2803), 'torch.Tensor', 'torch.Tensor', (['self._obs'], {}), '(self._obs)\n', (2792, 2803), False, 'import torch\n'), ((2829, 2859), 'torch.cat', 'torch.cat', (['[obs, action_other]'], {}), '([obs, action_other])\n', (2838, 2859), False, 'import torch\n'), ((5012, 5037), 'torch.Tensor', 'torch.Tensor', 
(['returns_env'], {}), '(returns_env)\n', (5024, 5037), False, 'import torch\n'), ((7023, 7043), 'torch.stack', 'torch.stack', (['feeds_r'], {}), '(feeds_r)\n', (7034, 7043), False, 'import torch\n'), ((7690, 7715), 'torch.Tensor', 'torch.Tensor', (['returns_env'], {}), '(returns_env)\n', (7702, 7715), False, 'import torch\n'), ((6830, 6860), 'torch.Tensor', 'torch.Tensor', (['actions_other[i]'], {}), '(actions_other[i])\n', (6842, 6860), False, 'import torch\n'), ((4013, 4045), 'torch.dot', 'torch.dot', (['returns', 'log_prob_act'], {}), '(returns, log_prob_act)\n', (4022, 4045), False, 'import torch\n'), ((5577, 5609), 'torch.dot', 'torch.dot', (['returns', 'log_prob_act'], {}), '(returns, log_prob_act)\n', (5586, 5609), False, 'import torch\n')] |
""" Python script to train HRNet + shiftNet for multi frame super resolution (MFSR)
Credits:
This code is adapted from ElementAI's HighRes-Net: https://github.com/ElementAI/HighRes-net
"""
import os
import gc
import json
import argparse
import datetime
from functools import partial
from collections import defaultdict, deque
import numpy as np
import cv2 as cv
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from sr.metrics import calculate_metrics
from hrnet.src.DeepNetworks.HRNet import HRNet
from hrnet.src.DeepNetworks.ShiftNet import ShiftNet
from hrnet.src.utils import normalize_plotting
from hrnet.src.utils import distributions_plot
from sr.data_loader import ImagesetDataset, augment
from sr.metrics import compute_perceptual_loss
from tqdm.auto import tqdm
from tensorboardX import SummaryWriter
import wandb
def register_batch(shiftNet, lrs, reference):
    """
    Estimates a shift for every low-res view against the reference frame.
    Args:
        shiftNet: torch.model, registration network
        lrs: tensor (batch size, views, C, W, H), images to shift
        reference: tensor (batch size, 1, C, W, H), reference images to shift
    Returns:
        thetas: tensor (batch size, views, 2)
    """
    # Pair the reference with each view along the view axis and let the
    # registration network predict one translation per pair.
    per_view_thetas = [
        shiftNet(torch.cat([reference, lrs[:, v:v + 1]], 1))
        for v in range(lrs.size(1))
    ]
    return torch.stack(per_view_thetas, 1)
return thetas
def apply_shifts(shiftNet, images, thetas, device):
    """
    Applies sub-pixel translations to images with Lanczos interpolation.
    Args:
        shiftNet: torch.model, provides the warping transform
        images: tensor (batch size, views, C, W, H), images to shift
        thetas: tensor (batch size, views, 2), translation params
        device: torch device passed through to shiftNet.transform
    Returns:
        tensor (batch size, views, C, W, H), warped images
    """
    batch_size, n_views, channels, height, width = images.shape
    # Flatten the batch and view axes so the transform sees plain images.
    flat_images = images.view(-1, channels, height, width)
    flat_thetas = thetas.view(-1, 2)
    warped = shiftNet.transform(flat_thetas, flat_images, device=device)
    # Restore the (batch, views, ...) layout.
    return warped.view(-1, n_views, channels, flat_images.size(2), flat_images.size(3))
def resize_batch_images(batch, fx=3, fy=3, interpolation=cv.INTER_CUBIC):
    """Upscale every image in a (B, C, H, W) tensor batch with OpenCV.

    Each image is moved to channels-last for cv.resize and the batch is
    reassembled as a channels-first tensor. OpenCV drops a singleton
    channel axis, so it is restored when that happens.
    """
    scaled = []
    for img in batch:
        chw = img.detach().cpu().numpy()
        hwc = np.moveaxis(chw, 0, 2)
        scaled.append(cv.resize(hwc, None, fx=fx, fy=fy, interpolation=interpolation))
    resized = torch.tensor(scaled)
    # The channel dimension was 1 and was lost by opencv...
    if resized.ndim < batch.ndim:
        b, w, h = resized.shape
        return resized.view(b, 1, w, h)
    return resized.permute([0, 3, 1, 2])
def save_per_sample_scores(val_score_lists, baseline_val_score_lists, val_names, filename):
    """Dump per-sample validation scores (model and baseline) to JSON.

    Args:
        val_score_lists: dict, metric name -> list of per-batch score arrays
        baseline_val_score_lists: dict, same layout for the baseline scores
        val_names: list, per-sample identifiers stored under the 'name' key
        filename: str, destination JSON path
    """
    # Flatten the per-batch arrays of both score dicts into plain lists;
    # baseline entries overwrite model entries on a key clash, matching
    # the original two-pass behavior.
    flattened = {
        metric: np.concatenate(chunks).tolist()
        for source in (val_score_lists, baseline_val_score_lists)
        for metric, chunks in source.items()
    }
    flattened['name'] = val_names
    with open(filename, 'w') as out_file:
        json.dump(flattened, out_file)
def trainAndGetBestModel(fusion_model, regis_model, optimizer, dataloaders, config, perceptual_loss_model=None):
    """
    Trains HRNet and ShiftNet for Multi-Frame Super Resolution (MFSR), and saves best model.

    The checkpoint (and its per-sample validation scores) is rewritten every
    time the validation loss improves.

    Args:
        fusion_model: torch.model, HRNet
        regis_model: torch.model, ShiftNet
        optimizer: torch.optim, optimizer to minimize loss
        dataloaders: dict, wraps train and validation dataloaders
        config: dict, configuration file
        perceptual_loss_model: model used for perceptual loss; when None, no
            perceptual term is added to the loss
    """
    # Set params from config
    num_epochs = config['training']['num_epochs']
    batch_size = config['training']['batch_size']
    loss_metric = config['training']['loss_metric']
    val_metrics = config['training']['validation_metrics']
    apply_correction = config['training']['apply_correction']
    use_reg_regularisation = config['training']['use_reg_regularization']
    lambda_ = config['training']['lambda']
    use_kl_div_loss = config['training']['use_kl_div_loss']
    eta_ = config['training']['eta']
    upscale_factor = config['network']['upscale_factor']
    reg_offset = config['training']['reg_offset']
    plot_chnls = config['visualization']['channels_to_plot']
    distribution_sampling_proba = config['visualization']['distribution_sampling_proba']
    assert loss_metric in ['MAE', 'MSE', 'SSIM', 'MIXED']
    # Logging: checkpoints/scores go either to the wandb run dir or to
    # per-run local folders named after batch size and timestamp.
    subfolder_pattern = 'batch_{}_time_{}'.format(batch_size, f"{datetime.datetime.now():%Y-%m-%d-%H-%M-%S-%f}")
    if config['training']['wandb']:
        wandb.watch(fusion_model)
        wandb.watch(regis_model)
        out_fusion = os.path.join(wandb.run.dir, 'HRNet.pth')
        out_regis = os.path.join(wandb.run.dir, 'ShiftNet.pth')
        out_val_scores = os.path.join(wandb.run.dir, 'val_scores.json')
    else:
        checkpoint_dir_run = os.path.join(config['paths']['checkpoint_dir'], subfolder_pattern)
        scores_dir_run = os.path.join(config['paths']['scores_dir'], subfolder_pattern)
        os.makedirs(checkpoint_dir_run, exist_ok=True)
        os.makedirs(scores_dir_run, exist_ok=True)
        out_fusion = os.path.join(checkpoint_dir_run, 'HRNet.pth')
        out_regis = os.path.join(checkpoint_dir_run, 'ShiftNet.pth')
        out_val_scores = os.path.join(scores_dir_run, 'val_scores.json')
    tb_logging_dir = config['paths']['tb_log_file_dir']
    logging_dir = os.path.join(tb_logging_dir, subfolder_pattern)
    os.makedirs(logging_dir, exist_ok=True)
    writer = SummaryWriter(logging_dir)
    # Set backend
    device = torch.device('cuda' if torch.cuda.is_available() and config['training']['use_gpu'] else 'cpu')
    fusion_model.to(device)
    regis_model.to(device)
    # Iterate
    best_score_loss = np.Inf
    val_names_saved = False
    val_names = deque()
    for epoch in tqdm(range(0, num_epochs), desc='Epochs'):
        # Set train mode
        fusion_model.train()
        regis_model.train()
        # Reset epoch loss accumulators
        train_loss = 0.
        train_loss_reg = 0.
        train_loss_kl = 0.
        train_loss_perceptual = 0.
        for sample in tqdm(dataloaders['train'], desc='Training iter. %d' % epoch):
            # Reset parameter gradients
            optimizer.zero_grad()
            # Potentially transfer data to GPU
            lrs = sample['lr'].float().to(device, non_blocking=True)
            alphas = sample['alphas'].float().to(device, non_blocking=True)
            # Last valid (non-padded) LR frame of each sample, per the alpha mask.
            lrs_last = lrs[np.arange(len(alphas)), torch.sum(alphas, dim=1, dtype=torch.int64) - 1]
            hrs = sample['hr'].float().to(device, non_blocking=True)
            # Fuse multiple frames into (B, 1, upscale_factor*W, upscale_factor*H)
            srs = fusion_model(lrs, alphas)
            batch, c, w, h = srs.shape
            srs = srs.view(batch, 1, c, w, h)
            # Register batch wrt HR; a reg_offset border is cropped first.
            # NOTE(review): `h` and `w` appear swapped in this view relative to
            # the `batch, c, w, h = srs.shape` unpacking above; harmless for
            # square patches -- confirm for non-square inputs.
            shifts = register_batch(
                regis_model,
                srs[:, :, :, reg_offset:-reg_offset, reg_offset:-reg_offset],
                reference=hrs[:, :, reg_offset:-reg_offset, reg_offset:-reg_offset].view(-1, 1,
                                                                                          c,
                                                                                          h-2*reg_offset,
                                                                                          w-2*reg_offset))
            srs_shifted = apply_shifts(regis_model, srs, shifts, device)[:, 0]
            # Training loss
            scores = calculate_metrics(hrs=hrs,
                                       srs=srs_shifted,
                                       metrics=loss_metric,
                                       apply_correction=apply_correction)
            loss = torch.mean(scores)
            if loss_metric == 'SSIM':
                # SSIM is a similarity score; flip it into a loss to minimize.
                loss = -1 * loss + 1
            loss_registration = torch.mean(torch.linalg.norm(shifts, ord=2, dim=1))
            if use_reg_regularisation:
                loss += lambda_ * loss_registration
            srs = srs.view(batch, c, w, h)
            kl_losses = 0
            if use_kl_div_loss:
                # Penalize per-channel mean differences between the SR output
                # and the last valid LR frame.
                for nc in np.arange(c):
                    mean_diffs = srs[:, nc, ...].mean(dim=(1, 2)) - lrs_last[:, nc, ...].mean(dim=(1, 2))
                    tmp_kl_loss = torch.abs(mean_diffs).mean()
                    loss += eta_ * tmp_kl_loss
                    kl_losses += tmp_kl_loss
                    del tmp_kl_loss
                kl_losses /= c
            # adding perceptual loss
            if perceptual_loss_model:
                feat_perceptual_loss, style_perceptual_loss = compute_perceptual_loss(hrs,
                                                                                      srs,
                                                                                      perceptual_loss_model)
                perceptual_loss = feat_perceptual_loss + style_perceptual_loss
                loss += config["perceptual_loss"]["weight"] * perceptual_loss
                del feat_perceptual_loss, style_perceptual_loss
            # Backprop
            loss.backward()
            optimizer.step()
            # Scale loss so that epoch loss is the average of batch losses
            num_batches = len(dataloaders['train'].dataset) / len(hrs)
            train_loss += loss.detach().item() / num_batches
            train_loss_reg += loss_registration.detach().item() / num_batches
            train_loss_kl += kl_losses / num_batches
            # BUGFIX: `perceptual_loss` is only bound when a perceptual model
            # is supplied; referencing it unconditionally raised a NameError
            # on the first batch whenever perceptual_loss_model was None.
            if perceptual_loss_model:
                train_loss_perceptual += perceptual_loss.detach().item() / num_batches
                del perceptual_loss
            # Try releasing some memory
            del lrs, alphas, hrs, srs, srs_shifted, scores, loss, loss_registration, sample, kl_losses
            gc.collect()
            torch.cuda.empty_cache()
        # Set eval mode
        fusion_model.eval()
        val_scores = defaultdict(float)
        baseline_val_scores = defaultdict(float)
        val_score_lists = defaultdict(list)
        baseline_val_score_lists = defaultdict(list)
        lrs_ref = None
        hrs_ref = None
        srs_ref = None
        lin_interp_img = None
        distribution_s2, distribution_deimos, distribution_sr = [], [], []
        # Run validation
        with torch.no_grad():
            for sample in tqdm(dataloaders['val'], desc='Valid. iter. %d' % epoch):
                # Potentially transfer data to GPU
                lrs_cpu = sample['lr'].float()
                hrs_cpu = sample['hr'].float()
                lrs = lrs_cpu.to(device, non_blocking=True)
                hrs = hrs_cpu.to(device, non_blocking=True)
                alphas = sample['alphas'].float().to(device, non_blocking=True)
                # Inference
                srs = fusion_model(lrs, alphas)
                if np.random.random() < distribution_sampling_proba:  # sampling....
                    for lr, hr, sr, a in zip(lrs_cpu, hrs_cpu, srs.cpu().numpy(), sample['alphas']):
                        num_valid = torch.sum(a, dim=0, dtype=torch.int64).int()
                        distribution_s2.append(lr[:num_valid, ...])
                        distribution_deimos.append(np.expand_dims(hr, 0))
                        distribution_sr.append(np.expand_dims(sr, 0))
                # Update scores (summed per batch here, averaged after the loop)
                metrics = calculate_metrics(hrs, srs, val_metrics, apply_correction)
                for metric, batch_scores in metrics.items():
                    batch_scores = batch_scores.cpu()
                    val_scores[metric] += torch.sum(batch_scores).item()
                    val_score_lists[metric].append(batch_scores.numpy().flatten())
                # First val. iter.: add names, calculate baseline
                if not val_names_saved:
                    val_names.append(sample['name'])
                    latest_s2_images = lrs_cpu[np.arange(len(alphas)), torch.sum(alphas, dim=1, dtype=torch.int64) - 1]
                    lin_interp_imgs = resize_batch_images(latest_s2_images, fx=upscale_factor, fy=upscale_factor)
                    baseline_metrics = calculate_metrics(hrs_cpu, lin_interp_imgs, val_metrics, apply_correction)
                    for metric, batch_scores in baseline_metrics.items():
                        batch_scores = batch_scores.cpu()
                        baseline_val_scores[f'{metric}_baseline'] += torch.sum(batch_scores).item()
                        baseline_val_score_lists[f'{metric}_baseline'].append(batch_scores.numpy().flatten())
                    del baseline_metrics
                # Keep a reference for plotting
                if lrs_ref is None:
                    lrs_ref = lrs_cpu[0].numpy()
                    hrs_ref = hrs_cpu[0].numpy()
                    # NOTE(review): from epoch 1 onwards `lin_interp_imgs` is
                    # the stale value from the previous epoch's first batch --
                    # identical only if the val loader is unshuffled; confirm.
                    lin_interp_img = lin_interp_imgs[0].numpy()
                if srs_ref is None:
                    srs_ref = srs[0].cpu().numpy()
                # Try releasing some memory
                del lrs_cpu, hrs_cpu, lrs, alphas, hrs, srs, metrics, batch_scores, sample
                gc.collect()
                torch.cuda.empty_cache()
            s2 = np.concatenate(distribution_s2)
            deimos = np.concatenate(distribution_deimos)
            sresolved = np.concatenate(distribution_sr)
        # Compute the average scores per sample (note the sum instead of the mean above)
        n = len(dataloaders['val'].dataset)
        for metric in val_scores:
            val_scores[metric] /= n
        # Validation file identifiers
        if not val_names_saved:
            val_names = np.concatenate(val_names).tolist()
            val_names_saved = True
        for metric in baseline_val_scores:
            baseline_val_scores[metric] /= n
        # Save improved model
        val_loss_metric = loss_metric if not use_kl_div_loss else 'SSIM'
        val_scores_loss = val_scores[val_loss_metric]
        if val_loss_metric == 'SSIM':
            val_scores_loss = -1 * val_scores_loss + 1
        if val_scores_loss < best_score_loss:
            print('Saving model (val. loss has improved).')
            torch.save(fusion_model.state_dict(), out_fusion)
            torch.save(regis_model.state_dict(), out_regis)
            save_per_sample_scores(val_score_lists, baseline_val_score_lists, val_names, out_val_scores)
            best_score_loss = val_scores_loss
        # Plotting
        lrs = lrs_ref
        srs = srs_ref
        hrs = hrs_ref
        normalized_srs = (srs - np.min(srs)) / np.max(srs)
        normalized_plot = normalized_srs[plot_chnls, ...]
        lrs_plot = np.array([normalize_plotting(x[plot_chnls, ...]) for x in lrs if np.any(x)])
        error_map = hrs - srs
        writer.add_image('SR Image', normalize_plotting(normalized_plot), epoch, dataformats='HWC')
        writer.add_image('Error Map', normalize_plotting(error_map[plot_chnls, ...]), epoch, dataformats='HWC')
        writer.add_image('HR GT', normalize_plotting(hrs[plot_chnls, ...]), epoch, dataformats='HWC')
        writer.add_images('S2', np.moveaxis(lrs_plot, 3, 1), epoch, dataformats='NCHW')
        writer.add_scalar('train/loss', train_loss, epoch)
        for metric in val_metrics:
            writer.add_scalar('val/%s' % metric.lower(), val_scores[metric], epoch)
        # wandb
        if config['training']['wandb']:
            wandb.log({'Train loss': train_loss,
                       'Train loss registration': train_loss_reg,
                       'Train KL loss': train_loss_kl,
                       'Train loss perceptual': train_loss_perceptual}, step=epoch)
            wandb.log({'sr': [wandb.Image(normalize_plotting(normalized_plot), caption='SR Image')]}, step=epoch)
            wandb.log({'gt': [wandb.Image(normalize_plotting(hrs[plot_chnls, ...]), caption='HR GT')]}, step=epoch)
            wandb.log({'S2': [wandb.Image(x, caption='S2 GT') for x in lrs_plot]}, step=epoch)
            wandb.log({'S2 bilinear interpolation baseline': [wandb.Image(normalize_plotting(lin_interp_img[plot_chnls, ...]))]}, step=epoch)
            wandb.log({'Distributions': [wandb.Image(normalize_plotting(hrs[plot_chnls, ...]), caption='HR GT')]}, step=epoch)
            wandb.log({'Distribution Blue': [wandb.Image(distributions_plot(s2, deimos, sresolved, 0), caption='Band Blue')],
                       'Distribution Green': [wandb.Image(distributions_plot(s2, deimos, sresolved, 1), caption='Band Green')],
                       'Distributions Red': [wandb.Image(distributions_plot(s2, deimos, sresolved, 2), caption='Band Red')],
                       'DIstributions NIR': [wandb.Image(distributions_plot(s2, deimos, sresolved, 3), caption='Band NIR')]
                       }, step=epoch)
            wandb.log(val_scores, step=epoch)
            wandb.log(baseline_val_scores, step=epoch)
        del lrs, srs, hrs, lrs_ref, srs_ref, hrs_ref
        del val_scores, baseline_val_scores, val_score_lists, baseline_val_score_lists
        gc.collect()
    writer.close()
def main(
    config, data_df, filesystem=None, country_norm_df=None, normalize=True, norm_deimos_npz=None, norm_s2_npz=None, perceptual_loss_model=None,
    fusion_model = None, regis_model = None
):
    """
    Given a configuration, trains HRNet and ShiftNet for Multi-Frame Super Resolution (MFSR), and saves best model.
    Args:
        config: dict, configuration file
        data_df: dataframe with a `train_test_validation` split column and
            per-sample `singleton_npz_filename` paths
    """
    # Seed every RNG and pin cuDNN to deterministic kernels for reproducibility.
    seed = config['training']['seed']
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    training_cfg = config['training']
    channels_labels = training_cfg['channels_labels']
    channels_features = training_cfg['channels_features']
    lr_patch_size = training_cfg['patch_size']
    upscale_factor = config['network']['upscale_factor']
    reg_offset = training_cfg['reg_offset']
    histogram_matching = training_cfg['histogram_matching']
    # Build networks from the config unless pre-built instances were passed in.
    if fusion_model is None:
        fusion_model = HRNet(config["network"])
    if regis_model is None:
        regis_model = ShiftNet(in_channel=len(channels_labels),
                              patch_size=lr_patch_size*upscale_factor - 2*reg_offset)
    # One optimizer drives both networks jointly.
    optimizer = optim.Adam(list(fusion_model.parameters()) + list(regis_model.parameters()),
                           lr=training_cfg['lr'])
    data_directory = config['paths']['prefix']
    # Dataloaders
    batch_size = training_cfg['batch_size']
    n_workers = training_cfg['n_workers']
    n_views = training_cfg['n_views']
    use_augment = training_cfg['augment']
    aug_fn = partial(augment, permute_timestamps=False) if use_augment else None
    # Keyword arguments shared by the train and validation datasets.
    shared_ds_kwargs = dict(
        imset_dir=data_directory,
        time_first=True,
        filesystem=filesystem,
        country_norm_df=country_norm_df,
        normalize=normalize,
        norm_deimos_npz=norm_deimos_npz,
        norm_s2_npz=norm_s2_npz,
        channels_labels=channels_labels,
        channels_feats=channels_features,
        n_views=n_views,
        padding='zeros',
    )
    # Train split: augmented (when enabled), shuffled.
    train_samples = data_df[data_df.train_test_validation == 'train'].singleton_npz_filename.values
    train_dataset = ImagesetDataset(
        imset_npz_files=train_samples,
        transform=aug_fn,
        histogram_matching=histogram_matching,
        **shared_ds_kwargs)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=n_workers,
        pin_memory=True)
    # Validation split: no augmentation, fixed order.
    validation_samples = data_df[data_df.train_test_validation == 'validation'].singleton_npz_filename.values
    val_dataset = ImagesetDataset(
        imset_npz_files=validation_samples,
        transform=None,
        histogram_matching=False,
        **shared_ds_kwargs)
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=n_workers,
        pin_memory=True)
    dataloaders = {'train': train_dataloader, 'val': val_dataloader}
    # Train model
    torch.cuda.empty_cache()
    trainAndGetBestModel(fusion_model, regis_model, optimizer, dataloaders, config, perceptual_loss_model)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', help='path of the config file', default='config/config.json')
    args = parser.parse_args()
    # Fail fast with a descriptive error instead of `assert`, which is
    # silently stripped when Python runs with optimizations (-O).
    if not os.path.isfile(args.config):
        raise FileNotFoundError('Config file not found: %s' % args.config)
    with open(args.config, 'r') as read_file:
        config = json.load(read_file)
    # NOTE(review): `main` also takes a required `data_df` argument; calling it
    # with only `config` will raise a TypeError -- confirm the intended entry
    # point supplies the dataframe.
    main(config)
| [
"wandb.log",
"torch.cuda.is_available",
"torch.sum",
"hrnet.src.utils.normalize_plotting",
"numpy.moveaxis",
"numpy.arange",
"torch.linalg.norm",
"collections.deque",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"torch.mean",
"numpy.random.random",
"numpy.max",
"numpy.random.se... | [((1456, 1478), 'torch.stack', 'torch.stack', (['thetas', '(1)'], {}), '(thetas, 1)\n', (1467, 1478), False, 'import torch\n'), ((5547, 5594), 'os.path.join', 'os.path.join', (['tb_logging_dir', 'subfolder_pattern'], {}), '(tb_logging_dir, subfolder_pattern)\n', (5559, 5594), False, 'import os\n'), ((5599, 5638), 'os.makedirs', 'os.makedirs', (['logging_dir'], {'exist_ok': '(True)'}), '(logging_dir, exist_ok=True)\n', (5610, 5638), False, 'import os\n'), ((5653, 5679), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['logging_dir'], {}), '(logging_dir)\n', (5666, 5679), False, 'from tensorboardX import SummaryWriter\n'), ((5951, 5958), 'collections.deque', 'deque', ([], {}), '()\n', (5956, 5958), False, 'from collections import defaultdict, deque\n'), ((17754, 17774), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (17768, 17774), True, 'import numpy as np\n'), ((17779, 17802), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (17796, 17802), False, 'import torch\n'), ((17807, 17835), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (17829, 17835), False, 'import torch\n'), ((19276, 19680), 'sr.data_loader.ImagesetDataset', 'ImagesetDataset', ([], {'imset_dir': 'data_directory', 'imset_npz_files': 'train_samples', 'time_first': '(True)', 'filesystem': 'filesystem', 'country_norm_df': 'country_norm_df', 'normalize': 'normalize', 'norm_deimos_npz': 'norm_deimos_npz', 'norm_s2_npz': 'norm_s2_npz', 'channels_labels': 'channels_labels', 'channels_feats': 'channels_features', 'n_views': 'n_views', 'padding': '"""zeros"""', 'transform': 'aug_fn', 'histogram_matching': 'histogram_matching'}), "(imset_dir=data_directory, imset_npz_files=train_samples,\n time_first=True, filesystem=filesystem, country_norm_df=country_norm_df,\n normalize=normalize, norm_deimos_npz=norm_deimos_npz, norm_s2_npz=\n norm_s2_npz, channels_labels=channels_labels, channels_feats=\n channels_features, 
n_views=n_views, padding='zeros', transform=aug_fn,\n histogram_matching=histogram_matching)\n", (19291, 19680), False, 'from sr.data_loader import ImagesetDataset, augment\n'), ((19796, 19903), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'n_workers', 'pin_memory': '(True)'}), '(train_dataset, batch_size=batch_size, shuffle=True, num_workers=\n n_workers, pin_memory=True)\n', (19806, 19903), False, 'from torch.utils.data import DataLoader\n'), ((20099, 20494), 'sr.data_loader.ImagesetDataset', 'ImagesetDataset', ([], {'imset_dir': 'data_directory', 'imset_npz_files': 'validation_samples', 'time_first': '(True)', 'filesystem': 'filesystem', 'country_norm_df': 'country_norm_df', 'normalize': 'normalize', 'norm_deimos_npz': 'norm_deimos_npz', 'norm_s2_npz': 'norm_s2_npz', 'channels_labels': 'channels_labels', 'channels_feats': 'channels_features', 'n_views': 'n_views', 'padding': '"""zeros"""', 'transform': 'None', 'histogram_matching': '(False)'}), "(imset_dir=data_directory, imset_npz_files=\n validation_samples, time_first=True, filesystem=filesystem,\n country_norm_df=country_norm_df, normalize=normalize, norm_deimos_npz=\n norm_deimos_npz, norm_s2_npz=norm_s2_npz, channels_labels=\n channels_labels, channels_feats=channels_features, n_views=n_views,\n padding='zeros', transform=None, histogram_matching=False)\n", (20114, 20494), False, 'from sr.data_loader import ImagesetDataset, augment\n'), ((20608, 20714), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'n_workers', 'pin_memory': '(True)'}), '(val_dataset, batch_size=batch_size, shuffle=False, num_workers=\n n_workers, pin_memory=True)\n', (20618, 20714), False, 'from torch.utils.data import DataLoader\n'), ((20848, 20872), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (20870, 20872), False, 'import torch\n'), ((21023, 
21048), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (21046, 21048), False, 'import argparse\n'), ((21190, 21217), 'os.path.isfile', 'os.path.isfile', (['args.config'], {}), '(args.config)\n', (21204, 21217), False, 'import os\n'), ((1335, 1377), 'torch.cat', 'torch.cat', (['[reference, lrs[:, i:i + 1]]', '(1)'], {}), '([reference, lrs[:, i:i + 1]], 1)\n', (1344, 1377), False, 'import torch\n'), ((3119, 3148), 'json.dump', 'json.dump', (['out_dict', 'out_file'], {}), '(out_dict, out_file)\n', (3128, 3148), False, 'import json\n'), ((4703, 4728), 'wandb.watch', 'wandb.watch', (['fusion_model'], {}), '(fusion_model)\n', (4714, 4728), False, 'import wandb\n'), ((4737, 4761), 'wandb.watch', 'wandb.watch', (['regis_model'], {}), '(regis_model)\n', (4748, 4761), False, 'import wandb\n'), ((4784, 4824), 'os.path.join', 'os.path.join', (['wandb.run.dir', '"""HRNet.pth"""'], {}), "(wandb.run.dir, 'HRNet.pth')\n", (4796, 4824), False, 'import os\n'), ((4845, 4888), 'os.path.join', 'os.path.join', (['wandb.run.dir', '"""ShiftNet.pth"""'], {}), "(wandb.run.dir, 'ShiftNet.pth')\n", (4857, 4888), False, 'import os\n'), ((4914, 4960), 'os.path.join', 'os.path.join', (['wandb.run.dir', '"""val_scores.json"""'], {}), "(wandb.run.dir, 'val_scores.json')\n", (4926, 4960), False, 'import os\n'), ((5000, 5066), 'os.path.join', 'os.path.join', (["config['paths']['checkpoint_dir']", 'subfolder_pattern'], {}), "(config['paths']['checkpoint_dir'], subfolder_pattern)\n", (5012, 5066), False, 'import os\n'), ((5092, 5154), 'os.path.join', 'os.path.join', (["config['paths']['scores_dir']", 'subfolder_pattern'], {}), "(config['paths']['scores_dir'], subfolder_pattern)\n", (5104, 5154), False, 'import os\n'), ((5164, 5210), 'os.makedirs', 'os.makedirs', (['checkpoint_dir_run'], {'exist_ok': '(True)'}), '(checkpoint_dir_run, exist_ok=True)\n', (5175, 5210), False, 'import os\n'), ((5219, 5261), 'os.makedirs', 'os.makedirs', (['scores_dir_run'], {'exist_ok': '(True)'}), 
'(scores_dir_run, exist_ok=True)\n', (5230, 5261), False, 'import os\n'), ((5284, 5329), 'os.path.join', 'os.path.join', (['checkpoint_dir_run', '"""HRNet.pth"""'], {}), "(checkpoint_dir_run, 'HRNet.pth')\n", (5296, 5329), False, 'import os\n'), ((5350, 5398), 'os.path.join', 'os.path.join', (['checkpoint_dir_run', '"""ShiftNet.pth"""'], {}), "(checkpoint_dir_run, 'ShiftNet.pth')\n", (5362, 5398), False, 'import os\n'), ((5424, 5471), 'os.path.join', 'os.path.join', (['scores_dir_run', '"""val_scores.json"""'], {}), "(scores_dir_run, 'val_scores.json')\n", (5436, 5471), False, 'import os\n'), ((6272, 6332), 'tqdm.auto.tqdm', 'tqdm', (["dataloaders['train']"], {'desc': "('Training iter. %d' % epoch)"}), "(dataloaders['train'], desc='Training iter. %d' % epoch)\n", (6276, 6332), False, 'from tqdm.auto import tqdm\n'), ((10117, 10135), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (10128, 10135), False, 'from collections import defaultdict, deque\n'), ((10166, 10184), 'collections.defaultdict', 'defaultdict', (['float'], {}), '(float)\n', (10177, 10184), False, 'from collections import defaultdict, deque\n'), ((10212, 10229), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10223, 10229), False, 'from collections import defaultdict, deque\n'), ((10265, 10282), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10276, 10282), False, 'from collections import defaultdict, deque\n'), ((13396, 13427), 'numpy.concatenate', 'np.concatenate', (['distribution_s2'], {}), '(distribution_s2)\n', (13410, 13427), True, 'import numpy as np\n'), ((13445, 13480), 'numpy.concatenate', 'np.concatenate', (['distribution_deimos'], {}), '(distribution_deimos)\n', (13459, 13480), True, 'import numpy as np\n'), ((13501, 13532), 'numpy.concatenate', 'np.concatenate', (['distribution_sr'], {}), '(distribution_sr)\n', (13515, 13532), True, 'import numpy as np\n'), ((17261, 17273), 'gc.collect', 'gc.collect', ([], {}), '()\n', 
(17271, 17273), False, 'import gc\n'), ((18439, 18463), 'hrnet.src.DeepNetworks.HRNet.HRNet', 'HRNet', (["config['network']"], {}), "(config['network'])\n", (18444, 18463), False, 'from hrnet.src.DeepNetworks.HRNet import HRNet\n'), ((19062, 19104), 'functools.partial', 'partial', (['augment'], {'permute_timestamps': '(False)'}), '(augment, permute_timestamps=False)\n', (19069, 19104), False, 'from functools import partial\n'), ((21282, 21302), 'json.load', 'json.load', (['read_file'], {}), '(read_file)\n', (21291, 21302), False, 'import json\n'), ((7705, 7808), 'sr.metrics.calculate_metrics', 'calculate_metrics', ([], {'hrs': 'hrs', 'srs': 'srs_shifted', 'metrics': 'loss_metric', 'apply_correction': 'apply_correction'}), '(hrs=hrs, srs=srs_shifted, metrics=loss_metric,\n apply_correction=apply_correction)\n', (7722, 7808), False, 'from sr.metrics import calculate_metrics\n'), ((7941, 7959), 'torch.mean', 'torch.mean', (['scores'], {}), '(scores)\n', (7951, 7959), False, 'import torch\n'), ((9992, 10004), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10002, 10004), False, 'import gc\n'), ((10017, 10041), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (10039, 10041), False, 'import torch\n'), ((10498, 10513), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10511, 10513), False, 'import torch\n'), ((10541, 10597), 'tqdm.auto.tqdm', 'tqdm', (["dataloaders['val']"], {'desc': "('Valid. iter. %d' % epoch)"}), "(dataloaders['val'], desc='Valid. iter. 
%d' % epoch)\n", (10545, 10597), False, 'from tqdm.auto import tqdm\n'), ((14775, 14786), 'numpy.max', 'np.max', (['srs'], {}), '(srs)\n', (14781, 14786), True, 'import numpy as np\n'), ((15008, 15043), 'hrnet.src.utils.normalize_plotting', 'normalize_plotting', (['normalized_plot'], {}), '(normalized_plot)\n', (15026, 15043), False, 'from hrnet.src.utils import normalize_plotting\n'), ((15109, 15155), 'hrnet.src.utils.normalize_plotting', 'normalize_plotting', (['error_map[plot_chnls, ...]'], {}), '(error_map[plot_chnls, ...])\n', (15127, 15155), False, 'from hrnet.src.utils import normalize_plotting\n'), ((15217, 15257), 'hrnet.src.utils.normalize_plotting', 'normalize_plotting', (['hrs[plot_chnls, ...]'], {}), '(hrs[plot_chnls, ...])\n', (15235, 15257), False, 'from hrnet.src.utils import normalize_plotting\n'), ((15317, 15344), 'numpy.moveaxis', 'np.moveaxis', (['lrs_plot', '(3)', '(1)'], {}), '(lrs_plot, 3, 1)\n', (15328, 15344), True, 'import numpy as np\n'), ((15633, 15813), 'wandb.log', 'wandb.log', (["{'Train loss': train_loss, 'Train loss registration': train_loss_reg,\n 'Train KL loss': train_loss_kl, 'Train loss perceptual':\n train_loss_perceptual}"], {'step': 'epoch'}), "({'Train loss': train_loss, 'Train loss registration':\n train_loss_reg, 'Train KL loss': train_loss_kl, 'Train loss perceptual':\n train_loss_perceptual}, step=epoch)\n", (15642, 15813), False, 'import wandb\n'), ((17023, 17056), 'wandb.log', 'wandb.log', (['val_scores'], {'step': 'epoch'}), '(val_scores, step=epoch)\n', (17032, 17056), False, 'import wandb\n'), ((17069, 17111), 'wandb.log', 'wandb.log', (['baseline_val_scores'], {'step': 'epoch'}), '(baseline_val_scores, step=epoch)\n', (17078, 17111), False, 'import wandb\n'), ((2864, 2892), 'numpy.concatenate', 'np.concatenate', (['batch_scores'], {}), '(batch_scores)\n', (2878, 2892), True, 'import numpy as np\n'), ((2996, 3024), 'numpy.concatenate', 'np.concatenate', (['batch_scores'], {}), '(batch_scores)\n', (3010, 3024), 
True, 'import numpy as np\n'), ((4610, 4633), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4631, 4633), False, 'import datetime\n'), ((5735, 5760), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5758, 5760), False, 'import torch\n'), ((8092, 8131), 'torch.linalg.norm', 'torch.linalg.norm', (['shifts'], {'ord': '(2)', 'dim': '(1)'}), '(shifts, ord=2, dim=1)\n', (8109, 8131), False, 'import torch\n'), ((8365, 8377), 'numpy.arange', 'np.arange', (['c'], {}), '(c)\n', (8374, 8377), True, 'import numpy as np\n'), ((8847, 8903), 'sr.metrics.compute_perceptual_loss', 'compute_perceptual_loss', (['hrs', 'srs', 'perceptual_loss_model'], {}), '(hrs, srs, perceptual_loss_model)\n', (8870, 8903), False, 'from sr.metrics import compute_perceptual_loss\n'), ((11617, 11675), 'sr.metrics.calculate_metrics', 'calculate_metrics', (['hrs', 'srs', 'val_metrics', 'apply_correction'], {}), '(hrs, srs, val_metrics, apply_correction)\n', (11634, 11675), False, 'from sr.metrics import calculate_metrics\n'), ((13328, 13340), 'gc.collect', 'gc.collect', ([], {}), '()\n', (13338, 13340), False, 'import gc\n'), ((13357, 13381), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13379, 13381), False, 'import torch\n'), ((14760, 14771), 'numpy.min', 'np.min', (['srs'], {}), '(srs)\n', (14766, 14771), True, 'import numpy as np\n'), ((14874, 14912), 'hrnet.src.utils.normalize_plotting', 'normalize_plotting', (['x[plot_chnls, ...]'], {}), '(x[plot_chnls, ...])\n', (14892, 14912), False, 'from hrnet.src.utils import normalize_plotting\n'), ((11058, 11076), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (11074, 11076), True, 'import numpy as np\n'), ((12381, 12455), 'sr.metrics.calculate_metrics', 'calculate_metrics', (['hrs_cpu', 'lin_interp_imgs', 'val_metrics', 'apply_correction'], {}), '(hrs_cpu, lin_interp_imgs, val_metrics, apply_correction)\n', (12398, 12455), False, 'from sr.metrics import 
calculate_metrics\n'), ((13841, 13866), 'numpy.concatenate', 'np.concatenate', (['val_names'], {}), '(val_names)\n', (13855, 13866), True, 'import numpy as np\n'), ((14929, 14938), 'numpy.any', 'np.any', (['x'], {}), '(x)\n', (14935, 14938), True, 'import numpy as np\n'), ((6652, 6695), 'torch.sum', 'torch.sum', (['alphas'], {'dim': '(1)', 'dtype': 'torch.int64'}), '(alphas, dim=1, dtype=torch.int64)\n', (6661, 6695), False, 'import torch\n'), ((16136, 16167), 'wandb.Image', 'wandb.Image', (['x'], {'caption': '"""S2 GT"""'}), "(x, caption='S2 GT')\n", (16147, 16167), False, 'import wandb\n'), ((8519, 8540), 'torch.abs', 'torch.abs', (['mean_diffs'], {}), '(mean_diffs)\n', (8528, 8540), False, 'import torch\n'), ((11424, 11445), 'numpy.expand_dims', 'np.expand_dims', (['hr', '(0)'], {}), '(hr, 0)\n', (11438, 11445), True, 'import numpy as np\n'), ((11494, 11515), 'numpy.expand_dims', 'np.expand_dims', (['sr', '(0)'], {}), '(sr, 0)\n', (11508, 11515), True, 'import numpy as np\n'), ((11833, 11856), 'torch.sum', 'torch.sum', (['batch_scores'], {}), '(batch_scores)\n', (11842, 11856), False, 'import torch\n'), ((15918, 15953), 'hrnet.src.utils.normalize_plotting', 'normalize_plotting', (['normalized_plot'], {}), '(normalized_plot)\n', (15936, 15953), False, 'from hrnet.src.utils import normalize_plotting\n'), ((16032, 16072), 'hrnet.src.utils.normalize_plotting', 'normalize_plotting', (['hrs[plot_chnls, ...]'], {}), '(hrs[plot_chnls, ...])\n', (16050, 16072), False, 'from hrnet.src.utils import normalize_plotting\n'), ((16275, 16326), 'hrnet.src.utils.normalize_plotting', 'normalize_plotting', (['lin_interp_img[plot_chnls, ...]'], {}), '(lin_interp_img[plot_chnls, ...])\n', (16293, 16326), False, 'from hrnet.src.utils import normalize_plotting\n'), ((16396, 16436), 'hrnet.src.utils.normalize_plotting', 'normalize_plotting', (['hrs[plot_chnls, ...]'], {}), '(hrs[plot_chnls, ...])\n', (16414, 16436), False, 'from hrnet.src.utils import normalize_plotting\n'), ((16527, 
16571), 'hrnet.src.utils.distributions_plot', 'distributions_plot', (['s2', 'deimos', 'sresolved', '(0)'], {}), '(s2, deimos, sresolved, 0)\n', (16545, 16571), False, 'from hrnet.src.utils import distributions_plot\n'), ((16654, 16698), 'hrnet.src.utils.distributions_plot', 'distributions_plot', (['s2', 'deimos', 'sresolved', '(1)'], {}), '(s2, deimos, sresolved, 1)\n', (16672, 16698), False, 'from hrnet.src.utils import distributions_plot\n'), ((16782, 16826), 'hrnet.src.utils.distributions_plot', 'distributions_plot', (['s2', 'deimos', 'sresolved', '(2)'], {}), '(s2, deimos, sresolved, 2)\n', (16800, 16826), False, 'from hrnet.src.utils import distributions_plot\n'), ((16907, 16951), 'hrnet.src.utils.distributions_plot', 'distributions_plot', (['s2', 'deimos', 'sresolved', '(3)'], {}), '(s2, deimos, sresolved, 3)\n', (16925, 16951), False, 'from hrnet.src.utils import distributions_plot\n'), ((11260, 11298), 'torch.sum', 'torch.sum', (['a'], {'dim': '(0)', 'dtype': 'torch.int64'}), '(a, dim=0, dtype=torch.int64)\n', (11269, 11298), False, 'import torch\n'), ((12179, 12222), 'torch.sum', 'torch.sum', (['alphas'], {'dim': '(1)', 'dtype': 'torch.int64'}), '(alphas, dim=1, dtype=torch.int64)\n', (12188, 12222), False, 'import torch\n'), ((12658, 12681), 'torch.sum', 'torch.sum', (['batch_scores'], {}), '(batch_scores)\n', (12667, 12681), False, 'import torch\n')] |
import logging
import numpy as np
import pandas as pd
import faiss
def smart_kmeans_clustering(X, obj_Y, n_clusters, min_obj_per_cluster=5, search_in=20):
    """Run faiss k-means, then iteratively dissolve clusters containing fewer
    than `min_obj_per_cluster` distinct objects by re-assigning their rows to
    the nearest surviving cluster.

    Parameters
    ----------
    X : feature matrix, one row per sample (faiss expects float32 input —
        TODO confirm callers convert).
    obj_Y : per-row object identifiers; cluster size is measured in distinct
        objects, not in rows.
    n_clusters : initial number of k-means centroids.
    min_obj_per_cluster : clusters with fewer distinct objects than this get
        dissolved.
    search_in : how many nearest centroids to inspect when re-assigning the
        rows of a dissolved cluster.

    Returns
    -------
    (dists, cluster_labels) : per-row distance to the assigned centroid (as
        returned by faiss) and the densely re-indexed cluster label (0..k-1).
    """
    logging.info(
        u"Params: initial number of clusters: %s, min objects per cluster: %s",
        n_clusters, min_obj_per_cluster
    )
    kmeans = faiss.Kmeans(X.shape[1], n_clusters, verbose=True)
    kmeans.train(X)
    # Nearest centroid (and its distance) for every row.
    D, I = kmeans.index.search(X, 1)
    cluster_labels = I.reshape(-1)
    dists = D[:, 0].reshape(-1)
    cum_bad_cluster_ids = np.array([])  # every cluster id dissolved so far
    # Dissolve clusters of size 1, then size 2, ... so re-assignments made at
    # a smaller size are reflected when counting the next size.
    for n_obj_per_cl in range(1, min_obj_per_cluster):
        df = pd.DataFrame({"obj_id": obj_Y, "cl_id": cluster_labels})
        obj_per_cluster = df.groupby("cl_id").obj_id.nunique()
        bad_cluster_ids = obj_per_cluster[obj_per_cluster == n_obj_per_cl].index
        if len(bad_cluster_ids) > 0:
            logging.info("Number of cluster with %s elements = %s", n_obj_per_cl, len(bad_cluster_ids))
            cum_bad_cluster_ids = np.r_[cum_bad_cluster_ids, bad_cluster_ids]
            # we're changing both cluster_labels and dists, it's easier to work with row_ids
            bad_row_ids = np.where(np.in1d(cluster_labels, cum_bad_cluster_ids))[0]
            bad_X = X[bad_row_ids, :]
            # Look at the `search_in` nearest centroids of each orphaned row...
            D, I = kmeans.index.search(bad_X, search_in)
            # ...and keep, per row, only candidates that are NOT dissolved.
            mask_m = ~np.in1d(I, cum_bad_cluster_ids).reshape(bad_X.shape[0], -1)
            new_labels = []
            new_dists = []
            for row_id in range(bad_X.shape[0]):
                row_mask = mask_m[row_id, :]
                # faiss returns candidates sorted by distance, so the first
                # surviving column is the nearest allowed centroid.
                new_labels.append(I[row_id, :][row_mask][0])
                new_dists.append(D[row_id, :][row_mask][0])
            cluster_labels[bad_row_ids] = new_labels
            dists[bad_row_ids] = new_dists
    final_n_clusters = np.unique(cluster_labels).size
    logging.info("Final number of clusters %s", final_n_clusters)
    # reindexing: map surviving cluster ids onto a dense 0..k-1 range
    ix = {}
    cluster_labels = np.array([ix.setdefault(el, len(ix)) for el in cluster_labels])
    return dists, cluster_labels
| [
"numpy.unique",
"numpy.in1d",
"logging.info",
"numpy.array",
"pandas.DataFrame",
"faiss.Kmeans"
] | [((161, 287), 'logging.info', 'logging.info', (['u"""Params: initial number of clusters: %s, min objects per cluster: %s"""', 'n_clusters', 'min_obj_per_cluster'], {}), "(\n u'Params: initial number of clusters: %s, min objects per cluster: %s',\n n_clusters, min_obj_per_cluster)\n", (173, 287), False, 'import logging\n'), ((315, 365), 'faiss.Kmeans', 'faiss.Kmeans', (['X.shape[1]', 'n_clusters'], {'verbose': '(True)'}), '(X.shape[1], n_clusters, verbose=True)\n', (327, 365), False, 'import faiss\n'), ((518, 530), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (526, 530), True, 'import numpy as np\n'), ((1803, 1864), 'logging.info', 'logging.info', (['"""Final number of clusters %s"""', 'final_n_clusters'], {}), "('Final number of clusters %s', final_n_clusters)\n", (1815, 1864), False, 'import logging\n'), ((599, 655), 'pandas.DataFrame', 'pd.DataFrame', (["{'obj_id': obj_Y, 'cl_id': cluster_labels}"], {}), "({'obj_id': obj_Y, 'cl_id': cluster_labels})\n", (611, 655), True, 'import pandas as pd\n'), ((1768, 1793), 'numpy.unique', 'np.unique', (['cluster_labels'], {}), '(cluster_labels)\n', (1777, 1793), True, 'import numpy as np\n'), ((1150, 1194), 'numpy.in1d', 'np.in1d', (['cluster_labels', 'cum_bad_cluster_ids'], {}), '(cluster_labels, cum_bad_cluster_ids)\n', (1157, 1194), True, 'import numpy as np\n'), ((1316, 1347), 'numpy.in1d', 'np.in1d', (['I', 'cum_bad_cluster_ids'], {}), '(I, cum_bad_cluster_ids)\n', (1323, 1347), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import time
import math
from numpy import linalg
import scipy as sc
import scipy.sparse as sparse
import scipy.sparse.linalg
plt.style.use('ggplot')
def spectral(N, nplots):
    """Solve the 1-D heat equation u_t = alpha * u_xx on [0, 2*pi] with a
    Fourier-Galerkin spectral method and backward-Euler time stepping.

    Parameters
    ----------
    N : int
        Number of spatial grid points (rounded down to an even number,
        as required by the FFT wavenumber layout below).
    nplots : int
        Number of snapshots to record after the initial condition.

    Returns
    -------
    numpy.ndarray
        Shape (nplots + 1, N); row 0 is the initial condition sin(x),
        row i the solution after i * 0.1 time units.
    """
    N = 2 * (N // 2)
    half = N // 2
    h = 2 * math.pi / N
    x = [h * i for i in range(1, N + 1)]
    # Initial condition u(x, 0) = sin(x)
    v = [math.sin(y) for y in x]
    alpha = 0.5          # diffusion coefficient
    dt = 0.001           # time step
    # (ik) vector in FFT ordering [0..half-1, Nyquist (zeroed), -half+1..-1]
    k = 1j * np.array(list(range(half)) + [0] + list(range(-half + 1, 0)))
    k2 = k ** 2          # (ik)^2 = -k^2, the Laplacian symbol
    tplot = 0.1          # interval between recorded snapshots
    plotgap = int(round(tplot / dt))
    data = np.zeros((nplots + 1, N))
    data[0, :] = v
    for i in range(nplots):
        v_hat = np.fft.fft(v)            # to spectral space
        for _ in range(plotgap):
            # Backward Euler: v_hat^{n+1} = v_hat^n / (1 - dt*alpha*(ik)^2)
            v_hat = v_hat / (1 - dt * alpha * k2)
        v = np.fft.ifft(v_hat)           # back to physical space
        data[i + 1, :] = np.real(v)
    return data
def implicite(N, rg):
    """Solve the 1-D heat equation on (0, 1) by centred finite differences
    with Crank-Nicolson (trapezoidal) time stepping, using sparse matrices.

    Parameters
    ----------
    N : int
        Number of interior grid points.
    rg : int
        Number of time steps; one snapshot is stored per step.

    Returns
    -------
    numpy.ndarray
        np.squeeze of the (rg, N) snapshot array — for rg == 1 this is a
        1-D array of length N (kept for backward compatibility).
    """
    h = 1 / (N + 1.0)                     # grid spacing
    k = h / 2                             # time step
    # Interior points of (0, 1)
    x = np.linspace(0, 1, N + 2)[1:-1]
    # Initial condition u(x, 0) = sin(2*pi*x)
    u = np.sin(2 * np.pi * x)
    # Second-order centred Laplacian as a sparse tridiagonal matrix
    bands = np.ones((3, N))
    bands[1] = -2 * bands[1]
    L = sparse.spdiags(bands, [-1, 0, 1], N, N) / h ** 2
    I = sparse.identity(N)
    # Crank-Nicolson: (I - k/2 L) u_new = (I + k/2 L) u_old.
    # Both operators are time-invariant, so build them once outside the loop.
    A = I - k / 2 * L
    B = I + k / 2 * L
    data = []
    for _ in range(rg):
        u = sparse.linalg.spsolve(A, B @ u)
        data.append(u)
    return np.squeeze(np.asarray(data))
NX=2**9  # number of spatial grid points shared by all solvers below
pi=np.pi
K=1  # appears in sol's decay exponent (presumably a diffusion coefficient — confirm)
L=0.8  # length-scale factor in sol's decay exponent — TODO confirm intended meaning
def sol(x, t):
    """Exact solution of the model heat problem at grid index x and time t."""
    spatial = np.sin(2 * pi * x / NX)
    temporal = np.exp(-K * (pi / 2 * L) ** 2 * t)
    return spatial * temporal
def sol_grille(t):
    """Exact solution sampled at time t over the whole spatial grid (size NX)."""
    samples = [sol(node, t) for node in range(NX)]
    return np.array(samples)
def sol_tps(t):
    """Exact solution over t successive instants (step 0.036 s per row);
    returns a (t, NX) array with one grid snapshot per row. NX is the
    spatial dimension of the matrix."""
    dt = 0.036
    snapshots = np.zeros((t, NX))
    for step in range(t):
        snapshots[step] = sol_grille(dt * step)
    return snapshots
def res(t):
    """Norm of the gap between the exact solution and the spectral scheme."""
    exact = sol_grille(t)
    approx = spectral(NX, t)
    return linalg.norm(exact - approx)
def comp(time):
    """Plot, on one figure, the error of the spectral scheme and of the
    implicit Euler scheme against the exact solution, for 1 <= t < time."""
    spectral_err = []
    euler_err = []
    for t in range(1, time):
        spectral_err.append(res(t))
        euler_err.append(linalg.norm(sol_grille(t) - implicite(NX, t)))
    steps = np.arange(time - 1)
    plt.clf()
    plt.plot(steps, spectral_err)
    plt.plot(steps, euler_err)
    plt.grid()
    plt.show()
## Résidu (spectral)
def convergence(taille, n):
    """Track the peak of the last spectral snapshot over n successive runs,
    plot the curve, and return (abscissae, peaks, elapsed seconds)."""
    start = time.time()
    abscissae = np.arange(n)
    peaks = [max(spectral(2 ** taille, i)[-1]) for i in range(n)]
    plt.clf()
    plt.plot(abscissae, peaks)
    plt.grid()
    plt.show()
    return abscissae, peaks, time.time() - start
## Résidu (Euler)
def convergence2(n):
    """Track the peak of the implicit-Euler solution over n-1 runs, plot
    the curve, and return (abscissae, peaks, elapsed seconds)."""
    start = time.time()
    abscissae = np.arange(n - 1)
    # First point taken on the whole 1-D result (implicite(NX, 1) is already
    # a single snapshot), the following ones on the last row of each run.
    peaks = [max(implicite(NX, 1))]
    for i in range(2, n):
        peaks.append(max(implicite(NX, i)[-1]))
    plt.clf()
    plt.plot(abscissae, peaks)
    plt.grid()
    plt.title("Evolution de l'écart entre la méthode d'Euler et la solution exacte")
    plt.ylabel("Résidu")
    plt.xlabel("Temps (secondes)")
    plt.show()
    return abscissae, peaks, time.time() - start
## Comparison of the diffusion maps
## (the "## ..." headers delimit editor cells: the script is meant to be
## run cell by cell, not necessarily top to bottom)
X=sol_tps(100)   # exact solution: 100 time snapshots of the grid
Y=spectral(NX,99)   # spectral solver: initial condition + 99 snapshots
Z=implicite(NX,100)   # implicit finite-difference solver: 100 snapshots
plt.clf()
plt.suptitle("Comparaison des figures de diffusion",fontsize=16)
plt.subplot(311)
plt.title("Solution exacte",fontsize=12)
plt.imshow(X)
plt.grid()
plt.subplot(312)
plt.title("Méthode spectrale",fontsize=12)
plt.imshow(Y)
plt.grid()
plt.subplot(313)
plt.title("Méthode d'Euler Implicite",fontsize=12)
plt.imshow(Z)
plt.grid()
plt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)
cax = plt.axes([0.82, 0.1, 0.02, 0.8]) # the 3rd parameter controls the colorbar width
plt.colorbar(cax=cax)
plt.show()
## Results (spectral)
#def exp(X):
#    Z=[]
#    for l in X:
#        Z+=[math.exp(-1/21*l)]
#    return Z
#EXP=exp(X)
plt.clf()
plt.cla()
plt.close()
# NOTE(review): WU (and the X/Y used below) are (re)defined as literal
# tables further down in the file; this cell only works after executing
# that data cell — run strictly top to bottom, WU would be undefined here.
# Confirm the intended cell order.
plt.semilogx(X[:100],Y[:100],'g',label="Spectral")
plt.semilogx(X[:100],WU[:100],'r',label="Euler")
plt.legend()
plt.title("Evolution de l'écart (en norme) entre les méthodes numériques et la solution exacte")
plt.ylabel("Résidu (sans dimension)")
plt.xlabel("Temps (secondes)")
plt.grid(True, which="both")
plt.show()
## Matrices des résultats
X=[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116,
117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168,
169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181,
182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220,
221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246,
247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259,
260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272,
273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285,
286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298,
299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311,
312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337,
338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350,
351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363,
364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376,
377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389,
390, 391, 392, 393, 394, 395, 396, 397, 398, 399]
Y=[1.0,
0.95124131098081666,
0.90486003171650231,
0.86074024282414874,
0.81877167699798925,
0.77884944342152851,
0.74087376561697371,
0.70474973207678349,
0.67038705905409801,
0.63769986491919406,
0.60660645551802328,
0.57702911999639106,
0.54889393657947361,
0.52213058782127941,
0.49667218486229781,
0.47245510025611848,
0.4494188089472031,
0.42750573700237449,
0.40666111771795854,
0.38683285474295492,
0.36797139187614003,
0.35002958921169502,
0.33296260532380939,
0.31672778519580885,
0.30128455361371126,
0.28659431375777689,
0.27262035073859475,
0.25932773983663071,
0.24668325921588857,
0.23465530689354236,
0.22321382175801902,
0.2123302084381363,
0.20197726583552283,
0.1921291191417035,
0.1827611551699436,
0.17384996084022558,
0.16537326466361982,
0.15730988107979904,
0.14963965750858443,
0.14234342398318617,
0.13540294523926413,
0.1288008751400615,
0.12252071332370845,
0.11654676396434921,
0.11086409654401926,
0.10545850853723668,
0.10031648991504261,
0.095425189379778952,
0.090772382246213618,
0.086346439888739884,
0.082136300678291196,
0.078131442336332263,
0.074321855636834772,
0.070698019390509653,
0.067250876648775562,
0.063971812067990477,
0.060852630377373609,
0.057885535896803939,
0.055063113053302845,
0.052378307847508687,
0.049824410223820947,
0.047395037300153403,
0.045084117415382582,
0.042885874954621594,
0.040794815914393587,
0.038805714171628827,
0.036913598422167043,
0.035113739756121573,
0.033401639839052297,
0.031773019669409167,
0.030223808884148055,
0.028750135585790641,
0.027348316665503712,
0.026014848598012265,
0.024746398685340617,
0.023539796727497349,
0.022392027099286518,
0.021300221213443264,
0.020261650351257172,
0.019273718842764798,
0.018333957579467222,
0.017440017843359072,
0.016589665436845705,
0.015780775098878257,
0.015011325193350364,
0.014279392656481956,
0.013583148190561726,
0.01292085169203664,
0.012290847902521631,
0.011691562271860492,
0.011121497022898419,
0.010579227408131137,
0.010063398148874844,
0.0095727200480576202,
0.0091059667681666796,
0.0086619717662986262,
0.0082396253786527259,
0.0078378720471804289,
0.0074557076814598033,
0.0070921771492015642,
0.0067463718891146857,
0.0064174276401655732,
0.0061045222815556228,
0.0058068737780185738,
0.0055237382253025121,
0.0052544079909516081,
0.0049982099457408858,
0.0047545037813439151,
0.0045226804100288315,
0.0043021604423830809,
0.0040923927392622897,
0.0038928530343442344,
0.0037030426238452551,
0.0035224871201244021,
0.0033507352660601735,
0.0031873578072367328,
0.003031946419120809,
0.0028841126865480691,
0.0027434871329683917,
0.0026097182970238543,
0.0024824718541515923,
0.0023614297810161397,
0.0022462895606829326,
0.0021367634265465544,
0.0020325776431240066,
0.0019334718219155777,
0.0018391982706234425,
0.0017495213741014932,
0.0016642170054892626,
0.0015830719660581739,
0.0015058834523701545,
0.0014324585494169032,
0.0013626137484730127,
0.0012961744884579527,
0.0012329747196606327,
0.0011728564887361838,
0.0011156695439377643,
0.0010612709595967287,
0.0010095247789126619,
0.0009603016741604989,
0.0009134786234655058,
0.00086893860333827851,
0.00082657029620134405,
0.00078626781217636826,
0.00074793042443666697,
0.00071146231746357295,
0.00067677234757749821,
0.00064377381514518404,
0.00061238424789382688,
0.00058252519479052439,
0.00055412202997189345,
0.00052710376623381458,
0.0005014028776151795,
0.00047695513063221706,
0.00045369942374161656,
0.00043157763463121633,
0.00041053447495659754,
0.00039051735216053506,
0.00037147623802994453,
0.00035336354366182626,
0.00033613400052570261,
0.00031974454732529572,
0.00030415422237668212,
0.00028932406123394563,
0.00027521699930647253,
0.00026179777922449529,
0.00024903286272137508,
0.00023689034681238634,
0.0002253398840605147,
0.00021435260672998912,
0.00020390105463799008,
0.00019395910652421258,
0.00018450191476675979,
0.00017550584328120336,
0.00016694840844760559,
0.0001588082229178612,
0.00015106494216291991,
0.00014369921362629694,
0.000136692628356791,
0.00013002767499952742,
0.00012368769603033792,
0.00011765684612409551,
0.0001119200525529527,
0.00010646297751551253,
0.00010127198230277731,
9.6334093211319878e-05,
9.1636969118484031e-05,
8.716887063857533e-05,
8.2918630782955567e-05,
7.8875627050712928e-05,
7.5029754880154075e-05,
7.1371402394767048e-05,
6.7891426380537544e-05,
6.4581129434580076e-05,
6.1432238227971737e-05,
5.8436882828461682e-05,
5.5587577031378247e-05,
5.2877199649575371e-05,
5.0298976715656445e-05,
4.7846464551994585e-05,
4.5513533666236483e-05,
4.3294353432040317e-05,
4.1183377516760801e-05,
3.9175330019661416e-05,
3.7265192286008869e-05,
3.5448190364095327e-05,
3.3719783073839564e-05,
3.2075650657147878e-05,
3.0511683981668019e-05,
2.9023974270954267e-05,
2.7608803335376018e-05,
2.6262634279354635e-05,
2.4982102661703017e-05,
2.3764008086975728e-05,
2.2605306206813511e-05,
2.1503101111292045e-05,
2.0454638091258506e-05,
1.9457296753566876e-05,
1.8508584472005732e-05,
1.7606130157549906e-05,
1.6747678332366681e-05,
1.593108349276547e-05,
1.5154304747003068e-05,
1.4415400714541999e-05,
1.3712524674014723e-05,
1.3043919947766545e-05,
1.2407915511442266e-05,
1.1802921817643546e-05,
1.1227426823219322e-05,
1.0679992210260319e-05,
1.0159249791352928e-05,
9.6638980901081428e-06,
9.192699088419467e-06,
8.7444751323202944e-06,
8.3181059887074929e-06,
7.9125260455754856e-06,
7.5267216487630759e-06,
7.1597285685570814e-06,
6.8106295898210419e-06,
6.4785522196261019e-06,
6.1626665066548138e-06,
5.8621829669278922e-06,
5.576350610669897e-06,
5.3044550653823064e-06,
5.0458167904330974e-06,
4.7997893787005915e-06,
4.5657579410269435e-06,
4.3431375694435388e-06,
4.1313718753275084e-06,
3.9299315988358164e-06,
3.7383132861415193e-06,
3.556038031166261e-06,
3.3826502786642327e-06,
3.2177166856661872e-06,
3.0608250384379512e-06,
2.9115832222466227e-06,
2.7696182413596266e-06,
2.6345752868273137e-06,
2.5061168497192742e-06,
2.3839218775980749e-06,
2.2676849721222425e-06,
2.1571156257730572e-06,
2.0519374957975666e-06,
1.9518877135531706e-06,
1.8567162275276664e-06,
1.7661851783927729e-06,
1.6800683045292277e-06,
1.5981503765377009e-06,
1.5202266593222081e-06,
1.4461024004016446e-06,
1.3755923431705667e-06,
1.3085202638927428e-06,
1.2447185312702957e-06,
1.184027687487672e-06,
1.1262960496833579e-06,
1.0713793308533114e-06,
1.019140279238653e-06,
9.694483352963316e-07,
9.2217930539545208e-07,
8.7721505142374772e-07,
8.3444319552843008e-07,
7.9375683925348578e-07,
7.5505429637147498e-07,
7.1823883874209957e-07,
6.8321845456237323e-07,
6.4990561840419911e-07,
6.1821707246460822e-07,
5.8807361848195618e-07,
5.5939991979800807e-07,
5.3212431307122081e-07,
5.0617862917063521e-07,
4.8149802280274762e-07,
4.5802081044555648e-07,
4.3568831618472728e-07,
4.144447250665845e-07,
3.9423694360142171e-07,
3.7501446706848661e-07,
3.5672925329099947e-07,
3.3933560256573802e-07,
3.2279004344709776e-07,
3.0705122410017216e-07,
2.9207980895131186e-07,
2.778383803778721e-07,
2.6429134519143368e-07,
2.5140484568078276e-07,
2.3914667499231749e-07,
2.2748619663639528e-07,
2.163942679184443e-07,
2.0584316710347489e-07,
1.9580652413195269e-07,
1.862592547138755e-07,
1.7717749763633671e-07,
1.6853855512788948e-07,
1.6032083613066612e-07,
1.5250380233847526e-07,
1.4506791686601048e-07,
1.379945954208799e-07,
1.3126615985642509e-07,
1.2486579398924302e-07,
1.18777501570988e-07,
1.1298606630941256e-07,
1.0747701383873104e-07,
1.0223657554425785e-07,
9.7251654150909102e-08,
9.2509790989563769e-08,
8.7999134859473971e-08,
8.370841240890367e-08,
7.962689995996828e-08,
7.5744396707258482e-08,
7.2051199223263547e-08,
6.8538077206877133e-08,
6.519625041437416e-08,
6.201736671520288e-08,
5.8993481217747516e-08,
5.6117036412892302e-08,
5.33808432857579e-08,
5.077806334840585e-08,
4.8302191548604509e-08,
4.5947040011941014e-08,
4.3706722576646775e-08,
4.1575640082484299e-08,
3.9548466376928928e-08,
3.7620135003670646e-08,
3.5785826540166944e-08,
3.4040956552600502e-08,
3.238116413813671e-08,
3.0802301025846158e-08,
2.9300421209051642e-08,
2.7871771083188372e-08,
2.6512780064529332e-08,
2.5220051666328927e-08,
2.3990355010082639e-08,
2.2820616750686212e-08,
2.1707913395313526e-08,
2.0649463996816064e-08,
1.9642623203382479e-08,
1.8684874647087731e-08,
1.7773824654807946e-08,
1.690719626578267e-08,
1.6082823540873063e-08,
1.5298646149293213e-08,
1.4552704219285292e-08,
1.3843133439868994e-08,
1.3168160401423365e-08,
1.2526098163455634e-08,
1.1915342038479934e-08,
1.1334365581468482e-08,
1.0781716774851914e-08,
1.0256014399533994e-08,
9.7559445828508489e-09,
9.2802575148472307e-09,
8.8277643246628539e-09,
8.3973341092219733e-09,
7.9878911068002404e-09,
7.5984120084046698e-09,
7.2279234002472458e-09,
6.8754993309201063e-09,
6.5402589971921677e-09,
6.2213645426431527e-09,
5.9180189636334394e-09,
5.6294641173760043e-09,
5.3549788271322111e-09,
5.0938770797957559e-09,
4.8455063113600459e-09,
4.6092457759839507e-09,
4.3845049945797636e-09,
4.1707222790459928e-09,
3.9673633284566079e-09,
3.7739198936982822e-09,
3.5899085072181381e-09,
3.4148692747073633e-09,
3.2483647257007415e-09,
3.0899787202194144e-09,
2.9393154087243379e-09,
2.7959982427810535e-09,
2.6596690339631061e-09,
2.5299870586421458e-09,
2.4066282064272523e-09,
2.2892841701252727e-09,
2.1776616751975942e-09]
Z= [9.6224426675032202,
9.2591836947011021,
8.9096381921560948,
8.5732884596031589,
8.2496363405950159,
7.9382024846993415,
7.6385256375484838,
7.3501619576909913,
7.0726843592328503,
6.8056818792944123,
6.5487590693472546,
6.3015354095281175,
6.0636447450638942,
5.8347347439715413,
5.6144663752312649,
5.402513406658497,
5.1985619217325043,
5.0023098546649143,
4.8134665430201302,
4.6317522972247085,
4.4568979863285909,
4.2886446394039606,
4.1267430619922267,
3.9709534670299731,
3.8210451197087885,
3.6767959957411587,
3.5379924525279201,
3.4044289127390606,
3.2759075598401948,
3.1522380451127963,
3.0332372057352432,
2.9187287935062365,
2.8085432138095392,
2.7025172744330122,
2.6004939438699886,
2.5023221187451168,
2.4078564000201723,
2.3169568776483582,
2.229488923358153,
2.1453229912598646,
2.0643344259795167,
1.9864032780357812,
1.9114141261869704,
1.8392559064843008,
1.7698217477788454,
1.703008813438162,
1.6387181490384051,
1.5768545358061741,
1.5173263495931619,
1.4600454251746524,
1.404926925670954,
1.3518892168982508,
1.3008537464629679,
1.2517449274204169,
1.2044900263254501,
1.159019055509398,
1.1152646694236166,
1.0731620648962445,
1.0326488851543902,
0.99366512746954161,
0.95615305428955888,
0.92005710772541915,
0.88532382726637981,
0.85190177060129157,
0.81974143742915073,
0.78879519614580951,
0.75901721329841065,
0.73036338570295967,
0.70279127512457573,
0.67626004542361118,
0.65073040207462141,
0.62616453396850502,
0.60252605741177112,
0.57977996223981365,
0.55789255996455345,
0.53683143387950316,
0.51656539104847221,
0.49706441610674357,
0.47829962680630778,
0.46024323123931848,
0.44286848667642748,
0.42614965995900567,
0.4100619893866187,
0.39458164804328749,
0.37968570850824357,
0.36535210889886666,
0.35155962019558856,
0.33828781480026093,
0.32551703628155804,
0.31322837026244077,
0.30140361640673913,
0.29002526146321339,
0.2790764533272711,
0.26854097608185995,
0.25840322598062565,
0.24864818833771085,
0.23926141528999778,
0.23022900439886829,
0.22153757805974816,
0.2131742636889962]
WU=[1.0,
0.9259183694701123,
0.89096381921560974,
0.85732884596030856,
0.82496363405949502,
0.79382024846992805,
0.76385256375484001,
0.73501619576910093,
0.70726843592328004,
0.68056818792943852,
0.6548759069347162,
0.63015354095280807,
0.60636447450638054,
0.58347347439715103,
0.56144663752311696,
0.54025134066584235,
0.51985619217324364,
0.50023098546648748,
0.4813466543020064,
0.46317522972246422,
0.44568979863285435,
0.42886446394039263,
0.41267430619921469,
0.39709534670299118,
0.38210451197087303,
0.36767959957411173,
0.353799245252788,
0.34044289127390281,
0.3275907559840151,
0.31522380451127574,
0.30332372057352064,
0.29187287935061867,
0.28085432138095101,
0.27025172744329851,
0.26004939438699703,
0.25023221187450978,
0.24078564000201588,
0.23169568776483415,
0.22294889233581389,
0.21453229912598545,
0.20643344259794824,
0.19864032780357485,
0.19114141261869319,
0.18392559064842706,
0.17698217477788114,
0.17030088134381438,
0.16387181490383909,
0.15768545358061564,
0.15173263495931494,
0.14600454251746431,
0.14049269256709318,
0.13518892168982216,
0.1300853746462943,
0.12517449274204012,
0.12044900263254317,
0.11590190555093764,
0.11152646694235936,
0.10731620648962284,
0.10326488851543758,
0.099366512746953128,
0.095615305428953148,
0.092005710772539981,
0.088532382726636299,
0.085190177060127367,
0.081974143742913314,
0.078879519614579174,
0.075901721329839494,
0.073036338570294401,
0.070279127512455991,
0.067626004542360119,
0.065073040207460589,
0.062616453396849472,
0.060252605741175802,
0.057977996223980227,
0.055789255996453777,
0.053683143387948738,
0.051656539104845606,
0.049706441610672829,
0.04782996268062889,
0.046024323123929935,
0.044286848667641265,
0.042614965995898993,
0.04100619893866015,
0.039458164804327248,
0.037968570850822586,
0.036535210889885095,
0.035155962019556883,
0.033828781480024724,
0.032551703628154322,
0.031322837026242625,
0.030140361640672214,
0.029002526146320082,
0.027907645332725602,
0.026854097608184829,
0.02584032259806154,
0.024864818833770057,
0.023926141528998777,
0.023022900439885773,
0.022153757805973829,
0.021317426368898558]
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.sin",
"scipy.sparse.spdiags",
"numpy.arange",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"matplotlib.pyplot.style.use",
"numpy.asarray",
"matplotlib.pyplot.clos... | [((177, 200), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (190, 200), True, 'import matplotlib.pyplot as plt\n'), ((3673, 3682), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3680, 3682), True, 'import matplotlib.pyplot as plt\n'), ((3683, 3748), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Comparaison des figures de diffusion"""'], {'fontsize': '(16)'}), "('Comparaison des figures de diffusion', fontsize=16)\n", (3695, 3748), True, 'import matplotlib.pyplot as plt\n'), ((3748, 3764), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (3759, 3764), True, 'import matplotlib.pyplot as plt\n'), ((3765, 3806), 'matplotlib.pyplot.title', 'plt.title', (['"""Solution exacte"""'], {'fontsize': '(12)'}), "('Solution exacte', fontsize=12)\n", (3774, 3806), True, 'import matplotlib.pyplot as plt\n'), ((3806, 3819), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X'], {}), '(X)\n', (3816, 3819), True, 'import matplotlib.pyplot as plt\n'), ((3820, 3830), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3828, 3830), True, 'import matplotlib.pyplot as plt\n'), ((3831, 3847), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (3842, 3847), True, 'import matplotlib.pyplot as plt\n'), ((3848, 3891), 'matplotlib.pyplot.title', 'plt.title', (['"""Méthode spectrale"""'], {'fontsize': '(12)'}), "('Méthode spectrale', fontsize=12)\n", (3857, 3891), True, 'import matplotlib.pyplot as plt\n'), ((3891, 3904), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Y'], {}), '(Y)\n', (3901, 3904), True, 'import matplotlib.pyplot as plt\n'), ((3905, 3915), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3913, 3915), True, 'import matplotlib.pyplot as plt\n'), ((3916, 3932), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (3927, 3932), True, 'import matplotlib.pyplot as plt\n'), ((3933, 3984), 'matplotlib.pyplot.title', 'plt.title', 
(['"""Méthode d\'Euler Implicite"""'], {'fontsize': '(12)'}), '("Méthode d\'Euler Implicite", fontsize=12)\n', (3942, 3984), True, 'import matplotlib.pyplot as plt\n'), ((3984, 3997), 'matplotlib.pyplot.imshow', 'plt.imshow', (['Z'], {}), '(Z)\n', (3994, 3997), True, 'import matplotlib.pyplot as plt\n'), ((3998, 4008), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4006, 4008), True, 'import matplotlib.pyplot as plt\n'), ((4009, 4060), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.1)', 'right': '(0.8)', 'top': '(0.9)'}), '(bottom=0.1, right=0.8, top=0.9)\n', (4028, 4060), True, 'import matplotlib.pyplot as plt\n'), ((4067, 4099), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.82, 0.1, 0.02, 0.8]'], {}), '([0.82, 0.1, 0.02, 0.8])\n', (4075, 4099), True, 'import matplotlib.pyplot as plt\n'), ((4152, 4173), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'cax': 'cax'}), '(cax=cax)\n', (4164, 4173), True, 'import matplotlib.pyplot as plt\n'), ((4174, 4184), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4182, 4184), True, 'import matplotlib.pyplot as plt\n'), ((4315, 4324), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4322, 4324), True, 'import matplotlib.pyplot as plt\n'), ((4325, 4334), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4332, 4334), True, 'import matplotlib.pyplot as plt\n'), ((4335, 4346), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4344, 4346), True, 'import matplotlib.pyplot as plt\n'), ((4347, 4400), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['X[:100]', 'Y[:100]', '"""g"""'], {'label': '"""Spectral"""'}), "(X[:100], Y[:100], 'g', label='Spectral')\n", (4359, 4400), True, 'import matplotlib.pyplot as plt\n'), ((4398, 4449), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['X[:100]', 'WU[:100]', '"""r"""'], {'label': '"""Euler"""'}), "(X[:100], WU[:100], 'r', label='Euler')\n", (4410, 4449), True, 'import matplotlib.pyplot as plt\n'), ((4447, 4459), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4457, 4459), True, 'import matplotlib.pyplot as plt\n'), ((4460, 4566), 'matplotlib.pyplot.title', 'plt.title', (['"""Evolution de l\'écart (en norme) entre les méthodes numériques et la solution exacte"""'], {}), '(\n "Evolution de l\'écart (en norme) entre les méthodes numériques et la solution exacte"\n )\n', (4469, 4566), True, 'import matplotlib.pyplot as plt\n'), ((4557, 4594), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Résidu (sans dimension)"""'], {}), "('Résidu (sans dimension)')\n", (4567, 4594), True, 'import matplotlib.pyplot as plt\n'), ((4595, 4625), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temps (secondes)"""'], {}), "('Temps (secondes)')\n", (4605, 4625), True, 'import matplotlib.pyplot as plt\n'), ((4626, 4654), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'which': '"""both"""'}), "(True, which='both')\n", (4634, 4654), True, 'import matplotlib.pyplot as plt\n'), ((4655, 4665), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4663, 4665), True, 'import matplotlib.pyplot as plt\n'), ((372, 383), 'time.time', 'time.time', ([], {}), '()\n', (381, 383), False, 'import time\n'), ((742, 753), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (750, 753), True, 'import numpy as np\n'), ((870, 895), 'numpy.zeros', 'np.zeros', (['(nplots + 1, N)'], {}), '((nplots + 1, N))\n', (878, 895), True, 'import numpy as np\n'), ((1394, 1405), 'time.time', 'time.time', ([], {}), '()\n', (1403, 1405), False, 'import time\n'), ((1538, 1562), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(N + 2)'], {}), '(0, 1, N + 2)\n', (1549, 1562), True, 'import numpy as np\n'), ((1679, 1694), 'numpy.ones', 'np.ones', (['(3, N)'], {}), '((3, N))\n', (1686, 1694), True, 'import numpy as np\n'), ((1817, 1835), 'scipy.sparse.identity', 'sparse.identity', (['N'], {}), '(N)\n', (1832, 1835), True, 'import scipy.sparse as sparse\n'), ((2406, 2423), 'numpy.zeros', 'np.zeros', (['(t, NX)'], {}), '((t, 
NX))\n', (2414, 2423), True, 'import numpy as np\n'), ((2723, 2742), 'numpy.arange', 'np.arange', (['(time - 1)'], {}), '(time - 1)\n', (2732, 2742), True, 'import numpy as np\n'), ((2745, 2754), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2752, 2754), True, 'import matplotlib.pyplot as plt\n'), ((2759, 2776), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'err1'], {}), '(X, err1)\n', (2767, 2776), True, 'import matplotlib.pyplot as plt\n'), ((2780, 2797), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'err2'], {}), '(X, err2)\n', (2788, 2797), True, 'import matplotlib.pyplot as plt\n'), ((2801, 2811), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2809, 2811), True, 'import matplotlib.pyplot as plt\n'), ((2816, 2826), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2824, 2826), True, 'import matplotlib.pyplot as plt\n'), ((2890, 2901), 'time.time', 'time.time', ([], {}), '()\n', (2899, 2901), False, 'import time\n'), ((2908, 2920), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2917, 2920), True, 'import numpy as np\n'), ((3013, 3022), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3020, 3022), True, 'import matplotlib.pyplot as plt\n'), ((3027, 3041), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {}), '(X, Y)\n', (3035, 3041), True, 'import matplotlib.pyplot as plt\n'), ((3045, 3055), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3053, 3055), True, 'import matplotlib.pyplot as plt\n'), ((3060, 3070), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3068, 3070), True, 'import matplotlib.pyplot as plt\n'), ((3155, 3166), 'time.time', 'time.time', ([], {}), '()\n', (3164, 3166), False, 'import time\n'), ((3173, 3189), 'numpy.arange', 'np.arange', (['(n - 1)'], {}), '(n - 1)\n', (3182, 3189), True, 'import numpy as np\n'), ((3336, 3345), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3343, 3345), True, 'import matplotlib.pyplot as plt\n'), ((3350, 3364), 'matplotlib.pyplot.plot', 'plt.plot', 
(['X', 'Y'], {}), '(X, Y)\n', (3358, 3364), True, 'import matplotlib.pyplot as plt\n'), ((3368, 3378), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3376, 3378), True, 'import matplotlib.pyplot as plt\n'), ((3383, 3468), 'matplotlib.pyplot.title', 'plt.title', (['"""Evolution de l\'écart entre la méthode d\'Euler et la solution exacte"""'], {}), '("Evolution de l\'écart entre la méthode d\'Euler et la solution exacte"\n )\n', (3392, 3468), True, 'import matplotlib.pyplot as plt\n'), ((3468, 3488), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Résidu"""'], {}), "('Résidu')\n", (3478, 3488), True, 'import matplotlib.pyplot as plt\n'), ((3493, 3523), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Temps (secondes)"""'], {}), "('Temps (secondes)')\n", (3503, 3523), True, 'import matplotlib.pyplot as plt\n'), ((3528, 3538), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3536, 3538), True, 'import matplotlib.pyplot as plt\n'), ((526, 537), 'math.sin', 'math.sin', (['y'], {}), '(y)\n', (534, 537), False, 'import math\n'), ((955, 968), 'numpy.fft.fft', 'np.fft.fft', (['v'], {}), '(v)\n', (965, 968), True, 'import numpy as np\n'), ((1121, 1139), 'numpy.fft.ifft', 'np.fft.ifft', (['v_hat'], {}), '(v_hat)\n', (1132, 1139), True, 'import numpy as np\n'), ((1194, 1204), 'numpy.real', 'np.real', (['v'], {}), '(v)\n', (1201, 1204), True, 'import numpy as np\n'), ((1749, 1782), 'scipy.sparse.spdiags', 'sparse.spdiags', (['data', 'diags', 'N', 'N'], {}), '(data, diags, N, N)\n', (1763, 1782), True, 'import scipy.sparse as sparse\n'), ((2137, 2153), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2147, 2153), True, 'import numpy as np\n'), ((2210, 2233), 'numpy.sin', 'np.sin', (['(2 * x * pi / NX)'], {}), '(2 * x * pi / NX)\n', (2216, 2233), True, 'import numpy as np\n'), ((2228, 2262), 'numpy.exp', 'np.exp', (['(-K * t * (pi / 2 * L) ** 2)'], {}), '(-K * t * (pi / 2 * L) ** 2)\n', (2234, 2262), True, 'import numpy as np\n'), ((1617, 1638), 
'numpy.sin', 'np.sin', (['(2 * np.pi * x)'], {}), '(2 * np.pi * x)\n', (1623, 1638), True, 'import numpy as np\n'), ((3086, 3097), 'time.time', 'time.time', ([], {}), '()\n', (3095, 3097), False, 'import time\n'), ((3554, 3565), 'time.time', 'time.time', ([], {}), '()\n', (3563, 3565), False, 'import time\n'), ((2063, 2090), 'scipy.sparse.linalg.spsolve', 'sparse.linalg.spsolve', (['A', 'b'], {}), '(A, b)\n', (2084, 2090), True, 'import scipy.sparse as sparse\n')] |
""" The LaTex example was derived from: http://matplotlib.org/users/usetex.html
"""
from bokeh.models import Label
from bokeh.palettes import Spectral4
from bokeh.plotting import output_file, figure, show
import numpy as np
from scipy.special import jv
output_file('external_resources.html')
class LatexLabel(Label):
"""A subclass of `Label` with all of the same class attributes except
canvas mode isn't supported and DOM manipulation happens in the coffeescript
superclass implementation that requires setting `render_mode='css'`).
Only the render method of LabelView is overwritten to perform the
text -> latex (via katex) conversion
"""
__javascript__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.9.0/katex.min.js"]
__css__ = ["https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.9.0/katex.min.css"]
__implementation__ = """
import {Label, LabelView} from "models/annotations/label"
export class LatexLabelView extends LabelView
render: () ->
# Here because AngleSpec does units tranform and label doesn't support specs
switch @model.angle_units
when "rad" then angle = -1 * @model.angle
when "deg" then angle = -1 * @model.angle * Math.PI/180.0
panel = @model.panel ? @plot_view.frame
xscale = @plot_view.frame.xscales[@model.x_range_name]
yscale = @plot_view.frame.yscales[@model.y_range_name]
sx = if @model.x_units == "data" then xscale.compute(@model.x) else panel.xview.compute(@model.x)
sy = if @model.y_units == "data" then yscale.compute(@model.y) else panel.yview.compute(@model.y)
sx += @model.x_offset
sy -= @model.y_offset
@_css_text(@plot_view.canvas_view.ctx, "", sx, sy, angle)
katex.render(@model.text, @el, {displayMode: true})
export class LatexLabel extends Label
type: 'LatexLabel'
default_view: LatexLabelView
"""
p = figure(title="LaTex Extension Demonstration", plot_width=800, plot_height=350,
background_fill_color="#fafafa")
p.x_range.range_padding = 0
x = np.arange(0.0, 20.0, 0.02)
for i, n in enumerate([0, 1, 4, 7]):
p.line(x, jv(n, x), line_width=3, color=Spectral4[i], alpha=0.8, legend="𝜈=%d" % n)
text = (r"\text{Bessel Functions of the First Kind: }" +
r"J_\nu = \sum_{m=0}^{\infty}\frac{(-1)^m}{m!\ \Gamma(m+\nu+1)}" +
r"\left(\frac{x}{2}\right)^{2m+\nu}")
latex = LatexLabel(text=text,x=4.5, y=250, x_units='data', y_units='screen',
render_mode='css', text_font_size='8pt',
background_fill_color="white", border_line_color="lightgrey")
p.add_layout(latex)
show(p)
| [
"bokeh.plotting.show",
"bokeh.plotting.figure",
"numpy.arange",
"scipy.special.jv",
"bokeh.plotting.output_file"
] | [((256, 294), 'bokeh.plotting.output_file', 'output_file', (['"""external_resources.html"""'], {}), "('external_resources.html')\n", (267, 294), False, 'from bokeh.plotting import output_file, figure, show\n'), ((1856, 1972), 'bokeh.plotting.figure', 'figure', ([], {'title': '"""LaTex Extension Demonstration"""', 'plot_width': '(800)', 'plot_height': '(350)', 'background_fill_color': '"""#fafafa"""'}), "(title='LaTex Extension Demonstration', plot_width=800, plot_height=\n 350, background_fill_color='#fafafa')\n", (1862, 1972), False, 'from bokeh.plotting import output_file, figure, show\n'), ((2012, 2038), 'numpy.arange', 'np.arange', (['(0.0)', '(20.0)', '(0.02)'], {}), '(0.0, 20.0, 0.02)\n', (2021, 2038), True, 'import numpy as np\n'), ((2585, 2592), 'bokeh.plotting.show', 'show', (['p'], {}), '(p)\n', (2589, 2592), False, 'from bokeh.plotting import output_file, figure, show\n'), ((2091, 2099), 'scipy.special.jv', 'jv', (['n', 'x'], {}), '(n, x)\n', (2093, 2099), False, 'from scipy.special import jv\n')] |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
# NOTE(review): `path` is expected to be injected by the surrounding (grading)
# environment; this bare reference raises NameError when run standalone.
path
#Code starts here
# Load the Olympics medal table and rename the 'Total' column for clarity.
data = pd.read_csv(path)
data.rename(mapper={'Total':'Total_Medals'},axis=1,inplace=True)
print(data.head(10))
# --------------
#Code starts here
# Label each country by the season in which it won more medals ('Both' on ties).
data['Better_Event'] = np.where(data['Total_Summer']==data['Total_Winter'],'Both',np.where(data['Total_Summer']>data['Total_Winter'],'Summer','Winter'))
# Most frequent label across countries.
better_event = data['Better_Event'].value_counts(
    ascending=False).index[0]
print(data['Better_Event'].head())
print(better_event)
# --------------
#Code starts here
top_countries = data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
# Drop the last row (the grand-total row of the source table).
lastRow = len(top_countries)-1
top_countries.drop(index=lastRow,inplace=True)
def top_ten(df,col):
    # Names of the ten countries with the largest value in `col`.
    country_list = list(df.nlargest(n=10,columns=col)['Country_Name'])
    return country_list
top_10_summer,top_10_winter,top_10=top_ten(top_countries,'Total_Summer'),top_ten(top_countries,'Total_Winter'),top_ten(top_countries,'Total_Medals')
# Countries appearing in all three top-10 lists.
common=[]
for country in top_10:
    if (country in top_10_summer) and (country in top_10_winter):
        common.append(country)
print(common)
# --------------
#Code starts here
import matplotlib.pyplot as plt
# Subsets restricted to each top-10 group, plotted as three stacked bar charts.
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
fig, (ax1,ax2,ax3)=plt.subplots(3,1,figsize=(14,21))
fig.tight_layout(pad=3.0)
plt.setp(ax1.get_xticklabels(), rotation=30)
plt.setp(ax2.get_xticklabels(), rotation=30)
plt.setp(ax3.get_xticklabels(), rotation=30)
summer_df.plot(x='Country_Name',y='Total_Summer',kind='bar',ax=ax1)
ax1.set_title('Total Summer Medals')
ax1.set_xlabel('Country Name')
ax1.set_ylabel('Medals count')
winter_df.plot(x='Country_Name',y='Total_Winter',kind='bar',ax=ax2)
ax2.set_title('Total Winter Medals')
ax2.set_xlabel('Country Name')
ax2.set_ylabel('Medals count')
top_df.plot(x='Country_Name',y='Total_Medals',kind='bar',ax=ax3)
ax3.set_title('Total Medals')
ax3.set_xlabel('Country Name')
ax3.set_ylabel('Medals count')
# --------------
#Code starts here
# Golden_Ratio = share of gold medals in the season's total, per country.
summer_df['Golden_Ratio']= summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio = summer_df['Golden_Ratio'].max()
summer_country_gold = summer_df[summer_df['Golden_Ratio']==summer_max_ratio]['Country_Name'].values[0]
winter_df['Golden_Ratio']= winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio = winter_df['Golden_Ratio'].max()
winter_country_gold = winter_df[winter_df['Golden_Ratio']==winter_max_ratio]['Country_Name'].values[0]
top_df['Golden_Ratio']= top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio = top_df['Golden_Ratio'].max()
top_country_gold = top_df[top_df['Golden_Ratio']==top_max_ratio]['Country_Name'].values[0]
print(summer_max_ratio,summer_country_gold)
print(winter_max_ratio,winter_country_gold)
print(top_max_ratio,top_country_gold)
# --------------
#Code starts here
# Weighted points: gold=3, silver=2, bronze=1 (grand-total row dropped again).
data_1 = data.drop(index=len(data)-1)
data_1['Total_Points'] = 3*data_1['Gold_Total']+2*data_1['Silver_Total']+data_1['Bronze_Total']
most_points = data_1['Total_Points'].max()
best_country = data_1[data_1['Total_Points']==most_points]['Country_Name'].values[0]
print(most_points,best_country)
# --------------
#Code starts here
# Stacked bar of the best country's medal composition.
best = data[data['Country_Name']==best_country]
best = best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot(kind='bar',stacked=True)
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
| [
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] | [((171, 188), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (182, 188), True, 'import pandas as pd\n'), ((1485, 1521), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(14, 21)'}), '(3, 1, figsize=(14, 21))\n', (1497, 1521), True, 'import matplotlib.pyplot as plt\n'), ((3522, 3549), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""United States"""'], {}), "('United States')\n", (3532, 3549), True, 'import matplotlib.pyplot as plt\n'), ((3550, 3576), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Medals Tally"""'], {}), "('Medals Tally')\n", (3560, 3576), True, 'import matplotlib.pyplot as plt\n'), ((3577, 3600), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (3587, 3600), True, 'import matplotlib.pyplot as plt\n'), ((396, 469), 'numpy.where', 'np.where', (["(data['Total_Summer'] > data['Total_Winter'])", '"""Summer"""', '"""Winter"""'], {}), "(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')\n", (404, 469), True, 'import numpy as np\n')] |
from bricks_modeling.connectivity_graph import ConnectivityGraph
import numpy as np
from numpy import linalg as LA
import util.geometry_util as geo_util
from solvers.rigidity_solver.algo_core import (
spring_energy_matrix,
transform_matrix_fitting,
solve_rigidity
)
from solvers.rigidity_solver.internal_structure import structure_sampling
import copy
def simulate_step(structure_graph: ConnectivityGraph, n: int, bricks, step_size=1):
    """Deform `bricks` by one step along the n-th non-rigid zero-energy mode.

    The structure is sampled into points/edges, the spring-energy (stiffness)
    matrix is assembled, and its (near-)zero eigenvectors are projected out of
    the span of the six trivial rigid-body motions. The n-th remaining vector
    is normalized and applied to every brick via a fitted rigid transform.
    Returns a deep copy of `bricks` with updated transforms (color set to 4).
    """
    structure_graph.bricks = bricks
    points, edges, points_on_brick, direction_for_abstract_edge = structure_sampling(structure_graph)
    stiffness = spring_energy_matrix(points, edges, direction_for_abstract_edge)
    # Eigenvectors whose eigenvalue is numerically zero span the motion space.
    null_vectors = [vec for val, vec in geo_util.eigen(stiffness, symmetric=True) if abs(val) < 1e-6]
    print("Number of points", len(points))
    # Project out the six trivial rigid-body motions (translations/rotations).
    rigid_basis = geo_util.trivial_basis(points)
    projected = [geo_util.subtract_orthobasis(vec, rigid_basis) for vec in null_vectors]
    # Exactly six vectors must vanish after the projection (a 6-DOF object).
    zero_count = 0
    for vec in projected:
        if np.isclose(vec, np.zeros_like(vec)).all():
            zero_count += 1
    assert zero_count == 6
    motion = projected[n]
    motion = motion / LA.norm(motion)
    displacement = motion.reshape(-1, 3)
    deformed_bricks = copy.deepcopy(bricks)
    for i, brick in enumerate(deformed_bricks):
        idx = np.array(points_on_brick[i])
        before = points[idx]
        after = before + step_size * displacement[idx]
        # Best-fit rigid transform taking the brick's sample points to their
        # displaced positions; apply it to the brick's pose.
        R, T = transform_matrix_fitting(before, after)
        brick.trans_matrix[:3, :3] = (
            R @ brick.trans_matrix[:3, :3]
        )
        brick.trans_matrix[:3, 3] = (
            R @ brick.trans_matrix[:3, 3] + T
        )
        brick.color = 4 # transparent color : 43
    return deformed_bricks
"util.geometry_util.subtract_orthobasis",
"solvers.rigidity_solver.internal_structure.structure_sampling",
"solvers.rigidity_solver.algo_core.spring_energy_matrix",
"numpy.linalg.norm",
"solvers.rigidity_solver.algo_core.transform_matrix_fitting",
"numpy.array",
"copy.deepcopy",
"util.geometry_util.tr... | [((551, 586), 'solvers.rigidity_solver.internal_structure.structure_sampling', 'structure_sampling', (['structure_graph'], {}), '(structure_graph)\n', (569, 586), False, 'from solvers.rigidity_solver.internal_structure import structure_sampling\n'), ((596, 660), 'solvers.rigidity_solver.algo_core.spring_energy_matrix', 'spring_energy_matrix', (['points', 'edges', 'direction_for_abstract_edge'], {}), '(points, edges, direction_for_abstract_edge)\n', (616, 660), False, 'from solvers.rigidity_solver.algo_core import spring_energy_matrix, transform_matrix_fitting, solve_rigidity\n'), ((676, 709), 'util.geometry_util.eigen', 'geo_util.eigen', (['M'], {'symmetric': '(True)'}), '(M, symmetric=True)\n', (690, 709), True, 'import util.geometry_util as geo_util\n'), ((979, 1009), 'util.geometry_util.trivial_basis', 'geo_util.trivial_basis', (['points'], {}), '(points)\n', (1001, 1009), True, 'import util.geometry_util as geo_util\n'), ((1725, 1746), 'copy.deepcopy', 'copy.deepcopy', (['bricks'], {}), '(bricks)\n', (1738, 1746), False, 'import copy\n'), ((1252, 1292), 'util.geometry_util.subtract_orthobasis', 'geo_util.subtract_orthobasis', (['vec', 'basis'], {}), '(vec, basis)\n', (1280, 1292), True, 'import util.geometry_util as geo_util\n'), ((1687, 1701), 'numpy.linalg.norm', 'LA.norm', (['e_vec'], {}), '(e_vec)\n', (1694, 1701), True, 'from numpy import linalg as LA\n'), ((1845, 1873), 'numpy.array', 'np.array', (['points_on_brick[i]'], {}), '(points_on_brick[i])\n', (1853, 1873), True, 'import numpy as np\n'), ((2019, 2072), 'solvers.rigidity_solver.algo_core.transform_matrix_fitting', 'transform_matrix_fitting', (['points_before', 'points_after'], {}), '(points_before, points_after)\n', (2043, 2072), False, 'from solvers.rigidity_solver.algo_core import spring_energy_matrix, transform_matrix_fitting, solve_rigidity\n'), ((1413, 1431), 'numpy.zeros_like', 'np.zeros_like', (['vec'], {}), '(vec)\n', (1426, 1431), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 28 09:27:49 2020
@author: <NAME>
"""
import pickle
import pandas as pd
import numpy as np
from country import country
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from scipy.optimize import dual_annealing
from scipy.optimize import brute
from scipy.interpolate import interp1d
from scipy.ndimage.filters import uniform_filter1d
import psutil
from functools import partial
import multiprocessing as mp
from tqdm import tqdm_notebook as tqdm
import pdb
from datetime import date, datetime, timedelta
import time
from pathlib import Path
from matplotlib import pyplot as plt
import statsmodels.api as sm
from sklearn import linear_model
import matplotlib.patches as mpatches
import country_converter as coco
import math
import seaborn as sns
# --------------------------------------------------------
# Global variables, chosen cohorts of data and estimates
# --------------------------------------------------------
from param_simple import *
# ----------------------
# Main class
# ----------------------
class solveCovid:
    def __init__(self,iso2: str): # eg 'US'
        """Store the country code and every scenario/assumption switch.

        Only configuration is set here; data loading happens in `prelim()`.
        """
        self.iso2 = iso2
        # Policy strategies for forecast
        self.policy = 'optim' # ['optim', 'linear']
        self.phi_option = 'fit' # ['fit','exo']: Fit phi to latest data or specify as exogenous
        self.phi_exo = 2.5e-9 # weight on mobility in social welfare function
        self.phi_min = 1e-13 # Lowerbound for phi - authorities care about output
        # Infection rate model for forecast
        self.gamma_tilde_model = 'AR1' # ['AR1','AR2','shock']
        self.gamma_shock_length = 10 # Shock gamma_tilde for x days
        self.gamma_shock_depth = 0.5 # Daily increment of gamma
        # Optimizer starting point and bounds (module-level defaults from param_simple).
        self.default_init_single = default_init_single
        self.default_bounds_single = default_bounds_single
        # Vaccine assumptions
        self.vac_assump = 'vac_base' # Vaccination scenarios: ['vac_base','vac_worse','vac_better']
        self.vac_receiver = 'S+R' # Vaccines given to S or S+R? ['S only','S+R']
        self.effi_one = 0.5 # Efficacy after one dose in %
        self.effi_two = 0.95 # Efficacy after two doses in %
        self.target_weight = 0.7 # How targeted vaccine distribution is (1 = sequenced from eldest to youngest, 0 is random)
        self.vac_base_cover = 1 # Baseline: (already started): % of effective coverage by December 2021 (to be controlled by country-specific scaling factor below)
        self.vac_base_delayedstart = '2021-06-30' # Baseline: (hasn't started): first date of vaccination
        self.vac_base_delayedcover = 0.75 # Baseline: (hasn't started): % of contracted dosages deployed by December 2021
        self.vac_worse_cover = 0.3 # Worse (started): Use by end of 2021
        self.vac_worse_delayedstart = '2021-09-30' # Worse (hasn't started): Starting date
        self.vac_worse_delayedcover = 0.3 # Worse (hasn't started): Use by end of 2021
        self.vac_better_cover = 1.3
        self.vac_better_delayedstart = '2021-06-30'
        self.vac_better_delayedcover = 1
        # Reinfection and loss of immunity
        self.reinfect = 'immune' # ['immune','reinfect']
        self.r_re1_R = np.log(2)/10000 # Baseline: R loses immunity after 3 years
        self.r_re1_V = np.log(2)/10000 # Baseline: V loses immunity after 3 years
        self.r_re2_R = np.log(2)/60 # Downside risk: R loses immunity after 60 days, approx 1% of R lose immunity each day
        self.r_re2_V = np.log(2)/60 # Downside risk: V loses immunity after 60 days, approx 1% of V lose immunity each day
        # Death probabilities
        self.pdth_assump = 'martingale' # ['martingale','treatment']
        self.pdth_min = 0.005 # Lowerbound on death probability - countries with very few cases still think there is death probability
        self.pdth_halflife = 60 # Halflife for treatment case; no. of days it takes to close half the gap of current and assumed minimum death prob
        self.pdth_theta = np.exp(-np.log(2)/self.pdth_halflife)
# --------------- 1. Preliminary: Get the data ------------------------
    def prelim(self):
        """Load and clean this country's data and build the vaccination path.

        Populates (among others): self.N, self.df2 (in-sample panel starting
        once total cases exceed `virus_thres`), self.df_v (vaccination path
        extended to `default_maxT` under the chosen scenario), the *_data_fit
        lists, self.maxT, self.T and self.GLOBAL_PARAMS.
        Relies on the module-level `df1` panel from param_simple.
        """
        iso2 = self.iso2
        self.N = df1.fillna(method='ffill')['population'][iso2].iloc[-1]
        df2 = df1.iloc[:,df1.columns.get_level_values(1)==iso2][[
            'total_cases','total_deaths','new_cases','new_deaths',
            'google_smooth','icu_patients','hosp_patients','reproduction_rate',
            'new_tests','tests_per_case','aged_70_older',
            'vac_total','vac_people',
            'vac_fully']][df1['total_cases'][iso2] > virus_thres]
        df2 = df2.droplevel('iso2',axis=1)
        df2['vac_total'] = df2['vac_total'].interpolate()
        df2['vac_people'] = df2['vac_people'].interpolate()
        if iso2 == 'AU' or iso2 == 'SA': # Countries with no breakdowns; do manual approximation
            df2['vac_partial'] = 0.8 * df2['vac_total']
            df2['vac_fully'] = 0.2 * df2['vac_total']
        else : # For most countries,
            date1 = df2['vac_fully'].first_valid_index() # Next 2 lines fill NA in 'vac_fully', so vac_partial is defined
            df2['vac_fully'].iloc[:df2.index.get_loc(date1)-1] = 0
            df2['vac_fully'] = df2['vac_fully'].interpolate()
            df2['vac_partial'] = df2['vac_people'] - df2['vac_fully']
        df2 = df2.fillna(0) # Replace NaN by 0 - deaths and vaccinations
        # Initial population split on the first in-sample day.
        PopulationI = df2['total_cases'][0]
        PopulationD = df2['total_deaths'][0]
        if PopulationD==0:
            PopulationD = 0
            PopulationR = 5
        else:
            PopulationR = PopulationD * 5
        PopulationCI = PopulationI - PopulationD - PopulationR # Undetected and infectious cases
        self.cases_data_fit = df2['total_cases'].tolist()
        self.deaths_data_fit = df2['total_deaths'].tolist()
        self.newcases_data_fit = df2['new_cases'].tolist()
        self.newdeaths_data_fit = df2['new_deaths'].tolist()
        self.balance = self.cases_data_fit[-1] / max(self.deaths_data_fit[-1], 10) / 3
        date_day_since100 = pd.to_datetime(df2.index[0])
        self.maxT = (default_maxT - date_day_since100).days + 1
        self.mobility_vec = df2['google_smooth'].values
        self.T = len(df2)
        self.t_cases = np.arange(0,self.T)
        self.mobility_interp = interp1d(self.t_cases,self.mobility_vec,bounds_error=False,fill_value=0.,kind='cubic')
        self.GLOBAL_PARAMS = (self.N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v)
        self.gamma_0_days = 1 # average of gamma_t during first n days becomes the target
        # Compute vaccination parameters
        self.vac_partial = df2['vac_partial'].values
        self.vac_fully = df2['vac_fully'].values
        #self.vac_contracted = 1000*df_vac.loc[iso2]['No. of people covered (thousands)']/self.N
        df2['V_'] = self.N * (self.effi_one*df2['vac_partial']
                              + self.effi_two*df2['vac_fully'])/100 # V = expected number of effectively vaccinated persons
        ix = pd.date_range(start=df2.index[0], end=default_maxT, freq='D') # Expand time-sample, to include forecast later
        df_v = df2.reindex(ix)
        # Vaccination assumptions
        # Country-specific scaling of end-2021 coverage used by all scenarios below.
        if self.iso2 in ['GB','US']:
            vac_scale = 1
        elif self.iso2 in ['BE','FR','DE','IT','NL','PL','SG','ES','CH','RO','CL','CA']:
            vac_scale = 0.8
        elif self.iso2 in ['AU','SA','SE','TR']:
            vac_scale = 0.65
        elif self.iso2 in ['AR','BR','MX','RU']:
            vac_scale = 0.50
        elif self.iso2 in ['ID','IN','JP','KR','MY','TH']:
            vac_scale = 0.25
        elif self.iso2 in ['ZA']:
            vac_scale = 0.10
        else:
            vac_scale = 0.50
            print('Missing vaccine assumption for selected country')
        # Pin anchor points on the extended path; gaps are interpolated below.
        if self.vac_assump == 'vac_base':
            if df2['V_'][-1] > 0: # already started
                df_v['V_'].loc['2021-12-31'] = self.vac_base_cover * vac_scale * self.N
            elif df2['V_'][-1] == 0: # If has not started, assume starting by xxx and cover xxx at year end
                df_v['V_'].loc[self.vac_base_delayedstart] = 100 # 100 = assumed number of effectively vaccinated on first day
                df_v['V_'].loc['2021-12-31'] = self.vac_base_delayedcover* vac_scale*self.N # partial orders filled by year end
        elif self.vac_assump == 'vac_worse':
            if df2['V_'][-1] > 0:
                df_v['V_'].loc['2021-12-31'] = self.vac_worse_cover * vac_scale * self.N
            elif df2['V_'][-1] == 0:
                df_v['V_'].loc[self.vac_worse_delayedstart] = 100
                df_v['V_'].loc['2021-12-31'] = self.vac_worse_delayedcover* vac_scale*self.N
        elif self.vac_assump == 'vac_better':
            if df2['V_'][-1]>0:
                df_v['V_'].loc['2021-12-31'] = self.vac_better_cover * vac_scale * self.N
            elif df2['V_'][-1] == 0:
                df_v['V_'].loc[self.vac_better_delayedstart] = 100
                df_v['V_'].loc['2021-12-31'] = self.vac_better_delayedcover* vac_scale*self.N
        df_v['V_'] = df_v['V_'].interpolate()
        df_v['V_'] = df_v['V_'].clip(0,self.N)
        self.df2 = df2
        self.df_v = df_v
        print(f'Data preparation for {iso2} done')
# --------------------------3 . SEIR model ------------------
    def step_seir(self, t, x, gamma_t, p_dth) -> list:
        """
        SEIR model building on DELPHI v.3
        Features 17 distinct states, taking into account undetected, deaths, hospitalized and
        recovered
        [0 S, 1 E, 2 I, 3 UR, 4 DHR, 5 DQR, 6 UD, 7 DHD, 8 DQD, 9 R, 10 D,
        11 TH, 12 DVR,13 DVD, 14 DD, 15 DT, 16 V]

        Advances state vector `x` by one day given the infection rate `gamma_t`
        and death probability `p_dth` for day `t`; returns the new state list.
        Transition rates r_i, r_d, r_ri, r_rh, r_dth, r_rv and shares p_d, p_h,
        p_v come from the module-level parameters (param_simple).
        """
        S, E, I, AR, DHR, DQR, AD, DHD, DQD, R, D, TH, DVR, DVD, DD, DT, V = x
        # Daily flow of newly (effectively) vaccinated people from the vaccination path.
        r_v = self.df_v['V_'].iloc[t+1] - self.df_v['V_'].iloc[t]
        # Reinfection parameters
        if self.reinfect == 'immune':
            r_re_R = self.r_re1_R
            r_re_V = self.r_re1_V
        elif self.reinfect == 'reinfect':
            # Downside immunity-loss rates only kick in out of sample (t > T).
            if t <= self.T:
                r_re_R = self.r_re1_R
                r_re_V = self.r_re1_V
            else:
                r_re_R = self.r_re2_R
                r_re_V = self.r_re2_V
        # Vaccination recipients (S, or S+R)
        # zeta = share of today's doses going to susceptibles.
        if self.vac_receiver == 'S only':
            zeta = 1
        elif self.vac_receiver == 'S+R':
            zeta = S/(S+R)
        else:
            # NOTE(review): any other value leaves `zeta` unbound and raises below.
            print('Re-specify vaccine recipient choice')
        # Main equations
        S1 = S - gamma_t * S * I / self.N + r_re_R*R +r_re_V*V - r_v * zeta
        if S1 < 0: # Vaccination reaches saturating point
            S1 = 0
            # Rescale today's vaccination flow so S lands exactly at zero.
            r_v = (S - gamma_t * S * I / self.N + r_re_R*R +r_re_V*V) /zeta
        E1 = E + gamma_t * S * I / self.N - r_i * E
        I1 = I + r_i * E - r_d * I
        AR1 = AR + r_d * (1 - p_dth) * (1 - p_d) * I - r_ri * AR
        DHR1 = DHR + r_d * (1 - p_dth) * p_d * p_h * I - r_rh * DHR
        DQR1 = DQR + r_d * (1 - p_dth) * p_d * (1 - p_h) * I - r_ri * DQR
        AD1 = AD + r_d * p_dth * (1 - p_d) * I - r_dth * AD
        DHD1 = DHD + r_d * p_dth * p_d * p_h * I - r_dth * DHD
        DQD1 = DQD + r_d * p_dth * p_d * (1 - p_h) * I - r_dth * DQD
        R1 = R + r_ri * (AR + DQR) + r_rh * DHR - r_re_R*R - r_v * (1-zeta)
        D1 = D + r_dth * (AD + DQD + DHD)
        # Helper states
        TH1 = TH + r_d * p_d * p_h * I
        DVR1 = DVR + r_d * (1 - p_dth) * p_d * p_h * p_v * I - r_rv * DVR
        DVD1 = DVD + r_d * p_dth * p_d * p_h * p_v * I - r_dth * DVD
        DD1 = DD + r_dth * (DHD + DQD)
        DT1 = DT + r_d * p_d * I
        V1 = V + r_v -r_re_V*V
        x1 = [S1, E1, I1, AR1, DHR1, DQR1, AD1, DHD1, DQD1,
              R1, D1, TH1, DVR1, DVD1, DD1, DT1, V1]
        return x1
# ------------------ X. Construct initial conditions
    def initial_states_func(self,k):
        """Build the 17-element initial state vector scaled by constant `k`.

        `k` scales the initial E and I pools relative to detected infectious
        cases; `fit_gamma0` searches over it. p_dth0 is chosen so that the
        model's first-day death increment matches observed new deaths.
        """
        N, PopulationCI, PopulationR, PopulationD, PopulationI, p_d, p_h, p_v = self.GLOBAL_PARAMS
        p_dth0 = self.newdeaths_data_fit[0]/(r_dth*PopulationCI) # Set p_dth0 to match D1-D0 to newdeaths_data_fit
        E_0 = PopulationCI / p_d * k
        I_0 = PopulationCI / p_d * k
        UR_0 = (PopulationCI / p_d - PopulationCI) * (1 - p_dth0)
        DHR_0 = (PopulationCI * p_h) * (1 - p_dth0)
        DQR_0 = PopulationCI * (1 - p_h) * (1 - p_dth0)
        UD_0 = (PopulationCI / p_d - PopulationCI) * p_dth0
        DHD_0 = PopulationCI * p_h * p_dth0
        DQD_0 = PopulationCI * (1 - p_h) * p_dth0
        R_0 = PopulationR / p_d
        D_0 = PopulationD / p_d
        # Susceptibles are the residual of the total population.
        S_0 = N - (E_0 +I_0 +UR_0 +DHR_0 +DQR_0 +UD_0 +DHD_0 +DQD_0 +R_0 +D_0)
        TH_0 = PopulationCI * p_h
        DVR_0 = (PopulationCI * p_h * p_v) * (1 - p_dth0)
        DVD_0 = (PopulationCI * p_h * p_v) * p_dth0
        DD_0 = PopulationD
        DT_0 = PopulationI
        V_0 = 0
        x_init = [
            S_0, E_0, I_0, UR_0, DHR_0, DQR_0, UD_0, DHD_0, DQD_0, R_0,
            D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0
        ]
        return x_init
# Find k=k1,k2 that matches gamma_0 to 2.08 (R0=6 equivalent)
    def loss_gamma0(self,k):
        """Objective for the k-search: squared gap between the average implied
        gamma over the first `gamma_0_days` days and the R0=6 target (r_d*6).
        """
        newcases = np.array(self.newcases_data_fit)
        newdeaths = np.array(self.newdeaths_data_fit)
        # 21-day moving averages to smooth daily reporting noise.
        newcases_sm = uniform_filter1d(newcases, size=21, mode='nearest')
        newdeaths_sm = uniform_filter1d(newdeaths, size=21, mode='nearest')
        gamma_t_vec = []
        x_init = self.initial_states_func(k)
        (S_0, E_0, I_0, UR_0, DHR_0, DQR_0, UD_0, DHD_0, DQD_0, R_0,
         D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0) = x_init
        newcases_sm2 = np.append(newcases_sm, newcases_sm[-2:]) # Extend the list for forward projection below
        newdeaths_sm2 = np.append(newdeaths_sm, newdeaths_sm[-1])
        x_0 = x_init.copy()
        for t in range(self.gamma_0_days): # Target first n days
            # Invert the SEIR recursions to back out the gamma/p_dth that
            # reproduce the smoothed new cases and deaths.
            gamma_t = (newcases_sm2[t+2]/(r_d*p_d) - (1-r_d)**2 *I_0 - r_i*(2-r_d-r_i)*E_0 )*self.N/(r_i*S_0*I_0)
            p_dth = (newdeaths_sm2[t+1] - r_dth*(1-r_dth)*(DHD_0 + DQD_0))/(r_dth*r_d*p_d*I_0)
            gamma_t = np.clip(gamma_t, 0.01, 10)
            p_dth = np.clip(p_dth,0,1) # Probability limit [0,1]
            x_1 = self.step_seir(t, x_0, gamma_t, p_dth)
            x_0 = x_1
            gamma_t_vec.append(gamma_t)
        gamma_0 = np.mean(gamma_t_vec)
        loss = (gamma_0 - (r_d*6) )**2 # gamma_0 equivalent to R0=6 is 2.08
        return loss
def fit_gamma0(self):
output = dual_annealing(
self.loss_gamma0,
x0 = [5],
bounds = [(1,50)],
)
k_star = output.x
return k_star
def get_initial_conditions(self):
if Path(f'../params/param_fixed/kstar.csv').exists():
df = pd.read_csv(f'../params/param_fixed/kstar.csv')
kstar = df[self.iso2].values[0]
else:
kstar = self.fit_gamma0()[0] # find kstar that matches gamma_0 to target
x_init = self.initial_states_func(kstar)
return x_init
# -------------------- x. Implied gamma_t and pdth_t in-sample -------------------
    def gamma_t_compute(self):
        """Back out the in-sample implied infection rate and death probability.

        Inverts the SEIR recursions day by day against 21-day-smoothed new
        cases/deaths, storing 'gamma_t', 'pdth_t' and 'HD_HR' on self.df2 and
        the state paths (S_vec, I_vec, DHR_vec, DHD_vec) on self.
        Returns (gamma_t_vec, p_dth_vec).
        """
        newcases = np.array(self.newcases_data_fit)
        newdeaths = np.array(self.newdeaths_data_fit)
        newcases_sm = uniform_filter1d(newcases, size=21, mode='nearest')
        newdeaths_sm = uniform_filter1d(newdeaths, size=21, mode='nearest')
        gamma_t_vec = []
        p_dth_vec = []
        x_init = self.get_initial_conditions()
        S_0, E_0, I_0, AR_0, DHR_0, DQR_0, AD_0, DHD_0, DQD_0, R_0, D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0 = x_init
        S_vec = [S_0]
        E_vec = [E_0]
        I_vec = [I_0]
        DT_vec = [DT_0]
        DD_vec = [DD_0]
        DHR_vec = [DHR_0]
        DHD_vec = [DHD_0]
        newcases_sm2 = np.append(newcases_sm, newcases_sm[-2:]) # Extend the list for forward projection below
        newdeaths_sm2 = np.append(newdeaths_sm, newdeaths_sm[-1])
        x_0 = x_init.copy()
        for t in range(len(newcases)):
            # Work backwards to compute 'exact' gamma_t and p_dth
            gamma_t = (newcases_sm2[t+2]/(r_d*p_d) - (1-r_d)**2 *I_0 - r_i*(2-r_d-r_i)*E_0 )*self.N/(r_i*S_0*I_0)
            p_dth = (newdeaths_sm2[t+1] - r_dth*(1-r_dth)*(DHD_0 + DQD_0))/(r_dth*r_d*p_d*I_0)
            gamma_t = np.clip(gamma_t, 0.01, 10)
            p_dth = np.clip(p_dth,0,1) # Probability limit [0,1]
            x_1 = self.step_seir(t, x_0, gamma_t, p_dth)
            S_0, E_0, I_0, AR_0, DHR_0, DQR_0, AD_0, DHD_0, DQD_0, R_0, D_0, TH_0, DVR_0, DVD_0, DD_0, DT_0, V_0 = x_1
            x_0 = x_1
            gamma_t_vec.append(gamma_t)
            p_dth_vec.append(p_dth)
            S_vec.append(S_0)
            I_vec.append(I_0)
            E_vec.append(E_0)
            DT_vec.append(DT_0)
            DD_vec.append(DD_0)
            DHR_vec.append(DHR_0)
            DHD_vec.append(DHD_0)
        self.df2['gamma_t'] = gamma_t_vec
        self.df2['pdth_t'] = p_dth_vec
        self.S_vec = S_vec # In-sample estmates, useful for phi calculation later on
        self.I_vec = I_vec
        self.DHR_vec = DHR_vec # For fitting death probability
        self.DHD_vec = DHD_vec
        # Hospitalized (recovering + dying) as % of population.
        HD_HR = np.array(self.DHR_vec) + np.array(self.DHD_vec)
        self.df2['HD_HR'] = 100*HD_HR[:-1]/self.N
        # gamma_t_sm = uniform_filter1d(gamma_t_vec, size=6, mode='nearest')
        # self.df2['gamma_sm'] = gamma_t_sm
        return gamma_t_vec, p_dth_vec
# -------------------- x. Estimating the model -----------
def gamma_func(self, params):
m_t = self.df2['google_smooth'].values
tvec = np.arange(len(m_t))
beta0, beta1 = params
gamma_vec = beta0*np.exp(beta1* m_t)
return gamma_vec
def loss_betas(self, params) -> float:
gamma_model = self.gamma_func(params)
loss = sum( (self.df2['gamma_t'].values[:len(gamma_model)] - gamma_model)**2 )
return loss
    def fitmodel(self):
        """Estimate the full model: mobility betas, AR dynamics of the
        residual gamma_tilde, and (optionally) the welfare weight phi.

        Stores best_betas, best_rho1, best_rho2, best_params and phi on self,
        and several derived columns on self.df2. Returns best_params
        (beta0, beta1, rho_AR1, rho1_AR2, rho2_AR2).
        """
        # A. Fit beta0 and beta1
        x0 = self.default_init_single
        bounds_0 = self.default_bounds_single
        output = dual_annealing(
            self.loss_betas,
            x0 = x0,
            bounds = bounds_0,
            )
        best_betas = output.x
        self.best_betas = best_betas
        # B. Fit the residual (gamma_tilde) to AR models
        m_t = self.df2['google_smooth'].values
        # NOTE(review): tvec is never used below.
        tvec = np.arange(len(self.df2))
        beta0, beta1 = self.best_betas
        self.df2['gamma_mob'] = beta0*np.exp(beta1* m_t)
        self.df2['gamma_tilde'] = self.df2['gamma_t'] - self.df2['gamma_mob']
        self.df2['gamma_tilde_sm'] = uniform_filter1d(self.df2['gamma_tilde'],
                                                      size=21, mode='reflect')
        self.df2['gamma_tilde_resid'] = self.df2['gamma_tilde'] - self.df2['gamma_tilde_sm']
        y = self.df2['gamma_tilde_sm']
        self.df2['gamma_tilde_sm_lag1'] = self.df2['gamma_tilde_sm'].shift(1) # No constant term
        self.df2['gamma_tilde_sm_lag2'] = self.df2['gamma_tilde_sm'].shift(2)
        reg_AR1 = sm.OLS(y,self.df2['gamma_tilde_sm_lag1'],missing='drop').fit()
        reg_AR2 = sm.OLS(y,self.df2[['gamma_tilde_sm_lag1','gamma_tilde_sm_lag2']],missing='drop').fit()
        best_rho1 = reg_AR1.params[0]
        best_rho1 = np.clip(best_rho1, 0.1, 0.99) #Assume stationarity
        best_rho2 = reg_AR2.params[:]
        best_params = np.array([beta0, beta1, best_rho1, best_rho2[0], best_rho2[1]])
        self.best_rho1 = best_rho1
        self.best_rho2 = best_rho2
        self.best_params = best_params
        # C. Empirically fit phi for optimal policy to last observation
        if self.phi_option == 'fit':
            m = self.df2['google_smooth'][-15:].mean() # Take average of last 15 days to smooth volatility
            s = self.S_vec[-1]/self.N
            i = self.I_vec[-1]/self.N
            gamma_tilde = self.df2['gamma_tilde'][-1]
            pdth = self.df2['pdth_t'][-1]
            pdth = max(pdth, self.pdth_min) # Get around cases where pdth=0 for countries with very few cases
            # phi is backed out from the first-order condition of the planner's
            # problem at the latest observed mobility level.
            LHS1 = pdth*r_d*i*s*(beta0*beta1*np.exp(beta1*m))
            LHS2 = pdth*r_d*i*(1 - r_d + s*(gamma_tilde + beta0*np.exp(beta1*m)))
            phi = -(LHS1 * LHS2)/m
            self.phi = max(phi, self.phi_min)
        elif self.phi_option == 'exo':
            self.phi = self.phi_exo
        return best_params
# ------------------ x. Forecasts ---------------------------
def step_gamma_tilde(self, gamma_tilde_lag1, gamma_tilde_lag2, model='AR1'):
if model =='AR1':
return self.best_rho1*gamma_tilde_lag1
elif model =='AR2':
return self.best_rho2[0]*gamma_tilde_lag1 + self.best_rho2[1]*gamma_tilde_lag2
    def mobility_choice(self,x,gamma_tilde,pdth):
        """Pick the mobility level for the next period under self.policy.

        `x` is the 17-element SEIR state; `gamma_tilde` and `pdth` are the
        current residual infection rate and death probability.
        NOTE(review): an unrecognized self.policy leaves `mob` unbound and
        raises NameError at the return statement.
        """
        if self.policy == 'constant':
            mob = self.poparam_constant
        elif self.policy == 'linear-I': # Respond linearly to infection level
            mob = self.poparam_linear_I[0] + self.poparam_linear_I[1]*x[2]
        elif self.policy == 'linear-dI': # Respond to new infections
            dI = r_i*x[1] - r_d*x[2] # x[1]=E, x[2]=I
            mob = self.poparam_linear_dI[0] + self.poparam_linear_dI[1]*dI
        elif self.policy == 'optim': # Analytical optimal policy based on simplified model and quadratic losses
            beta0 = self.best_params[0]
            beta1 = self.best_params[1]
            phi = self.phi
            s = x[0]/self.N
            i = x[2]/self.N
            # Grid-search the first-order condition over mobility in [-1, 0]:
            # pick the m where marginal mobility cost (RHS) meets marginal
            # infection/death benefit (LHS).
            m_set = np.linspace(-1,0,101)
            RHS = -phi*m_set
            LHS1 = pdth*r_d*i*s*(beta0*beta1*np.exp(beta1*m_set))
            LHS2 = pdth*r_d*i*(1 - r_d + s*(gamma_tilde + beta0*np.exp(beta1*m_set)))
            LHS = LHS1 * LHS2
            m_id = np.argmin(np.abs(RHS-LHS))
            mob = m_set[m_id]
        return mob
def fatality_factor(self,V): # Factor to adjust 'base' fatality prob
idx = (f_table[self.iso2]['vaccine_%'] - V/self.N).abs().argmin() # Find idx to look up in fatality table
factor = f_table[self.iso2]['fatality_ratio'][idx]
return factor
    def sim_seir(self):
        """Simulate the SEIR system from the first data date through `default_maxT`.

        In-sample (t < len(df2)) the observed gamma and death-probability
        series are used; out-of-sample each input is forecast (death prob via
        the martingale/treatment assumption, gamma_tilde via the fitted AR or
        shock rule, mobility via `mobility_choice`) and appended to the same
        arrays so the common indexing `series[t]` works in both regimes.

        Returns
        -------
        pandas.DataFrame
            `df3`: the data reindexed to the full horizon, merged with the
            simulated state paths and the forecast inputs. Also stored on
            ``self.df3``.
        """
        df2 = self.df2
        ix = pd.date_range(start=df2.index[0], end=default_maxT, freq='D') # Expand time-sample, to include forecast later
        df3 = df2.reindex(ix)
        x_init = self.get_initial_conditions()
        x_data = np.array(x_init)
        # Forecast arrays start as the observed series and are extended in the loop
        gamma_tilde_fc = self.df2['gamma_tilde'].values
        gamma_tilde_sm_fc = self.df2['gamma_tilde_sm'].values
        pdth_t_targ = [] # Death prob when vaccines are targeted
        pdth_t_base = [] # Base death prob if vaccines are given randomly
        pdth_t_fc = self.df2['pdth_t'].values
        pdth_t_base_fc = pdth_t_fc.copy()
        gamma_mob_fc = self.df2['gamma_mob'].values
        mob_fc = self.df2['google_smooth'].values
        # Load parameters: use in-memory estimates if present, else read the saved ones
        if hasattr(self, 'best_params'):
            beta0, beta1, rho, rhos_1, rhos_2 = self.best_params
        else:
            df_param = pd.read_csv(f'../params/{param_load_folder}/param_est.csv')
            beta0, beta1, rho, rhos_1, rhos_2 = df_param[self.iso2]
        for t in range(self.maxT):
            # Vaccination-dependent fatality adjustment; x_init[-1] is V_t
            factor = self.fatality_factor(x_init[-1])
            eta = self.target_weight
            if t<len(self.df2): # In sample
                pdth_t = pdth_t_fc[t]
                # Invert the targeting formula to recover the base death prob
                pdth_base = pdth_t/(eta*factor + 1-eta)
                pdth_targ = factor*pdth_base
            # if t==len(self.df2): # Parse pdth_base of hospitalised/N
            #     y = pdth_t_base
            #     X = self.df2['HD_HR'].shift(30) # Use lagged hospitalised as the predictor
            #     X = sm.add_constant(X)
            #     reg_pdth = sm.OLS(y,X, missing='drop').fit()
            #     thetas = reg_pdth.params
            #     self.best_theta = thetas
            #     pdb.set_trace()
            #     pdth_t_basex = y - thetas[0] - thetas[1]*X # Base death prob, parsed of hospitalisation wave
            #     self.df2['pdth_base'] = pdth_t_base
            #     self.df2['pdth_base_x'] = pdth_t_basex
            if t>len(self.df2)-1: # Out of sample
                # Death probability
                if self.pdth_assump == 'martingale': # Martingale death rate
                    pdth_base = pdth_t_base[-1]
                elif self.pdth_assump == 'treatment': # Death prob slowly declines to assumed minimum and assumed halflife
                    pdth_base = self.pdth_theta*pdth_t_base[-1] + (1-self.pdth_theta)*self.pdth_min
                    pdth_base = max(pdth_base, self.pdth_min) # To get around pdth=0 for countries with very few cases
                pdth_t = (eta*factor + 1-eta)*pdth_base
                pdth_targ = factor*pdth_base
                # Gamma_tilde
                if self.gamma_tilde_model == 'AR1':
                    gamma_tilde = rho*gamma_tilde_sm_fc[t-1]
                elif self.gamma_tilde_model == 'AR2':
                    gamma_tilde = rhos_1*gamma_tilde_sm_fc[t-1] + rhos_2*gamma_tilde_sm_fc[t-2]
                elif self.gamma_tilde_model =='shock':
                    # Hold gamma elevated for gamma_shock_length days, then revert to AR(1)
                    if t < len(self.df2) + self.gamma_shock_length:
                        gamma_tilde = gamma_tilde_sm_fc[len(self.df2)-1] + self.gamma_shock_depth
                    else:
                        gamma_tilde = rho*gamma_tilde_sm_fc[t-1]
                # Mobility and overall gamma_t
                mob_t = self.mobility_choice(x_init, gamma_tilde, pdth_t)
                mob_t = max(mob_t, max_lockdown) # Cap restrictions at the assumed maximum lockdown
                gamma_mob_t = beta0*np.exp(beta1*mob_t)
                gamma_t = gamma_tilde + gamma_mob_t
                # Append to data array so that series[t] below picks up the forecast value
                gamma_tilde_sm_fc = np.append(gamma_tilde_sm_fc, gamma_tilde)
                gamma_tilde_fc = np.append(gamma_tilde_fc, gamma_tilde)
                gamma_mob_fc = np.append(gamma_mob_fc, gamma_mob_t)
                mob_fc = np.append(mob_fc, mob_t)
                pdth_t_fc = np.append(pdth_t_fc, pdth_t)
            pdth_t_base.append(pdth_base)
            pdth_t_targ.append(pdth_targ)
            # For in sample, use 'true' inputs
            gamma_t = gamma_tilde_fc[t] + gamma_mob_fc[t]
            p_dth = pdth_t_fc[t]
            if t < range(self.maxT)[-1]: # Stop forecasting at the final period
                x_next = self.step_seir(t, x_init, gamma_t, p_dth)
                x_data = np.vstack((x_data, np.array(x_next)))
                x_init = x_next
        # Fill dataframe: merge simulated paths and forecast inputs into df3
        col_temp = ['S', 'E', 'I', 'AR', 'DHR', 'DQR', 'AD', 'DHD', 'DQD', 'R', 'D', 'TH', 'DVR', 'DVD', 'DD', 'DT', 'V']
        df4 = pd.DataFrame(x_data, columns=col_temp, index=df3.index)
        df3 = df3.merge(df4, how='left', left_index=True, right_index=True)
        df3['gamma_tilde_fc'] = gamma_tilde_fc
        df3['gamma_mob_fc'] = gamma_mob_fc
        df3['gamma_t_fc'] = df3['gamma_tilde_fc'] + df3['gamma_mob_fc']
        df3['mob_fc'] = mob_fc
        df3['pdth_t_fc'] = pdth_t_fc
        df3['pdth_t_base'] = np.array(pdth_t_base)
        df3['pdth_t_targ'] = np.array(pdth_t_targ)
        # Population shares of the key stocks
        df3[['S_N','I_N','DT_N','DD_N','V_N']] = df3[['S','I','DT','DD','V']]/self.N
        self.df3 = df3
        return df3
# ------------------ 5. Predict and plot ---------------------
    def plot_all(self, saveplot=False):
        """Draw the 2x3 diagnostic panel for this country.

        Panels: cases, deaths, susceptible & vaccinated, infection-rate
        decomposition, mobility, and death probability — each showing data
        against the model fit/forecast. The dotted vertical line marks the
        end of the estimation sample (index ``self.T``).

        Parameters
        ----------
        saveplot : bool
            If truthy, save the figure under ../pics/fig_<today>/.

        Returns
        -------
        matplotlib.figure.Figure
        """
        df = self.df3
        transpa = 0.0  # fully transparent legend boxes
        fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(15,8), constrained_layout=True)
        # df_bar = df_bar0[['GDP lost','Total deaths']]
        # df_bar.plot(kind='bar', ax=ax[1,2], secondary_y='Total deaths', rot=0, legend=False)
        # ax[1,2].set_ylabel('percent')
        # ax[1,2].right_ax.set_ylabel('per million')
        # ax[1,2].set_title('Losses of lives and output',fontsize='x-large')
        # L = [mpatches.Patch(color=c, label=col)
        #     for col,c in zip( ('GDP loss','Deaths (rhs)'), plt.rcParams['axes.prop_cycle'].by_key()['color'])]
        # ax[1,2] = plt.legend(handles=L, loc=1, framealpha=transpa)
        # Panel (0,0): cases, with infections on a twin axis
        ax[0,0].plot(df.index, 100*df['total_cases']/self.N, linewidth = 3, label='Case data', color='blue')
        ax[0,0].plot(df.index, 100*df['DT']/self.N, label='$DT_t$', color='red')
        ax[0,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[0,0].set_title('Cases',fontsize='x-large')
        ax[0,0].set(ylabel = '% of population')
        ax2 = ax[0,0].twinx()
        ax2.plot(df.index, 100*df['I']/self.N, label='$I_t$ (rhs)',color='green',linestyle='--')
        # Merge the legends of the primary and twin axes
        lines, labels = ax[0,0].get_legend_handles_labels()
        lines2, labels2 = ax2.get_legend_handles_labels()
        ax2.legend(lines + lines2, labels + labels2, loc='center right', framealpha=transpa,fontsize='x-large')
        #ax2.set(ylabel='% of population')
        # Panel (0,1): deaths
        ax[0,1].plot(df.index, 100*df['total_deaths']/self.N, linewidth = 3, label='Death data', color='blue')
        ax[0,1].plot(df.index, 100*df['DD']/self.N, label='$DD_t$', color='red')
        ax[0,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[0,1].set_title('Deaths',fontsize='x-large')
        ax[0,1].set(ylabel='% of population')
        ax[0,1].legend(loc='best', framealpha=transpa ,fontsize='x-large')
        # Panel (0,2): susceptible and vaccinated stocks
        ax[0,2].plot(df.index, 100*df['S']/self.N, label='$S_t$',color='red')
        ax[0,2].plot(df.index, 100*df['V']/self.N, label='$V_t$',color='red',linestyle=':')
        ax[0,2].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[0,2].set_title('Susceptible & vaccinated',fontsize='x-large')
        ax[0,2].legend(loc='best',framealpha=transpa ,fontsize='x-large')
        ax[0,2].set(ylabel='% of population')
        # Panel (1,0): infection rate decomposition (solid = fit, dotted = forecast)
        ax[1,0].plot(df.index, df['gamma_t'], label=r'$\gamma_t$',color='red')
        ax[1,0].plot(df.index, df['gamma_mob'], label=r'$\gamma^{m}_t$', color ='blue')
        ax[1,0].plot(df.index, df['gamma_tilde'], label=r'$\gamma^{d}$', color='orange')
        ax[1,0].plot(df.index, df['gamma_t_fc'], color='red',linestyle=':')
        ax[1,0].plot(df.index, df['gamma_mob_fc'], color ='blue',linestyle=':')
        ax[1,0].plot(df.index, df['gamma_tilde_fc'], color='orange',linestyle=':')
        ax[1,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[1,0].set_title('Infection rate',fontsize='x-large')
        ax[1,0].legend(loc='best',framealpha=transpa ,fontsize='x-large')
        # Panel (1,1): mobility data vs model
        ax[1,1].plot(df.index, 100*df['google_smooth'], linewidth = 3, label='Google mobility', color='blue')
        ax[1,1].plot(df.index, 100*df['mob_fc'], label='Model', color='red')
        ax[1,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[1,1].legend(loc=0,framealpha=transpa ,fontsize='x-large')
        ax[1,1].set_title('Activity',fontsize='x-large')
        ax[1,1].set(ylabel='% deviations from norm')
        # Panel (1,2): death probability, data and vaccine-allocation variants
        ax[1,2].plot(df.index, 100*df['pdth_t'], label='Death probability', linewidth=3, color='blue')
        ax[1,2].plot(df.index, 100*df['pdth_t_fc'], color='black', label='Forecast')
        ax[1,2].plot(df.index, 100*df['pdth_t_base'], color='black', linestyle='dashed', label='Random vaccines')
        ax[1,2].plot(df.index, 100*df['pdth_t_targ'], color='black', linestyle=':', label='Targeted vaccines')
        ax[1,2].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[1,2].legend(loc=0,framealpha=transpa ,fontsize='x-large')
        ax[1,2].set_title('Death probability',fontsize='x-large')
        ax[1,2].set(ylabel='%')
        plt.setp(ax[0,0].get_xticklabels(), rotation=30, horizontalalignment='right')
        plt.setp(ax[0,1].get_xticklabels(), rotation=30, horizontalalignment='right')
        plt.setp(ax[0,2].get_xticklabels(), rotation=30, horizontalalignment='right')
        plt.setp(ax[1,0].get_xticklabels(), rotation=30, horizontalalignment='right')
        plt.setp(ax[1,1].get_xticklabels(), rotation=30, horizontalalignment='right')
        plt.setp(ax[1,2].get_xticklabels(), rotation=30, horizontalalignment='right')
        # Country short name for the figure title
        cname = coco.convert(names=self.iso2,to='name_short')
        fig.suptitle(f'{cname}-{self.vac_assump}-{self.reinfect}',fontsize='xx-large')
        if saveplot:
            Path(f'../pics/fig_{date.today()}').mkdir(exist_ok=True)
            fig.savefig(f'../pics/fig_{date.today()}/{self.iso2}-{self.policy}-{self.gamma_tilde_model}-{self.vac_assump}-{self.reinfect}.png')
        return fig
    def plot_portrait(self, saveplot=False):
        """Portrait-orientation (3x2) version of :meth:`plot_all`.

        Same six panels — cases, deaths, susceptible & vaccinated, infection
        rate, mobility, death probability — arranged for A4-style output and
        saved as a PDF when ``saveplot`` is truthy.

        Returns
        -------
        matplotlib.figure.Figure
        """
        df = self.df3
        transpa = 0.0  # fully transparent legend boxes
        fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(10,12), constrained_layout=True)
        # Panel (0,0): cases, with infections on a twin axis
        ax[0,0].plot(df.index, 100*df['total_cases']/self.N, linewidth = 3, label='Case data', color='blue')
        ax[0,0].plot(df.index, 100*df['DT']/self.N, label='$DT_t$', color='red')
        ax[0,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[0,0].set_title('Cases',fontsize='x-large')
        ax[0,0].set(ylabel = '% of population')
        ax2 = ax[0,0].twinx()
        ax2.plot(df.index, 100*df['I']/self.N, label='$I_t$ (rhs)',color='green',linestyle='--')
        ax2.grid(None)
        # Merge the legends of the primary and twin axes
        lines, labels = ax[0,0].get_legend_handles_labels()
        lines2, labels2 = ax2.get_legend_handles_labels()
        ax2.legend(lines + lines2, labels + labels2, loc='center right', framealpha=transpa,fontsize='x-large')
        #ax2.set(ylabel='% of population')
        # Panel (0,1): deaths
        ax[0,1].plot(df.index, 100*df['total_deaths']/self.N, linewidth = 3, label='Death data', color='blue')
        ax[0,1].plot(df.index, 100*df['DD']/self.N, label='$DD_t$', color='red')
        ax[0,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[0,1].set_title('Deaths',fontsize='x-large')
        ax[0,1].set(ylabel='% of population')
        ax[0,1].legend(loc='best', framealpha=transpa ,fontsize='x-large')
        # Panel (1,0): susceptible and vaccinated stocks
        ax[1,0].plot(df.index, 100*df['S']/self.N, label='$S_t$',color='red')
        ax[1,0].plot(df.index, 100*df['V']/self.N, label='$V_t$',color='red',linestyle=':')
        ax[1,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[1,0].set_title('Susceptible & vaccinated',fontsize='x-large')
        ax[1,0].legend(loc='best',framealpha=transpa ,fontsize='x-large')
        ax[1,0].set(ylabel='% of population')
        # Panel (1,1): infection rate decomposition (solid = fit, dotted = forecast)
        ax[1,1].plot(df.index, df['gamma_t'], label=r'$\gamma_t$',color='red')
        ax[1,1].plot(df.index, df['gamma_mob'], label=r'$\gamma^{m}_t$', color ='blue')
        ax[1,1].plot(df.index, df['gamma_tilde'], label=r'$\gamma^{d}$', color='orange')
        ax[1,1].plot(df.index, df['gamma_t_fc'], color='red',linestyle=':')
        ax[1,1].plot(df.index, df['gamma_mob_fc'], color ='blue',linestyle=':')
        ax[1,1].plot(df.index, df['gamma_tilde_fc'], color='orange',linestyle=':')
        ax[1,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[1,1].set_title('Infection rate',fontsize='x-large')
        ax[1,1].legend(loc='best',framealpha=transpa ,fontsize='x-large')
        # Panel (2,0): mobility data vs model
        ax[2,0].plot(df.index, 100*df['google_smooth'], linewidth = 3, label='Google mobility', color='blue')
        ax[2,0].plot(df.index, 100*df['mob_fc'], label='Model', color='red')
        ax[2,0].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[2,0].legend(loc=0,framealpha=transpa ,fontsize='x-large')
        ax[2,0].set_title('Mobility',fontsize='x-large')
        ax[2,0].set(ylabel='% deviations from norm')
        # Panel (2,1): death probability, data and vaccine-allocation variants
        ax[2,1].plot(df.index, 100*df['pdth_t'], label='Death probability', linewidth=3, color='blue')
        ax[2,1].plot(df.index, 100*df['pdth_t_fc'], color='black', label='Forecast')
        ax[2,1].plot(df.index, 100*df['pdth_t_base'], color='black', linestyle='dashed', label='Random vaccines')
        ax[2,1].plot(df.index, 100*df['pdth_t_targ'], color='black', linestyle=':', label='Targeted vaccines')
        ax[2,1].axvline(df.index[self.T], linewidth = 2, color='gray', linestyle=':')
        ax[2,1].legend(loc=0,framealpha=transpa ,fontsize='x-large')
        ax[2,1].set_title('Death probability',fontsize='x-large')
        ax[2,1].set(ylabel='%')
        plt.setp(ax[0,0].get_xticklabels(), rotation=30, horizontalalignment='right')
        plt.setp(ax[0,1].get_xticklabels(), rotation=30, horizontalalignment='right')
        plt.setp(ax[1,0].get_xticklabels(), rotation=30, horizontalalignment='right')
        plt.setp(ax[1,1].get_xticklabels(), rotation=30, horizontalalignment='right')
        plt.setp(ax[2,0].get_xticklabels(), rotation=30, horizontalalignment='right')
        plt.setp(ax[2,1].get_xticklabels(), rotation=30, horizontalalignment='right')
        # Country short name for the figure title
        cname = coco.convert(names=self.iso2,to='name_short')
        fig.suptitle(f'{cname}',fontsize=18)
        if saveplot:
            Path(f'../pics/fig_{date.today()}').mkdir(exist_ok=True)
            fig.savefig(f'../pics/fig_{date.today()}/Portrait-{self.iso2}-{self.policy}-{self.gamma_tilde_model}-{self.vac_assump}-{self.reinfect}.pdf')
        return fig
# ---------------------------------------------
# Calling functions
# ---------------------------------------------
# -----------------------------------------
# x. Prelim parameters estimation
# Estimate k_star and save in file (only need to do this once)
def estimate_kstar(cset=['US']):
    """Estimate the parameter k* for each country in `cset`.

    Runs the preliminary data step and `fit_gamma0` for each country, writes
    a one-row table to ../params/param_fixed/kstar.csv (only needed once),
    and returns the resulting DataFrame (columns: 'Parameter' + one per
    country).
    """
    # Renamed from `dict` to avoid shadowing the builtin.
    results = {'Parameter': ['kstar']}
    for c in cset:
        tmp = solveCovid(c)
        tmp.prelim()
        results[c] = tmp.fit_gamma0()
    df = pd.DataFrame(results)
    # Plain string: the original used an f-string with no placeholders.
    df.to_csv('../params/param_fixed/kstar.csv', index=False)
    return df
# -------------------------
# x. Run complete package under scenarios: estimate, forecast, plot, save
def run_baseline(cset=['US']):
    """Estimate, simulate and save the baseline scenario for each country.

    For each country: run the data prep, gamma computation and model fit,
    simulate the SEIR path, draw (but do not save) the diagnostic figure,
    and write the simulated path to ../output. Finally the estimated
    parameters (beta0, beta1, AR terms, phi scaled by 1e9 for readability)
    are written to param_est.csv.
    """
    p_dict = {'Parameters': ['beta0','beta1','rho','rhos_1','rhos_2','phi']}
    for c in cset:
        tmp = solveCovid(c)
        tmp.prelim()
        tmp.gamma_t_compute()
        tmp.fitmodel()
        p_dict[c] = np.append(tmp.best_params, 1e9*tmp.phi)
        tmp.sim_seir()
        # BUG FIX: the original passed saveplot='False', a non-empty (truthy)
        # string, so figures were saved despite the evident intent. Pass the
        # boolean False.
        tmp.plot_all(saveplot=False)
        tmp.df3.to_csv(f'../output/{out_save_folder}/df3_{tmp.iso2}.csv')
    pd.DataFrame(p_dict).to_csv(f'../params/{param_save_folder}/param_est.csv',
                                float_format='%.4f', index=False)
def run_gammashock(cset=['US']):
    """Simulate each country under a temporary infection-rate spike.

    Estimation is identical to the baseline; only the out-of-sample
    gamma_tilde rule is switched to 'shock' before simulating. Figures are
    saved to disk.
    """
    for country in cset:
        run = solveCovid(country)
        run.prelim()
        run.gamma_t_compute()
        run.fitmodel()
        # Switch the forecast rule to the shock scenario after estimation
        run.gamma_tilde_model = 'shock'
        run.sim_seir()
        run.plot_all(saveplot=True)
def run_vaccines(cset=['US'], vac_assump='vac_worse'):
    """Simulate each country under an alternative vaccination assumption.

    The assumption must be set before `prelim()` so it feeds into the data
    preparation. Figures are saved to disk.
    """
    for country in cset:
        run = solveCovid(country)
        run.vac_assump = vac_assump   # must precede prelim()
        run.prelim()
        run.gamma_t_compute()
        run.fitmodel()
        run.sim_seir()
        run.plot_all(saveplot=True)
def run_reinfect(cset=['US'], reinfect='reinfect'):
    """Simulate each country under an alternative reinfection assumption.

    The assumption must be set before `prelim()` so it feeds into the data
    preparation. Figures are saved to disk.
    """
    for country in cset:
        run = solveCovid(country)
        run.reinfect = reinfect       # must precede prelim()
        run.prelim()
        run.gamma_t_compute()
        run.fitmodel()
        run.sim_seir()
        run.plot_all(saveplot=True)
def run_scenarios(cset=['US']):
    """Run all scenarios per country and pickle each solved model object,
    so plots can later be drawn across countries/scenarios.

    Scenarios: baseline, vacworse (slower vaccines), shock (gamma spike),
    reinfect, better (faster + more targeted vaccines). Estimated baseline
    parameters are written to param_est.csv.
    """
    p_dict = {'Parameters': ['beta0','beta1','rho','rhos_1','rhos_2','phi']}

    def _set(**attrs):
        # Build a setup callback that assigns the given attributes.
        def setter(m):
            for k, v in attrs.items():
                setattr(m, k, v)
        return setter

    def _solve_and_pickle(c, label, pre_setup=None, post_fit=None):
        # One scenario run: estimate, simulate, plot, pickle; returns the model.
        m = solveCovid(c)
        if pre_setup is not None:
            pre_setup(m)   # attributes that must be set before prelim()
        m.prelim()
        m.gamma_t_compute()
        m.fitmodel()
        if post_fit is not None:
            post_fit(m)    # attributes that only affect the forecast
        m.sim_seir()
        m.plot_all(saveplot=True)
        # `with` closes the handle; the original leaked open file objects.
        with open(f'../output/{out_save_folder}/{c}_{label}.pkl', 'wb') as fh:
            pickle.dump(m, fh)
        return m

    for c in cset:
        # Baseline (also the source of the saved parameter estimates)
        base = _solve_and_pickle(c, 'baseline')
        p_dict[c] = np.append(base.best_params, 1e9*base.phi)
        # Vaccines slower than baseline
        _solve_and_pickle(c, 'vacworse', pre_setup=_set(vac_assump='vac_worse'))
        # Temporary infection-rate spike
        _solve_and_pickle(c, 'shock', post_fit=_set(gamma_tilde_model='shock'))
        # Reinfection risk
        _solve_and_pickle(c, 'reinfect', pre_setup=_set(reinfect='reinfect'))
        # Better: (a) 30% faster vaccines, (b) more targeted distribution
        _solve_and_pickle(c, 'better',
                          pre_setup=_set(vac_assump='vac_better', target_weight=0.9))
    pd.DataFrame(p_dict).to_csv(f'../params/{param_save_folder}/param_est.csv',
                                float_format='%.4f', index=False)
def save_results(cset=['US']):  # Unpack pickles and save all results into an excel
    """Load the pickled scenario runs for each country and dump their
    simulation DataFrames into one Excel workbook, one sheet per
    country/scenario pair.
    """
    def _load(path):
        # `with` closes the handle; the original leaked open file objects.
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    # (pickle label, sheet suffix) pairs; 'better' intentionally excluded,
    # as in the original (it was commented out).
    scenarios = (('baseline', 'base'), ('vacworse', 'vacworse'),
                 ('shock', 'shock'), ('reinfect', 'reinfect'))
    with pd.ExcelWriter(f'../output/{out_save_folder}/output_all.xlsx') as writer:
        for c in cset:
            print(f'Loading pickle for {c}')
            for label, sheet in scenarios:
                run = _load(f'../output/{out_load_folder}/{c}_{label}.pkl')
                run.df3.to_excel(writer, sheet_name=f'{c}_{sheet}')
# ---------------------------------------------------
# x. Plotting functions
# ***** Utilities *****
def scatter1(x, y, xlab, ylab, df):
    """Labelled scatter of df[y] against df[x] with an OLS trend line.

    Markers are drawn invisibly and each point is annotated with its index
    label instead. Returns the (fig, ax) pair for further tweaking.
    """
    xs = df[x]
    ys = df[y]
    fig, ax = plt.subplots(figsize=(10,8))
    ax.scatter(xs, ys, marker='o', facecolors='none', edgecolors='none')
    # Label each point with its index entry
    for pos, label in enumerate(df.index):
        ax.annotate(label, (xs.iloc[pos], ys.iloc[pos]), size=16)
    # Least-squares trend line over the observed x-range
    grid = np.unique(xs)
    trend = np.poly1d(np.polyfit(xs, ys, 1))
    ax.plot(grid, trend(grid), color='black')
    ax.set_xlabel(xlab, size=20)
    ax.set_ylabel(ylab, size=20)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    return fig, ax
def scatter2(x, y, x2, y2, xlab, ylab, df):
    """Two labelled scatter layers (gray then blue), each with an OLS trend
    line, sharing one pair of axes.

    `x`/`y` name the columns of the first (gray) layer and `x2`/`y2` the
    second (blue) layer; every point is annotated with its index label.
    Returns the (fig, ax) pair.
    """
    # Bind to fresh names: the original reassigned the parameters x2/y2,
    # shadowing them with Series of the same name.
    xa, ya = df[x], df[y]
    xb, yb = df[x2], df[y2]
    fig, ax = plt.subplots(figsize=(10,8))

    def _layer(xs, ys, color):
        # One invisible-marker point cloud, labelled points, and trend line.
        ax.scatter(xs, ys, marker='o', facecolors='none', edgecolors='none')
        for pos, label in enumerate(df.index):
            ax.annotate(label, (xs.iloc[pos], ys.iloc[pos]), size=16, color=color)
        grid = np.unique(xs)
        ax.plot(grid, np.poly1d(np.polyfit(xs, ys, 1))(grid), color=color)

    _layer(xa, ya, 'gray')
    _layer(xb, yb, 'blue')
    # The original called set_xlabel/set_ylabel twice with identical
    # arguments; once is sufficient.
    ax.set_xlabel(xlab, size=20)
    ax.set_ylabel(ylab, size=20)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    return fig, ax
def all_output(cset=['US','DE']):
    """Collect mobility, GDP and death statistics for every country across
    the four pickled scenarios (baseline, shock/'3rdwave', vacworse,
    reinfect), pickle the summary table and write it to Excel.

    Returns the summary DataFrame (one row per country). The 'better'
    scenario remains excluded, as in the original.
    """
    # Column labels: 8 statistics x 4 scenario suffixes, in scenario order.
    stats = ['Mob 2021', 'Mob fc', 'GDP 2021', 'GDP fc',
             'dDeath 2021', 'dDeath fc', 'dD/mn 2021', 'dD/mn fc']
    suffixes = ['', ' 3rdwave', ' vacworse', ' reinfect']
    data_col = [f'{s}{suf}' for suf in suffixes for s in stats]

    def _load(path):
        # `with` closes the handle; the original leaked open file objects.
        with open(path, 'rb') as fh:
            return pickle.load(fh)

    def _stats(df3, N, cnum, dnum, yratio):
        # Per-scenario summary: average mobility over 2021 and over the
        # remaining forecast horizon, the GDP impact implied by the
        # output/mobility ratio, and cumulative-death increments (levels
        # and per million).
        mob_2021 = df3['mob_fc'].iloc[cnum:].mean()   # average mobility for 2021
        mob_fc = df3['mob_fc'].iloc[dnum:].mean()     # from current date till horizon end
        gdp_2021 = 100*mob_2021*yratio
        gdp_fc = 100*mob_fc*yratio
        dD_2021 = df3['DD'][-1] - df3['DD'][cnum]
        dD_fc = df3['DD'][-1] - df3['DD'][dnum]
        return [mob_2021, mob_fc, gdp_2021, gdp_fc,
                dD_2021, dD_fc, 1000000*dD_2021/N, 1000000*dD_fc/N]

    data = {}
    df_yratio = pd.read_csv('../output/growth-mob.csv', index_col=0)
    for c in cset:
        # Scenario order must match the column suffixes above.
        runs = [_load(f'../output/{out_load_folder}/{c}_{label}.pkl')
                for label in ('baseline', 'shock', 'vacworse', 'reinfect')]
        base = runs[0]
        cnum = base.df3.index.get_loc('2020-12-31') + 1          # first day of 2021
        d = base.df3['total_cases'].last_valid_index()           # last date with data
        dnum = base.df3.index.get_loc(d) + 1
        yratio = df_yratio.loc[c]['ym_ratio']
        row = []
        for run in runs:
            # Same country across runs, so the baseline N is used throughout
            # (as in the original).
            row += _stats(run.df3, base.N, cnum, dnum, yratio)
        data[c] = row
    df_out = pd.DataFrame.from_dict(data, orient='index', columns=data_col)
    with open(f'../output/{out_save_folder}/all_output.pkl', 'wb') as fh:
        pickle.dump(df_out, fh)
    with pd.ExcelWriter(f'../output/{out_save_folder}/output_condensed.xlsx') as writer:
        df_out.to_excel(writer, sheet_name='output')
    return df_out
def update_table(cset=['US','DE']):
    """Compact summary table from the pickled baseline runs.

    For each country: average model mobility over 2021 (31 Jan - 31 Dec) and
    over the remaining forecast horizon, plus the corresponding
    cumulative-death increments per million. Returns a DataFrame rounded to
    one decimal, one row per country.
    """
    data_col = ['Mobility 2021', 'Mobility, now to mid 2022',
                'Deaths/mn 2021', 'Deaths/mn, now to mid 2022',
                ]
    data = {}
    for c in cset:
        # `with` closes the handle; the original leaked open file objects.
        with open(f'../output/{out_load_folder}/{c}_baseline.pkl', 'rb') as fh:
            tmp = pickle.load(fh)
        cnum = tmp.df3.index.get_loc('2021-01-31')
        cnum2 = tmp.df3.index.get_loc('2021-12-31')
        d = tmp.df3['total_cases'].last_valid_index()   # last date with data
        dnum = tmp.df3.index.get_loc(d) + 1
        mob_2021 = tmp.df3['mob_fc'].iloc[cnum:cnum2].mean()  # average mobility for 2021
        mob_fc = tmp.df3['mob_fc'].iloc[dnum:].mean()         # from now to the horizon end
        dD_2021 = tmp.df3['DD'][cnum2] - tmp.df3['DD'][cnum]
        dD_fc = tmp.df3['DD'][-1] - tmp.df3['DD'][dnum]
        data[c] = [100*mob_2021, 100*mob_fc,
                   1000000*dD_2021/tmp.N, 1000000*dD_fc/tmp.N,
                   ]
    df_out = pd.DataFrame.from_dict(data, orient='index', columns=data_col)
    return df_out.round(decimals=1)
# Compare 2 scenarios, showing 4 key charts
def plot_2cases(c, df, df2, tmp, title, filename, saveplot=False):
    """Compare two scenarios (`df` = base, `df2` = alternative) in four
    panels: cases/infections, deaths, susceptible & vaccinated, mobility.

    Solid lines are the base case, dotted the alternative. The deaths panel
    is annotated with the end-point difference in deaths and the mobility
    panel with the average 2021 mobility difference. `filename` is used in
    the saved figure's name when `saveplot` is truthy.
    """
    # Compute differences between 2 cases (lives saved at end points, and average mobility differences)
    d_death = int(round(df2['fitted_deaths'][-1]-df['fitted_deaths'][-1],0))
    d_m = round((df2['fitted_m']['2021-01-01':'2021-12-31'] - df['fitted_m']['2021-01-01':'2021-12-31']).mean(),3)
    # Draw figure
    fig3, ax = plt.subplots(nrows=2, ncols=2, figsize=(10,8), constrained_layout=True)
    ax[0,0].plot(df.index, 100*df['fitted_cases']/tmp.N, linewidth = 2, label='Cases', color='red')
    ax[0,0].plot(df2.index, 100*df2['fitted_cases']/tmp.N, linewidth = 2, label='Cases alt', color='red', linestyle=':')
    ax[0,0].plot(df.index, 100*df['fitted_I']/tmp.N, linewidth = 2, label='Infected', color='green')
    ax[0,0].plot(df2.index, 100*df2['fitted_I']/tmp.N, linewidth = 2, label='Infected alt', color='green',linestyle=':')
    ax[0,0].axvline(df.index[tmp.T], linewidth = 2, color='gray', linestyle=':')
    ax[0,0].legend(loc='center right',fontsize='x-large',fancybox=True, framealpha=0.5)
    ax[0,0].set_title('Cases',fontsize='x-large')
    ax[0,1].plot(df.index, 100*df['fitted_deaths']/tmp.N, linewidth = 2, label='Deaths', color='red')
    ax[0,1].plot(df2.index, 100*df2['fitted_deaths']/tmp.N, linewidth = 2, label='Deaths alt', color='red', linestyle=':')
    ax[0,1].axvline(df.index[tmp.T], linewidth = 2, color='gray', linestyle=':')
    ax[0,1].legend(loc='lower right',fontsize='x-large',fancybox=True, framealpha=0.5)
    ax[0,1].set_title('Deaths',fontsize='x-large')
    ax[0,1].annotate(f'$\Delta$Death={d_death}', xy=(0.1,0.9), xycoords='axes fraction')
    ax[1,0].plot(df.index, 100*df['fitted_S']/tmp.N, linewidth = 2, label='S ', color='red')
    ax[1,0].plot(df2.index, 100*df2['fitted_S']/tmp.N, linewidth = 2, label='S alt', color='red',linestyle=':')
    ax[1,0].plot(df.index, 100*df['fitted_V']/tmp.N, linewidth = 2, label='V ', color='green')
    ax[1,0].plot(df2.index, 100*df2['fitted_V']/tmp.N, linewidth = 2, label='V alt', color='green',linestyle=':')
    ax[1,0].axvline(df.index[tmp.T], linewidth = 2, color='gray', linestyle=':')
    ax[1,0].legend(loc='upper right',fontsize='x-large',fancybox=True, framealpha=0.5)
    ax[1,0].set_title('Susceptible & Vaccinated',fontsize='x-large')
    ax[1,1].plot(df.index, 100*df['fitted_m'], linewidth = 2, label='Mobility', color='red')
    ax[1,1].plot(df2.index, 100*df2['fitted_m'], linewidth = 2, label='Mobility alt', color='red', linestyle=':')
    ax[1,1].axvline(df.index[tmp.T], linewidth = 2, color='gray', linestyle=':')
    ax[1,1].legend(loc='lower right',fontsize='x-large',fancybox=True, framealpha=0.5)
    ax[1,1].set_title('Mobility',fontsize='x-large')
    ax[1,1].annotate(f'$\Delta$m={d_m}', xy=(0.1,0.9), xycoords='axes fraction')
    plt.setp(ax[0,0].get_xticklabels(), rotation=30, horizontalalignment='right')
    plt.setp(ax[0,1].get_xticklabels(), rotation=30, horizontalalignment='right')
    plt.setp(ax[1,0].get_xticklabels(), rotation=30, horizontalalignment='right')
    plt.setp(ax[1,1].get_xticklabels(), rotation=30, horizontalalignment='right')
    ax[0,0].set_ylabel('Percent of population')
    ax[0,1].set_ylabel('Percent of population')
    ax[1,0].set_ylabel('Percent of population')
    ax[1,1].set_ylabel('Percent deviations from norm')
    fig3.suptitle(f'{c}: {title}',fontsize='x-large')
    if saveplot:
        Path(f'../pics/fig_{date.today()}').mkdir(exist_ok=True)
        # BUG FIX: the `filename` parameter was accepted but never used —
        # the save path contained a garbled literal where the name belongs.
        fig3.savefig(f'../pics/fig_{date.today()}/fig-{tmp.iso2}-{filename}.png')
# ------------------------------
# x. Diagnostic/Inspect functions
# Plot m_t and gamma_t (data)
def plot_m_gamma(cset=['US','DE']):
    """Plot smoothed gamma (data) against Google mobility for each country,
    two panels per row, with mobility on a twin axis.

    BUG FIXES vs the original:
    - ``round(no_fig/2)`` uses banker's rounding (``round(2.5) == 2``), so
      e.g. 5 countries got only 2 rows and the loop raised IndexError;
      ceiling division allocates enough rows.
    - ``squeeze=False`` keeps ``ax`` two-dimensional even for a single row,
      so the ``ax[i, j]`` indexing also works for one or two countries.
    """
    no_fig = len(cset)
    nrows_ = (no_fig + 1) // 2   # ceiling division: enough rows for odd counts
    ncols_ = 2
    fig, ax = plt.subplots(nrows=nrows_, ncols=ncols_, figsize=(14,6*nrows_),
                           squeeze=False)
    i = 0
    j = 0
    for c in cset:
        tmp = solveCovid(c)
        tmp.prelim()
        tmp.gamma_t_compute()
        ax[i,j].plot(tmp.df2['gamma_sm'], label = 'gamma_sm', color='black')
        ax2 = ax[i,j].twinx()
        ax2.plot(tmp.df2['google_smooth'], label='mobility',color='blue')
        # Merge the legends of the primary and twin axes
        lines, labels = ax[i,j].get_legend_handles_labels()
        lines2, labels2 = ax2.get_legend_handles_labels()
        ax2.legend(lines + lines2, labels + labels2, loc=0, fontsize='x-large')
        plt.setp(ax[i,j].get_xticklabels(), rotation=30, horizontalalignment='right')
        ax[i,j].set_title(f'{c}')
        # Advance to the next panel in row-major order
        if j == 0:
            j +=1
        else:
            j = 0
            i +=1
# Plot realised mobility against model-implied I/N
def policy_check():
    """Inspect realised mobility against model-implied infections for
    US/DE/FR over the most recent ~4 months, and regress mobility on the
    infection share. Returns a dict mapping country code to the OLS summary.
    """
    countries = ['US', 'DE', 'FR']
    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(15,6), constrained_layout=True)
    summaries = {}
    for panel, c in enumerate(countries):
        tmp = solveCovid(c)
        tmp.prelim()
        param = mod.get_params('round2')['all']
        fig_tmp, df = tmp.predict(params=param, plot=False, saveplot=False)
        dfa = df[['google_smooth','fitted_I']].dropna()
        dfa.loc[:,'fitted_I'] = dfa['fitted_I']/tmp.N   # infections as a population share
        dfa = dfa.iloc[-120:-1]                         # recent sample only
        ax[panel].plot(dfa.index, dfa['google_smooth'], label='mobility')
        twin = ax[panel].twinx()
        twin.plot(dfa.index, dfa['fitted_I'], label='infected', color='g')
        ax[panel].set_title(c, fontsize='x-large')
        # Regression of mobility on infections over the recent sample
        X = sm.add_constant(dfa['fitted_I'])
        y = dfa['google_smooth']
        summaries[c] = sm.OLS(y, X).fit().summary()
    return summaries
| [
"numpy.clip",
"pandas.read_csv",
"numpy.polyfit",
"numpy.log",
"scipy.interpolate.interp1d",
"numpy.array",
"country_converter.convert",
"statsmodels.api.OLS",
"pandas.ExcelWriter",
"numpy.arange",
"scipy.ndimage.filters.uniform_filter1d",
"pandas.date_range",
"numpy.mean",
"pandas.to_date... | [((38757, 38775), 'pandas.DataFrame', 'pd.DataFrame', (['dict'], {}), '(dict)\n', (38769, 38775), True, 'import pandas as pd\n'), ((43864, 43893), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (43876, 43893), True, 'from matplotlib import pyplot as plt\n'), ((44258, 44281), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (44268, 44281), True, 'from matplotlib import pyplot as plt\n'), ((44288, 44311), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (44298, 44311), True, 'from matplotlib import pyplot as plt\n'), ((44453, 44482), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (44465, 44482), True, 'from matplotlib import pyplot as plt\n'), ((45272, 45295), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (45282, 45295), True, 'from matplotlib import pyplot as plt\n'), ((45302, 45325), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(20)'}), '(fontsize=20)\n', (45312, 45325), True, 'from matplotlib import pyplot as plt\n'), ((46695, 46748), 'pandas.read_csv', 'pd.read_csv', (['f"""../output/growth-mob.csv"""'], {'index_col': '(0)'}), "(f'../output/growth-mob.csv', index_col=0)\n", (46706, 46748), True, 'import pandas as pd\n'), ((51451, 51513), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {'orient': '"""index"""', 'columns': 'data_col'}), "(data, orient='index', columns=data_col)\n", (51473, 51513), True, 'import pandas as pd\n'), ((52858, 52920), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {'orient': '"""index"""', 'columns': 'data_col'}), "(data, orient='index', columns=data_col)\n", (52880, 52920), True, 'import pandas as pd\n'), ((53423, 53495), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'figsize': '(10, 8)', 
'constrained_layout': '(True)'}), '(nrows=2, ncols=2, figsize=(10, 8), constrained_layout=True)\n', (53435, 53495), True, 'from matplotlib import pyplot as plt\n'), ((56865, 56931), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows_', 'ncols': 'ncols_', 'figsize': '(14, 6 * nrows_)'}), '(nrows=nrows_, ncols=ncols_, figsize=(14, 6 * nrows_))\n', (56877, 56931), True, 'from matplotlib import pyplot as plt\n'), ((57772, 57844), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(3)', 'figsize': '(15, 6)', 'constrained_layout': '(True)'}), '(nrows=1, ncols=3, figsize=(15, 6), constrained_layout=True)\n', (57784, 57844), True, 'from matplotlib import pyplot as plt\n'), ((6283, 6311), 'pandas.to_datetime', 'pd.to_datetime', (['df2.index[0]'], {}), '(df2.index[0])\n', (6297, 6311), True, 'import pandas as pd\n'), ((6485, 6505), 'numpy.arange', 'np.arange', (['(0)', 'self.T'], {}), '(0, self.T)\n', (6494, 6505), True, 'import numpy as np\n'), ((6537, 6633), 'scipy.interpolate.interp1d', 'interp1d', (['self.t_cases', 'self.mobility_vec'], {'bounds_error': '(False)', 'fill_value': '(0.0)', 'kind': '"""cubic"""'}), "(self.t_cases, self.mobility_vec, bounds_error=False, fill_value=\n 0.0, kind='cubic')\n", (6545, 6633), False, 'from scipy.interpolate import interp1d\n'), ((7273, 7334), 'pandas.date_range', 'pd.date_range', ([], {'start': 'df2.index[0]', 'end': 'default_maxT', 'freq': '"""D"""'}), "(start=df2.index[0], end=default_maxT, freq='D')\n", (7286, 7334), True, 'import pandas as pd\n'), ((13603, 13635), 'numpy.array', 'np.array', (['self.newcases_data_fit'], {}), '(self.newcases_data_fit)\n', (13611, 13635), True, 'import numpy as np\n'), ((13657, 13690), 'numpy.array', 'np.array', (['self.newdeaths_data_fit'], {}), '(self.newdeaths_data_fit)\n', (13665, 13690), True, 'import numpy as np\n'), ((13714, 13765), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['newcases'], {'size': '(21)', 'mode': 
'"""nearest"""'}), "(newcases, size=21, mode='nearest')\n", (13730, 13765), False, 'from scipy.ndimage.filters import uniform_filter1d\n'), ((13790, 13842), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['newdeaths'], {'size': '(21)', 'mode': '"""nearest"""'}), "(newdeaths, size=21, mode='nearest')\n", (13806, 13842), False, 'from scipy.ndimage.filters import uniform_filter1d\n'), ((14093, 14133), 'numpy.append', 'np.append', (['newcases_sm', 'newcases_sm[-2:]'], {}), '(newcases_sm, newcases_sm[-2:])\n', (14102, 14133), True, 'import numpy as np\n'), ((14206, 14247), 'numpy.append', 'np.append', (['newdeaths_sm', 'newdeaths_sm[-1]'], {}), '(newdeaths_sm, newdeaths_sm[-1])\n', (14215, 14247), True, 'import numpy as np\n'), ((14811, 14831), 'numpy.mean', 'np.mean', (['gamma_t_vec'], {}), '(gamma_t_vec)\n', (14818, 14831), True, 'import numpy as np\n'), ((14988, 15046), 'scipy.optimize.dual_annealing', 'dual_annealing', (['self.loss_gamma0'], {'x0': '[5]', 'bounds': '[(1, 50)]'}), '(self.loss_gamma0, x0=[5], bounds=[(1, 50)])\n', (15002, 15046), False, 'from scipy.optimize import dual_annealing\n'), ((15702, 15734), 'numpy.array', 'np.array', (['self.newcases_data_fit'], {}), '(self.newcases_data_fit)\n', (15710, 15734), True, 'import numpy as np\n'), ((15756, 15789), 'numpy.array', 'np.array', (['self.newdeaths_data_fit'], {}), '(self.newdeaths_data_fit)\n', (15764, 15789), True, 'import numpy as np\n'), ((15813, 15864), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['newcases'], {'size': '(21)', 'mode': '"""nearest"""'}), "(newcases, size=21, mode='nearest')\n", (15829, 15864), False, 'from scipy.ndimage.filters import uniform_filter1d\n'), ((15889, 15941), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (['newdeaths'], {'size': '(21)', 'mode': '"""nearest"""'}), "(newdeaths, size=21, mode='nearest')\n", (15905, 15941), False, 'from scipy.ndimage.filters import uniform_filter1d\n'), ((16357, 16397), 'numpy.append', 
'np.append', (['newcases_sm', 'newcases_sm[-2:]'], {}), '(newcases_sm, newcases_sm[-2:])\n', (16366, 16397), True, 'import numpy as np\n'), ((16470, 16511), 'numpy.append', 'np.append', (['newdeaths_sm', 'newdeaths_sm[-1]'], {}), '(newdeaths_sm, newdeaths_sm[-1])\n', (16479, 16511), True, 'import numpy as np\n'), ((18759, 18814), 'scipy.optimize.dual_annealing', 'dual_annealing', (['self.loss_betas'], {'x0': 'x0', 'bounds': 'bounds_0'}), '(self.loss_betas, x0=x0, bounds=bounds_0)\n', (18773, 18814), False, 'from scipy.optimize import dual_annealing\n'), ((19312, 19378), 'scipy.ndimage.filters.uniform_filter1d', 'uniform_filter1d', (["self.df2['gamma_tilde']"], {'size': '(21)', 'mode': '"""reflect"""'}), "(self.df2['gamma_tilde'], size=21, mode='reflect')\n", (19328, 19378), False, 'from scipy.ndimage.filters import uniform_filter1d\n'), ((19981, 20010), 'numpy.clip', 'np.clip', (['best_rho1', '(0.1)', '(0.99)'], {}), '(best_rho1, 0.1, 0.99)\n', (19988, 20010), True, 'import numpy as np\n'), ((20094, 20157), 'numpy.array', 'np.array', (['[beta0, beta1, best_rho1, best_rho2[0], best_rho2[1]]'], {}), '([beta0, beta1, best_rho1, best_rho2[0], best_rho2[1]])\n', (20102, 20157), True, 'import numpy as np\n'), ((22952, 23013), 'pandas.date_range', 'pd.date_range', ([], {'start': 'df2.index[0]', 'end': 'default_maxT', 'freq': '"""D"""'}), "(start=df2.index[0], end=default_maxT, freq='D')\n", (22965, 23013), True, 'import pandas as pd\n'), ((23160, 23176), 'numpy.array', 'np.array', (['x_init'], {}), '(x_init)\n', (23168, 23176), True, 'import numpy as np\n'), ((27506, 27561), 'pandas.DataFrame', 'pd.DataFrame', (['x_data'], {'columns': 'col_temp', 'index': 'df3.index'}), '(x_data, columns=col_temp, index=df3.index)\n', (27518, 27561), True, 'import pandas as pd\n'), ((27904, 27925), 'numpy.array', 'np.array', (['pdth_t_base'], {}), '(pdth_t_base)\n', (27912, 27925), True, 'import numpy as np\n'), ((27956, 27977), 'numpy.array', 'np.array', (['pdth_t_targ'], {}), 
'(pdth_t_targ)\n', (27964, 27977), True, 'import numpy as np\n'), ((28296, 28368), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(3)', 'figsize': '(15, 8)', 'constrained_layout': '(True)'}), '(nrows=2, ncols=3, figsize=(15, 8), constrained_layout=True)\n', (28308, 28368), True, 'from matplotlib import pyplot as plt\n'), ((33108, 33154), 'country_converter.convert', 'coco.convert', ([], {'names': 'self.iso2', 'to': '"""name_short"""'}), "(names=self.iso2, to='name_short')\n", (33120, 33154), True, 'import country_converter as coco\n'), ((33623, 33696), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(2)', 'figsize': '(10, 12)', 'constrained_layout': '(True)'}), '(nrows=3, ncols=2, figsize=(10, 12), constrained_layout=True)\n', (33635, 33696), True, 'from matplotlib import pyplot as plt\n'), ((37895, 37941), 'country_converter.convert', 'coco.convert', ([], {'names': 'self.iso2', 'to': '"""name_short"""'}), "(names=self.iso2, to='name_short')\n", (37907, 37941), True, 'import country_converter as coco\n'), ((39217, 39267), 'numpy.append', 'np.append', (['tmp.best_params', '(1000000000.0 * tmp.phi)'], {}), '(tmp.best_params, 1000000000.0 * tmp.phi)\n', (39226, 39267), True, 'import numpy as np\n'), ((40733, 40783), 'numpy.append', 'np.append', (['tmp.best_params', '(1000000000.0 * tmp.phi)'], {}), '(tmp.best_params, 1000000000.0 * tmp.phi)\n', (40742, 40783), True, 'import numpy as np\n'), ((42711, 42773), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['f"""../output/{out_save_folder}/output_all.xlsx"""'], {}), "(f'../output/{out_save_folder}/output_all.xlsx')\n", (42725, 42773), True, 'import pandas as pd\n'), ((44082, 44095), 'numpy.unique', 'np.unique', (['x1'], {}), '(x1)\n', (44091, 44095), True, 'import numpy as np\n'), ((44685, 44698), 'numpy.unique', 'np.unique', (['x1'], {}), '(x1)\n', (44694, 44698), True, 'import numpy as np\n'), ((45093, 45106), 'numpy.unique', 'np.unique', (['x2'], {}), '(x2)\n', 
(45102, 45106), True, 'import numpy as np\n'), ((51630, 51698), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['f"""../output/{out_save_folder}/output_condensed.xlsx"""'], {}), "(f'../output/{out_save_folder}/output_condensed.xlsx')\n", (51644, 51698), True, 'import pandas as pd\n'), ((58505, 58537), 'statsmodels.api.add_constant', 'sm.add_constant', (["dfa['fitted_I']"], {}), "(dfa['fitted_I'])\n", (58520, 58537), True, 'import statsmodels.api as sm\n'), ((3307, 3316), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3313, 3316), True, 'import numpy as np\n'), ((3390, 3399), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3396, 3399), True, 'import numpy as np\n'), ((3473, 3482), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3479, 3482), True, 'import numpy as np\n'), ((3597, 3606), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3603, 3606), True, 'import numpy as np\n'), ((14577, 14603), 'numpy.clip', 'np.clip', (['gamma_t', '(0.01)', '(10)'], {}), '(gamma_t, 0.01, 10)\n', (14584, 14603), True, 'import numpy as np\n'), ((14625, 14645), 'numpy.clip', 'np.clip', (['p_dth', '(0)', '(1)'], {}), '(p_dth, 0, 1)\n', (14632, 14645), True, 'import numpy as np\n'), ((15289, 15336), 'pandas.read_csv', 'pd.read_csv', (['f"""../params/param_fixed/kstar.csv"""'], {}), "(f'../params/param_fixed/kstar.csv')\n", (15300, 15336), True, 'import pandas as pd\n'), ((16882, 16908), 'numpy.clip', 'np.clip', (['gamma_t', '(0.01)', '(10)'], {}), '(gamma_t, 0.01, 10)\n', (16889, 16908), True, 'import numpy as np\n'), ((16930, 16950), 'numpy.clip', 'np.clip', (['p_dth', '(0)', '(1)'], {}), '(p_dth, 0, 1)\n', (16937, 16950), True, 'import numpy as np\n'), ((17794, 17816), 'numpy.array', 'np.array', (['self.DHR_vec'], {}), '(self.DHR_vec)\n', (17802, 17816), True, 'import numpy as np\n'), ((17819, 17841), 'numpy.array', 'np.array', (['self.DHD_vec'], {}), '(self.DHD_vec)\n', (17827, 17841), True, 'import numpy as np\n'), ((18313, 18332), 'numpy.exp', 'np.exp', (['(beta1 * m_t)'], {}), '(beta1 * 
m_t)\n', (18319, 18332), True, 'import numpy as np\n'), ((19176, 19195), 'numpy.exp', 'np.exp', (['(beta1 * m_t)'], {}), '(beta1 * m_t)\n', (19182, 19195), True, 'import numpy as np\n'), ((23806, 23865), 'pandas.read_csv', 'pd.read_csv', (['f"""../params/{param_load_folder}/param_est.csv"""'], {}), "(f'../params/{param_load_folder}/param_est.csv')\n", (23817, 23865), True, 'import pandas as pd\n'), ((39401, 39421), 'pandas.DataFrame', 'pd.DataFrame', (['p_dict'], {}), '(p_dict)\n', (39413, 39421), True, 'import pandas as pd\n'), ((42503, 42523), 'pandas.DataFrame', 'pd.DataFrame', (['p_dict'], {}), '(p_dict)\n', (42515, 42523), True, 'import pandas as pd\n'), ((44143, 44156), 'numpy.unique', 'np.unique', (['x1'], {}), '(x1)\n', (44152, 44156), True, 'import numpy as np\n'), ((44746, 44759), 'numpy.unique', 'np.unique', (['x1'], {}), '(x1)\n', (44755, 44759), True, 'import numpy as np\n'), ((45154, 45167), 'numpy.unique', 'np.unique', (['x2'], {}), '(x2)\n', (45163, 45167), True, 'import numpy as np\n'), ((15220, 15260), 'pathlib.Path', 'Path', (['f"""../params/param_fixed/kstar.csv"""'], {}), "(f'../params/param_fixed/kstar.csv')\n", (15224, 15260), False, 'from pathlib import Path\n'), ((19752, 19810), 'statsmodels.api.OLS', 'sm.OLS', (['y', "self.df2['gamma_tilde_sm_lag1']"], {'missing': '"""drop"""'}), "(y, self.df2['gamma_tilde_sm_lag1'], missing='drop')\n", (19758, 19810), True, 'import statsmodels.api as sm\n'), ((19834, 19922), 'statsmodels.api.OLS', 'sm.OLS', (['y', "self.df2[['gamma_tilde_sm_lag1', 'gamma_tilde_sm_lag2']]"], {'missing': '"""drop"""'}), "(y, self.df2[['gamma_tilde_sm_lag1', 'gamma_tilde_sm_lag2']], missing\n ='drop')\n", (19840, 19922), True, 'import statsmodels.api as sm\n'), ((26573, 26614), 'numpy.append', 'np.append', (['gamma_tilde_sm_fc', 'gamma_tilde'], {}), '(gamma_tilde_sm_fc, gamma_tilde)\n', (26582, 26614), True, 'import numpy as np\n'), ((26649, 26687), 'numpy.append', 'np.append', (['gamma_tilde_fc', 'gamma_tilde'], {}), 
'(gamma_tilde_fc, gamma_tilde)\n', (26658, 26687), True, 'import numpy as np\n'), ((26720, 26756), 'numpy.append', 'np.append', (['gamma_mob_fc', 'gamma_mob_t'], {}), '(gamma_mob_fc, gamma_mob_t)\n', (26729, 26756), True, 'import numpy as np\n'), ((26783, 26807), 'numpy.append', 'np.append', (['mob_fc', 'mob_t'], {}), '(mob_fc, mob_t)\n', (26792, 26807), True, 'import numpy as np\n'), ((26837, 26865), 'numpy.append', 'np.append', (['pdth_t_fc', 'pdth_t'], {}), '(pdth_t_fc, pdth_t)\n', (26846, 26865), True, 'import numpy as np\n'), ((44120, 44141), 'numpy.polyfit', 'np.polyfit', (['x1', 'y1', '(1)'], {}), '(x1, y1, 1)\n', (44130, 44141), True, 'import numpy as np\n'), ((44723, 44744), 'numpy.polyfit', 'np.polyfit', (['x1', 'y1', '(1)'], {}), '(x1, y1, 1)\n', (44733, 44744), True, 'import numpy as np\n'), ((45131, 45152), 'numpy.polyfit', 'np.polyfit', (['x2', 'y2', '(1)'], {}), '(x2, y2, 1)\n', (45141, 45152), True, 'import numpy as np\n'), ((58589, 58601), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (58595, 58601), True, 'import statsmodels.api as sm\n'), ((4118, 4127), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4124, 4127), True, 'import numpy as np\n'), ((20834, 20851), 'numpy.exp', 'np.exp', (['(beta1 * m)'], {}), '(beta1 * m)\n', (20840, 20851), True, 'import numpy as np\n'), ((26422, 26443), 'numpy.exp', 'np.exp', (['(beta1 * mob_t)'], {}), '(beta1 * mob_t)\n', (26428, 26443), True, 'import numpy as np\n'), ((56587, 56599), 'datetime.date.today', 'date.today', ([], {}), '()\n', (56597, 56599), False, 'from datetime import date, datetime, timedelta\n'), ((22273, 22296), 'numpy.linspace', 'np.linspace', (['(-1)', '(0)', '(101)'], {}), '(-1, 0, 101)\n', (22284, 22296), True, 'import numpy as np\n'), ((27290, 27306), 'numpy.array', 'np.array', (['x_next'], {}), '(x_next)\n', (27298, 27306), True, 'import numpy as np\n'), ((33377, 33389), 'datetime.date.today', 'date.today', ([], {}), '()\n', (33387, 33389), False, 'from datetime import 
date, datetime, timedelta\n'), ((38122, 38134), 'datetime.date.today', 'date.today', ([], {}), '()\n', (38132, 38134), False, 'from datetime import date, datetime, timedelta\n'), ((22540, 22557), 'numpy.abs', 'np.abs', (['(RHS - LHS)'], {}), '(RHS - LHS)\n', (22546, 22557), True, 'import numpy as np\n'), ((56513, 56525), 'datetime.date.today', 'date.today', ([], {}), '()\n', (56523, 56525), False, 'from datetime import date, datetime, timedelta\n'), ((20916, 20933), 'numpy.exp', 'np.exp', (['(beta1 * m)'], {}), '(beta1 * m)\n', (20922, 20933), True, 'import numpy as np\n'), ((22371, 22392), 'numpy.exp', 'np.exp', (['(beta1 * m_set)'], {}), '(beta1 * m_set)\n', (22377, 22392), True, 'import numpy as np\n'), ((33300, 33312), 'datetime.date.today', 'date.today', ([], {}), '()\n', (33310, 33312), False, 'from datetime import date, datetime, timedelta\n'), ((38045, 38057), 'datetime.date.today', 'date.today', ([], {}), '()\n', (38055, 38057), False, 'from datetime import date, datetime, timedelta\n'), ((22457, 22478), 'numpy.exp', 'np.exp', (['(beta1 * m_set)'], {}), '(beta1 * m_set)\n', (22463, 22478), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import RandomizedSearchCV

# Load the iris data and build a forest with a fixed seed for reproducibility.
iris = load_iris()
rf = RandomForestRegressor(random_state=35)
X = iris.data
y = iris.target

# Candidate values for each hyperparameter of the forest.
tree_counts = [int(v) for v in np.linspace(start=1, stop=20, num=20)]
feature_modes = ['auto', 'sqrt']
depth_choices = [int(v) for v in np.linspace(10, 120, num=12)]
split_sizes = [2, 6, 10]
leaf_sizes = [1, 3, 4]
bootstrap_flags = [True, False]

random_grid = {
    'n_estimators': tree_counts,
    'max_features': feature_modes,
    'max_depth': depth_choices,
    'min_samples_split': split_sizes,
    'min_samples_leaf': leaf_sizes,
    'bootstrap': bootstrap_flags,
}

# Randomly sample 100 parameter combinations, scoring each with 5-fold CV
# across all available cores.
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid,
                               n_iter=100, cv=5, verbose=2,
                               random_state=35, n_jobs=-1)
rf_random.fit(X, y)

# this prints the contents of the parameters in the random grid
print('Random grid: ', random_grid, '\n')
# print the best parameters
print ('Best Parameters: ', rf_random.best_params_, ' \n') | [
"sklearn.datasets.load_iris",
"numpy.linspace",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.model_selection.RandomizedSearchCV"
] | [((116, 127), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (125, 127), False, 'from sklearn.datasets import load_iris\n'), ((133, 171), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(35)'}), '(random_state=35)\n', (154, 171), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((741, 868), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', ([], {'estimator': 'rf', 'param_distributions': 'random_grid', 'n_iter': '(100)', 'cv': '(5)', 'verbose': '(2)', 'random_state': '(35)', 'n_jobs': '(-1)'}), '(estimator=rf, param_distributions=random_grid, n_iter=\n 100, cv=5, verbose=2, random_state=35, n_jobs=-1)\n', (759, 868), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((296, 333), 'numpy.linspace', 'np.linspace', ([], {'start': '(1)', 'stop': '(20)', 'num': '(20)'}), '(start=1, stop=20, num=20)\n', (307, 333), True, 'import numpy as np\n'), ((402, 430), 'numpy.linspace', 'np.linspace', (['(10)', '(120)'], {'num': '(12)'}), '(10, 120, num=12)\n', (413, 430), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import itertools
from typing import Optional
import numpy as np
import math
import paddle.distributed as dist
from paddle.io import Sampler, BatchSampler
class DistributedBatchSampler(BatchSampler):
    """Batch sampler meant to shard a dataset across distributed replicas.

    The index list is padded so its length divides evenly by the number of
    replicas; with ``shuffle`` enabled it is reshuffled every epoch using the
    epoch number as the seed.

    NOTE(review): see the comment inside ``__iter__`` -- the rank-local
    subsampling result is currently never consumed by the batching loop.
    """

    def __init__(self,
                 dataset,
                 batch_size,
                 num_replicas=None,
                 rank=None,
                 shuffle=False,
                 drop_last=False):
        """
        Args:
            dataset: dataset to sample from (only ``len()`` is used here).
            batch_size (int): number of samples per mini-batch.
            num_replicas (int, optional): number of participating processes;
                defaults to the parallel environment's world size.
            rank (int, optional): rank of the current process; defaults to
                the parallel environment's local rank.
            shuffle (bool): reshuffle the indices every epoch.
            drop_last (bool): drop the trailing incomplete batch.
        """
        self.dataset = dataset
        assert isinstance(batch_size, int) and batch_size > 0, \
            "batch_size should be a positive integer"
        self.batch_size = batch_size
        assert isinstance(shuffle, bool), \
            "shuffle should be a boolean value"
        self.shuffle = shuffle
        assert isinstance(drop_last, bool), \
            "drop_last should be a boolean number"
        # Imported lazily so that importing this module does not require an
        # initialized paddle parallel environment.
        from paddle.fluid.dygraph.parallel import ParallelEnv
        if num_replicas is not None:
            assert isinstance(num_replicas, int) and num_replicas > 0, \
                "num_replicas should be a positive integer"
            self.nranks = num_replicas
        else:
            self.nranks = ParallelEnv().nranks
        if rank is not None:
            assert isinstance(rank, int) and rank >= 0, \
                "rank should be a non-negative integer"
            self.local_rank = rank
        else:
            self.local_rank = ParallelEnv().local_rank
        self.drop_last = drop_last
        self.epoch = 0
        # Per-rank sample count after padding the dataset up to a multiple
        # of the number of ranks.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.nranks))
        self.total_size = self.num_samples * self.nranks

    def __iter__(self):
        # Pad the index list so every rank receives the same number of
        # samples (the first indices are reused as padding).
        num_samples = len(self.dataset)
        indices = np.arange(num_samples).tolist()
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size
        if self.shuffle:
            # Deterministic per-epoch shuffle: every rank seeds with the
            # same epoch number and therefore sees the same permutation.
            np.random.RandomState(self.epoch).shuffle(indices)
            self.epoch += 1
        # subsample
        def _get_indices_by_batch_size(indices):
            # Rank r takes batch r, r + nranks, r + 2*nranks, ... and then
            # its even share of the leftover tail.
            subsampled_indices = []
            last_batch_size = self.total_size % (self.batch_size * self.nranks)
            assert last_batch_size % self.nranks == 0
            last_local_batch_size = last_batch_size // self.nranks
            for i in range(self.local_rank * self.batch_size,
                           len(indices) - last_batch_size,
                           self.batch_size * self.nranks):
                subsampled_indices.extend(indices[i:i + self.batch_size])
            indices = indices[len(indices) - last_batch_size:]
            subsampled_indices.extend(indices[
                self.local_rank * last_local_batch_size:(
                    self.local_rank + 1) * last_local_batch_size])
            return subsampled_indices
        if self.nranks > 1:
            indices = _get_indices_by_batch_size(indices)
        assert len(indices) == self.num_samples
        # NOTE(review): '_sample_iter' holds the rank-local indices computed
        # above but is never consumed -- the loop below draws from
        # self._infinite_indices() instead. As written: (a) the distributed
        # subsampling is dead code, (b) every rank yields the same
        # fixed-seed stream (np.random.seed(1) in _infinite_indices), and
        # (c) the stream never ends, so the drop_last handling below is
        # unreachable and __len__ disagrees with __iter__. Confirm whether
        # the batching loop should iterate '_sample_iter' instead.
        _sample_iter = iter(indices)
        batch_indices = []
        for idx in self._infinite_indices():
            batch_indices.append(idx)
            if len(batch_indices) == self.batch_size:
                yield batch_indices
                batch_indices = []
        if not self.drop_last and len(batch_indices) > 0:
            yield batch_indices

    def __len__(self):
        # Number of batches this rank would produce from its num_samples
        # (rounded up unless drop_last is set).
        num_samples = self.num_samples
        num_samples += int(not self.drop_last) * (self.batch_size - 1)
        return num_samples // self.batch_size

    def _infinite_indices(self):
        # Endless stream over the whole dataset; the fixed seed makes the
        # stream identical across processes and across runs.
        np.random.seed(1)
        while True:
            if self.shuffle:
                yield from np.random.permutation(len(self.dataset))
            else:
                yield from np.arange(len(self.dataset))

    def set_epoch(self, epoch):
        # Changes the seed of the per-epoch shuffle performed in __iter__.
        self.epoch = epoch
class DistributedYoloBatchSampler(DistributedBatchSampler):
    """
    Distributed batch sampler whose batches carry a mosaic on/off flag.

    Behaves exactly like :class:`DistributedBatchSampler`, except that each
    yielded batch is a list of ``(mosaic, index)`` pairs, letting the dataset
    enable or disable mosaic augmentation per sample.
    """

    def __init__(self, *args, mosaic=True, **kwargs):
        super().__init__(*args, **kwargs)
        self.mosaic = mosaic

    def __iter__(self):
        for batch in super().__iter__():
            # Look up self.mosaic lazily: the trainer may flip the flag
            # while iteration is in progress.
            tagged = []
            for sample_index in batch:
                tagged.append((self.mosaic, sample_index))
            yield tagged
class YoloBatchSampler(BatchSampler):
    """
    Single-process variant of the mosaic-aware batch sampler.

    Works just like :class:`paddle.io.BatchSampler`, but every yielded batch
    is a list of ``(mosaic, index)`` tuples instead of bare indices, so the
    mosaic augmentation can be toggled per batch.
    """

    def __init__(self, *args, mosaic=True, **kwargs):
        super().__init__(*args, **kwargs)
        self.mosaic = mosaic

    def __iter__(self):
        for batch in super().__iter__():
            # The flag is re-read on every yield so it can change mid-run.
            yield [(self.mosaic, i) for i in batch]
class InfiniteSampler(Sampler):
    """
    Endless index stream for "infinite" training loops.

    Conceptually the stream is ``shuffle(range(size)) + shuffle(range(size))
    + ...`` when ``shuffle`` is True, or plain ``range(size)`` repeated
    forever otherwise. Each worker keeps only every ``world_size``-th
    element of the stream, starting at its own rank, so all workers jointly
    cover the stream exactly once without overlap.
    """

    def __init__(
        self,
        size: int,
        shuffle: bool = True,
        seed: Optional[int] = 0,
        rank=0,
        world_size=1,
    ):
        """
        Args:
            size (int): total number of samples in the underlying dataset.
            shuffle (bool): whether to reshuffle the indices on every pass.
            seed (int): shuffle seed; must be identical across all workers.

        NOTE(review): the ``rank`` and ``world_size`` arguments are ignored
        here -- the actual values are read from ``paddle.distributed``.
        Also, passing ``seed=None`` would raise in ``int(seed)`` below.
        Confirm both behaviors are intended.
        """
        assert size > 0
        self._size = size
        self._shuffle = shuffle
        self._seed = int(seed)
        self._rank = dist.get_rank()
        self._world_size = dist.get_world_size()

    def __iter__(self):
        # Worker r keeps stream positions r, r + W, r + 2W, ... where W is
        # the world size.
        stream = self._infinite_indices()
        yield from itertools.islice(stream, self._rank, None, self._world_size)

    def _infinite_indices(self):
        np.random.seed(self._seed)
        while True:
            chunk = (np.random.permutation(self._size)
                     if self._shuffle
                     else np.arange(self._size))
            yield from chunk

    def __len__(self):
        # Per-worker share of one pass over the dataset.
        return self._size // self._world_size
| [
"paddle.distributed.get_rank",
"paddle.fluid.dygraph.parallel.ParallelEnv",
"paddle.distributed.get_world_size",
"numpy.random.seed",
"numpy.random.RandomState",
"numpy.arange",
"numpy.random.permutation"
] | [((3615, 3632), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (3629, 3632), True, 'import numpy as np\n'), ((6203, 6218), 'paddle.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (6216, 6218), True, 'import paddle.distributed as dist\n'), ((6246, 6267), 'paddle.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (6265, 6267), True, 'import paddle.distributed as dist\n'), ((6477, 6503), 'numpy.random.seed', 'np.random.seed', (['self._seed'], {}), '(self._seed)\n', (6491, 6503), True, 'import numpy as np\n'), ((1244, 1257), 'paddle.fluid.dygraph.parallel.ParallelEnv', 'ParallelEnv', ([], {}), '()\n', (1255, 1257), False, 'from paddle.fluid.dygraph.parallel import ParallelEnv\n'), ((1492, 1505), 'paddle.fluid.dygraph.parallel.ParallelEnv', 'ParallelEnv', ([], {}), '()\n', (1503, 1505), False, 'from paddle.fluid.dygraph.parallel import ParallelEnv\n'), ((1797, 1819), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (1806, 1819), True, 'import numpy as np\n'), ((1975, 2008), 'numpy.random.RandomState', 'np.random.RandomState', (['self.epoch'], {}), '(self.epoch)\n', (1996, 2008), True, 'import numpy as np\n'), ((6581, 6614), 'numpy.random.permutation', 'np.random.permutation', (['self._size'], {}), '(self._size)\n', (6602, 6614), True, 'import numpy as np\n'), ((6660, 6681), 'numpy.arange', 'np.arange', (['self._size'], {}), '(self._size)\n', (6669, 6681), True, 'import numpy as np\n')] |
import cv2
import time  # Remove Later
import numpy as np

video = cv2.VideoCapture("./img/vert2.mp4")

# BGR window treated as "line" pixels (near-black).
target_low = (0, 0, 0)
target_high = (50, 50, 50)

while True:
    ok, frame = video.read()
    if not ok:
        # Loop the clip: reopen it once we run out of frames.
        video = cv2.VideoCapture("./img/vert2.mp4")
        continue

    small = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
    small = cv2.GaussianBlur(small, (5, 5), 3)

    mask = cv2.inRange(small, target_low, target_high)
    kernel = np.ones((3, 3), np.uint8)
    mask = cv2.erode(mask, kernel, iterations=1)   # Remove noise
    mask = cv2.dilate(mask, kernel, iterations=9)  # Restore box sizes

    contours, _ = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(small, contours, -1, (0, 200, 0), 3)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        cv2.rectangle(small, (x, y), (x + w, y + h), (0, 0, 255), 3)
        # Vertical tick at the horizontal center of each detected box.
        cv2.line(small, (x + (w // 2), 200), (x + (w // 2), 250), (255, 0, 0), 3)

    cv2.imshow("orginal with line", small)
    time.sleep(0.025)  # Remove Later
    if cv2.waitKey(1) == 27:  # Esc quits
        break

cv2.waitKey(0)
cv2.destroyAllWindows() | [
"cv2.rectangle",
"cv2.drawContours",
"numpy.ones",
"cv2.dilate",
"cv2.inRange",
"cv2.erode",
"cv2.line",
"time.sleep",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.resize",
"cv2.GaussianBlur",
"cv2.waitKey",
"cv2.boundingRect"
] | [((66, 101), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""./img/vert2.mp4"""'], {}), "('./img/vert2.mp4')\n", (82, 101), False, 'import cv2\n'), ((1154, 1168), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1165, 1168), False, 'import cv2\n'), ((1169, 1192), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1190, 1192), False, 'import cv2\n'), ((311, 354), 'cv2.resize', 'cv2.resize', (['image', '(0, 0)'], {'fx': '(0.25)', 'fy': '(0.25)'}), '(image, (0, 0), fx=0.25, fy=0.25)\n', (321, 354), False, 'import cv2\n'), ((366, 400), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(5, 5)', '(3)'], {}), '(image, (5, 5), 3)\n', (382, 400), False, 'import cv2\n'), ((416, 459), 'cv2.inRange', 'cv2.inRange', (['image', 'target_low', 'target_high'], {}), '(image, target_low, target_high)\n', (427, 459), False, 'import cv2\n'), ((473, 498), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (480, 498), True, 'import numpy as np\n'), ((514, 556), 'cv2.erode', 'cv2.erode', (['Blackline', 'kernel'], {'iterations': '(1)'}), '(Blackline, kernel, iterations=1)\n', (523, 556), False, 'import cv2\n'), ((597, 640), 'cv2.dilate', 'cv2.dilate', (['Blackline', 'kernel'], {'iterations': '(9)'}), '(Blackline, kernel, iterations=9)\n', (607, 640), False, 'import cv2\n'), ((772, 825), 'cv2.drawContours', 'cv2.drawContours', (['image', 'contours', '(-1)', '(0, 200, 0)', '(3)'], {}), '(image, contours, -1, (0, 200, 0), 3)\n', (788, 825), False, 'import cv2\n'), ((1033, 1071), 'cv2.imshow', 'cv2.imshow', (['"""orginal with line"""', 'image'], {}), "('orginal with line', image)\n", (1043, 1071), False, 'import cv2\n'), ((1077, 1094), 'time.sleep', 'time.sleep', (['(0.025)'], {}), '(0.025)\n', (1087, 1094), False, 'import time\n'), ((1105, 1119), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1116, 1119), False, 'import cv2\n'), ((227, 262), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""./img/vert2.mp4"""'], {}), 
"('./img/vert2.mp4')\n", (243, 262), False, 'import cv2\n'), ((869, 888), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (885, 888), False, 'import cv2\n'), ((901, 961), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(3)'], {}), '(image, (x, y), (x + w, y + h), (0, 0, 255), 3)\n', (914, 961), False, 'import cv2\n'), ((966, 1035), 'cv2.line', 'cv2.line', (['image', '(x + w // 2, 200)', '(x + w // 2, 250)', '(255, 0, 0)', '(3)'], {}), '(image, (x + w // 2, 200), (x + w // 2, 250), (255, 0, 0), 3)\n', (974, 1035), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
##########################################################################
# pySAP - Copyright (C) CEA, 2017 - 2018
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
This module contains linears operators classes.
"""
# Package import
import pysap
from pysap.base.utils import flatten
from pysap.base.utils import unflatten
# Third party import
import numpy
class Wavelet2(object):
    """ The 2D wavelet transform class.

    Wraps a pysap transform and exposes the decomposition / reconstruction
    pair as the linear operators ``op`` and ``adj_op``.
    """
    def __init__(self, wavelet_name, nb_scale=4, verbose=0):
        """ Initialize the 'Wavelet2' class.

        Parameters
        ----------
        wavelet_name: str
            the wavelet name to be used during the decomposition.
        nb_scale: int, default 4
            the number of scales in the decomposition.
        verbose: int, default 0
            the verbosity level.

        Raises
        ------
        ValueError
            if 'wavelet_name' is not an available pysap transform.
        """
        self.nb_scale = nb_scale
        if wavelet_name not in pysap.AVAILABLE_TRANSFORMS:
            raise ValueError(
                "Unknown transformation '{0}'.".format(wavelet_name))
        transform_klass = pysap.load_transform(wavelet_name)
        self.transform = transform_klass(
            nb_scale=self.nb_scale, verbose=verbose)
        # Filled by 'op' and consumed by 'adj_op' to unflatten coefficients.
        self.coeffs_shape = None

    def op(self, data):
        """ Define the wavelet operator.

        This method returns the input data convolved with the wavelet filter.

        Parameters
        ----------
        data: ndarray or Image
            input 2D data array.

        Returns
        -------
        coeffs: ndarray
            the wavelet coefficients, flattened to a 1D array.
        """
        if isinstance(data, numpy.ndarray):
            data = pysap.Image(data=data)
        self.transform.data = data
        self.transform.analysis()
        # Remember the per-band shapes so the adjoint can rebuild them.
        coeffs, self.coeffs_shape = flatten(self.transform.analysis_data)
        return coeffs

    def adj_op(self, coeffs, dtype="array"):
        """ Define the wavelet adjoint operator.

        This method returns the reconstructed image.

        Parameters
        ----------
        coeffs: ndarray
            the wavelet coefficients.
        dtype: str, default 'array'
            if 'array' return the data as a ndarray, otherwise return a
            pysap.Image.

        Returns
        -------
        data: ndarray or pysap.Image
            the reconstructed data.
        """
        self.transform.analysis_data = unflatten(coeffs, self.coeffs_shape)
        image = self.transform.synthesis()
        if dtype == "array":
            return image.data
        return image

    def l2norm(self, shape):
        """ Compute the L2 norm of the operator.

        The norm is estimated by applying the transform to a unit impulse
        placed at the center of a zero-filled image.

        Parameters
        ----------
        shape: uplet
            the data shape.

        Returns
        -------
        norm: float
            the L2 norm.
        """
        # Create fake data: a zero image with a single central impulse.
        shape = numpy.asarray(shape)
        shape += shape % 2  # force even dimensions
        fake_data = numpy.zeros(shape)
        # Bugfix: index the center with a plain tuple. The previous
        # 'fake_data[list(zip(shape // 2))]' relied on non-tuple
        # multidimensional indexing, which is deprecated and removed in
        # recent numpy releases; 'tuple(shape // 2)' selects the same
        # central element on every numpy version.
        fake_data[tuple(shape // 2)] = 1

        # Call mr_transform
        data = self.op(fake_data)

        # Compute the L2 norm
        return numpy.linalg.norm(data)
| [
"pysap.Image",
"pysap.base.utils.flatten",
"numpy.asarray",
"pysap.base.utils.unflatten",
"pysap.load_transform",
"numpy.zeros",
"numpy.linalg.norm"
] | [((1312, 1346), 'pysap.load_transform', 'pysap.load_transform', (['wavelet_name'], {}), '(wavelet_name)\n', (1332, 1346), False, 'import pysap\n'), ((2021, 2058), 'pysap.base.utils.flatten', 'flatten', (['self.transform.analysis_data'], {}), '(self.transform.analysis_data)\n', (2028, 2058), False, 'from pysap.base.utils import flatten\n'), ((2605, 2641), 'pysap.base.utils.unflatten', 'unflatten', (['coeffs', 'self.coeffs_shape'], {}), '(coeffs, self.coeffs_shape)\n', (2614, 2641), False, 'from pysap.base.utils import unflatten\n'), ((3049, 3069), 'numpy.asarray', 'numpy.asarray', (['shape'], {}), '(shape)\n', (3062, 3069), False, 'import numpy\n'), ((3117, 3135), 'numpy.zeros', 'numpy.zeros', (['shape'], {}), '(shape)\n', (3128, 3135), False, 'import numpy\n'), ((3290, 3313), 'numpy.linalg.norm', 'numpy.linalg.norm', (['data'], {}), '(data)\n', (3307, 3313), False, 'import numpy\n'), ((1893, 1915), 'pysap.Image', 'pysap.Image', ([], {'data': 'data'}), '(data=data)\n', (1904, 1915), False, 'import pysap\n')] |
from copy import copy, deepcopy
import numpy as np
from unittest import TestCase
from transition_system.arc_eager import ArcEager, ArcEagerDynamicOracle
def generate_all_projective_parses(size):
    """Enumerate the head sequences of every projective parse of `size` tokens.

    Performs an exhaustive depth-first search over all allowed action
    sequences of the arc-eager transition system, collecting the head array
    of every state that reaches a final configuration.
    """
    system = ArcEager(1)
    frontier = [system.state(size)]
    parses = set()
    while frontier:
        current = frontier.pop()
        if system.is_final(current):
            heads, labels = system.extract_parse(current)
            parses.add(tuple(heads))
        else:
            # Branch on every legal action from this configuration.
            for action in system.allowed(current):
                successor = deepcopy(current)
                system.perform(successor, action)
                frontier.append(successor)
    return parses
class MockSentence:
    """Minimal sentence stub carrying only a gold adjacency matrix.

    ``adjacency[head, dependent]`` is set to True by callers to mark that
    ``head`` governs ``dependent``; everything starts out False.
    """

    def __init__(self, num_tokens):
        # np.full with a boolean fill value produces a bool-dtype matrix,
        # equivalent to np.zeros(..., dtype=bool).
        self.adjacency = np.full((num_tokens, num_tokens), False)
class TestArcEager(TestCase):
    """Exhaustive sanity checks for the arc-eager dynamic oracle."""

    def test_dynamic_oracle_is_complete(self):
        """For every reachable gold parse, always following a zero-cost
        action must reconstruct exactly that parse, so a zero-cost action
        has to exist at every step (completeness of the oracle)."""
        SIZE = 4
        arc_eager = ArcEager(1)
        dyn_oracle = ArcEagerDynamicOracle()
        # Every head sequence the transition system itself can produce.
        valid_parses = generate_all_projective_parses(SIZE)
        for valid_parse in valid_parses:
            # One extra row/column beyond the heads array (presumably for a
            # root token -- confirm against the transition system).
            sent = MockSentence(len(valid_parse) + 1)
            for v, u in enumerate(valid_parse):
                # valid_parse[v] == u means token u is the head of token v.
                sent.adjacency[u, v] = True
            state = arc_eager.state(SIZE)
            while not arc_eager.is_final(state):
                allowed_actions = arc_eager.allowed(state)
                costs = dyn_oracle(state, sent, allowed_actions)
                # Completeness: some allowed action must cost nothing.
                self.assertEqual(costs.min(), 0)
                index = costs.argmin()
                # Greedily take (one of) the cheapest actions.
                arc_eager.perform(state, allowed_actions[index])
            heads, labels = arc_eager.extract_parse(state)
            self.assertEqual(tuple(heads), valid_parse) | [
"numpy.zeros",
"transition_system.arc_eager.ArcEagerDynamicOracle",
"transition_system.arc_eager.ArcEager",
"copy.deepcopy"
] | [((214, 225), 'transition_system.arc_eager.ArcEager', 'ArcEager', (['(1)'], {}), '(1)\n', (222, 225), False, 'from transition_system.arc_eager import ArcEager, ArcEagerDynamicOracle\n'), ((814, 860), 'numpy.zeros', 'np.zeros', (['(num_tokens, num_tokens)'], {'dtype': 'bool'}), '((num_tokens, num_tokens), dtype=bool)\n', (822, 860), True, 'import numpy as np\n'), ((978, 989), 'transition_system.arc_eager.ArcEager', 'ArcEager', (['(1)'], {}), '(1)\n', (986, 989), False, 'from transition_system.arc_eager import ArcEager, ArcEagerDynamicOracle\n'), ((1011, 1034), 'transition_system.arc_eager.ArcEagerDynamicOracle', 'ArcEagerDynamicOracle', ([], {}), '()\n', (1032, 1034), False, 'from transition_system.arc_eager import ArcEager, ArcEagerDynamicOracle\n'), ((602, 617), 'copy.deepcopy', 'deepcopy', (['state'], {}), '(state)\n', (610, 617), False, 'from copy import copy, deepcopy\n')] |
"""
DNN Modules
The feed backward will be completed in the batch-wise operation
"""
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from qtorch.quant import float_quantize
from torch.nn import init
from .function import *
class Conv2d(nn.Module):
def __init__(self, in_channels: int, out_channels: int, kernel_size: int, stride=1, padding=0,
dilation=1, groups=1, bias=False, lr=0.1, momentum=0.9, use_relu=True, relu_inplace=True, low_precision=False):
super(Conv2d, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.lp = low_precision
if self.lp:
# low precision values
self.w_man = 2
self.w_exp = 5
self.g_man = 2
self.g_exp = 5
self.x_man = 2
self.x_exp = 5
# accumulation precisions
self.y_exp = 5
self.y_man = 10
self.c_tc = 8 # tensor core channels
# initialize
fan_in = out_channels * kernel_size * kernel_size
weight_std = np.sqrt(2. / fan_in)
self.weight= torch.empty(out_channels, in_channels, kernel_size, kernel_size).normal_(mean=0.0, std=weight_std).cuda()
if bias:
self.bias = torch.empty(out_channels).normal_(mean=0.0, std=weight_std).cuda()
else:
self.bias = torch.zeros(out_channels).cuda()
# convolution
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
# gradient
self.w_grad = torch.zeros_like(self.weight).cuda()
# SGD with momentum
self.momentum = momentum
self.lr = lr
self.w_vel = torch.zeros_like(self.weight).cuda()
def conv(self, input):
c_out, c_in, k, _ = self.weight.size()
if c_in <= 3:
self.c_tc = c_in
c_iter = c_in // self.c_tc
# compute the original output
odim = math.floor((input.size(2) + 2*self.padding - self.dilation * (self.weight.size(2)-1)-1)/self.stride + 1)
y_total = torch.zeros((input.size(0), self.weight.size(0), odim, odim)).cuda()
# low precision floating point
if self.lp:
self.weight = float_quantize(self.weight.data, exp=self.w_exp, man=self.w_man, rounding="nearest")
self.input = float_quantize(self.input, exp=self.x_exp, man=self.x_man, rounding="nearest")
# if c_in != 3:
# self.input = float_quantize(self.input, exp=self.x_exp, man=self.x_man, rounding="nearest")
# else:
# self.input = self.input
for ii in range(c_iter):
maskc = torch.zeros_like(self.weight.data)
maskc[:, ii*self.c_tc:(ii+1)*self.c_tc, :, :] = 1 # select 8 input channels
for ih in range(k):
for iw in range(k):
maskk = torch.zeros_like(self.weight.data)
maskk[:,:,ih,iw] = 1
mask = maskc * maskk # combined mask
# low precision output
y = F.conv2d(self.input, self.weight.data*mask, self.bias, self.stride, self.padding, self.dilation, self.groups)
# high precision accumulation
y_total += y
if self.lp:
y_total = float_quantize(y_total, exp=self.y_exp, man=self.y_man, rounding="nearest")
return y_total
def forward(self, input: Tensor):
self.input = input.cuda()
# convolution
self.out = self.conv(self.input)
if self.lp:
self.out = float_quantize(self.out, exp=self.x_exp, man=self.x_man, rounding="nearest")
return self.out
def zero_grad(self):
self.w_grad.fill_(0.)
def feed_backward(self, output_grad):
r"""
Gradient computation based on 1 image
"""
if len(output_grad.size()) < 4:
output_grad.unsqueeze(0)
output_grad_t = output_grad.transpose(0,1)
input_i = self.input
input_i_t = input_i.transpose(0,1)
# flip the weight
weight_flip = torch.flip(self.weight, [2,3])
weight_t = weight_flip.transpose(0,1)
dout = F.conv2d(output_grad, weight_t, stride=self.stride, padding=self.padding)
# output gradient
if self.lp:
dout = float_quantize(dout, exp=self.g_exp, man=self.g_man, rounding="nearest")
# weight gradient accumulation
if self.lp:
dw = torch.zeros_like(self.weight).transpose(0,1)
for batch_idx in range(output_grad.size(0)):
input_i = self.input[batch_idx].unsqueeze(0)
output_grad_i = output_grad[batch_idx].unsqueeze(0)
input_i_t = input_i.transpose(0,1)
output_grad_i_t = output_grad_i.transpose(0,1)
dwi = F.conv2d(input_i_t, output_grad_i_t, stride=self.stride, padding=self.padding)
dw += dwi
dw = float_quantize(dw, exp=self.y_exp, man=self.y_man, rounding="nearest")
else:
dw = F.conv2d(input_i_t, output_grad_t, stride=self.stride, padding=self.padding)
self.w_grad = dw.transpose(0,1)
return dout
def weight_update(self):
self.w_vel = self.momentum * self.w_vel + self.w_grad
# low precision velocity
self.weight_old = self.weight.clone()
if self.lp:
self.w_vel = float_quantize(self.w_vel, exp=self.g_exp, man=self.g_man, rounding="nearest")
self.weight -= self.lr * self.w_vel
def extra_repr(self):
return super(Conv2d, self).extra_repr() + 'in_channels={}, out_channels={}, kernel_size={}, stride={}, padding={}, lp={}'.format(self.in_channels,
self.out_channels, self.kernel_size, self.stride, self.padding, self.lp)
class Linear(nn.Module):
def __init__(self, in_features, out_features, bias=True, lr=0.1, momentum=0.9, low_precision=False):
super(Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.lp = low_precision
if self.lp:
# low precision values
self.w_man = 2
self.w_exp = 5
self.g_man = 2
self.g_exp = 5
self.x_man = 2
self.x_exp = 5
# accumulation precisions
self.y_exp = 5
self.y_man = 10
weight_bound = np.sqrt(6. / (in_features + out_features))
self.weight = torch.empty(out_features, in_features).uniform_(-weight_bound, weight_bound).cuda()
if bias:
self.bias = torch.empty(out_features).uniform_(-weight_bound, weight_bound).cuda()
else:
self.bias = torch.zeros(out_features).cuda()
# Gradient
self.w_grad = torch.zeros_like(self.weight).cuda()
self.b_grad = torch.zeros_like(self.bias).cuda()
# SGD with momentum
self.momentum = momentum
self.lr = lr
self.w_vel = torch.zeros_like(self.weight).cuda()
self.b_vel = torch.zeros_like(self.bias).cuda()
def zero_grad(self):
self.w_grad.fill_(0.)
self.b_grad.fill_(0.)
def forward(self, input):
self.input = input.cuda()
# low precision floating point
if self.lp:
self.weight = float_quantize(self.weight.data, exp=self.w_exp, man=self.w_man, rounding="nearest")
self.bias = float_quantize(self.bias.data, exp=self.w_exp, man=self.w_man, rounding="nearest")
self.input = float_quantize(self.input, exp=self.x_exp, man=self.x_man, rounding="nearest")
self.out = F.linear(self.input, self.weight, self.bias)
if self.lp:
self.out = float_quantize(self.out, exp=self.y_exp, man=self.y_man, rounding="nearest")
return self.out
def feed_backward(self, out_gradient):
r"""
Gradient computation based on 1 image
dw = out_grad.T @ input
db = out_grad.sum()
out_grad: (1, out_features)
input: (1, in_features)
"""
out_grad_transpose = out_gradient.transpose(0,1)
self.w_grad = torch.matmul(out_grad_transpose, self.input)
self.b_grad = out_gradient.sum(dim=0).view(self.bias.size())
# output gradient
dout = torch.matmul(out_gradient, self.weight.data)
if self.lp:
dout = float_quantize(dout, exp=self.g_exp, man=self.g_man, rounding="nearest")
return dout
def weight_update(self):
r"""
Update the weight after the gradient accumulation
"""
self.w_vel = self.momentum * self.w_vel + self.w_grad
self.b_vel = self.momentum * self.b_vel + self.b_grad
self.weight_old = self.weight.clone()
self.bias_old = self.bias.clone()
self.weight -= self.lr * self.w_vel
self.bias -= self.lr * self.b_vel
def extra_repr(self):
return super(Linear, self).extra_repr() + 'in_features={}, out_features={}, lp={}'.format(self.in_features, self.out_features, self.lp)
class MaxPool2d(nn.Module):
r"""
Implementing max pooling with pytorch function
"""
def __init__(self, kernel_size, stride, low_precision=False):
super(MaxPool2d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.name = 'MaxPool2d'
self.type = 'pool'
self.lp = low_precision
if self.lp:
self.g_man = 2
self.g_exp = 5
def forward(self, input):
self.input = input
self.out = maxpool_2d(input, f=self.kernel_size, s=self.stride)
self.N, self.C, self.H, self.W = self.out.size()
return self.out
def feed_backward(self, out_gradient):
r"""
Gradient computation based on 1 image
"""
if len(out_gradient.size()) == 2:
out_gradient = out_gradient.view(-1, self.C, self.H, self.W) # if the gradient flow back from the flatten
dout = maxpoolBackward(out_gradient, self.input, f=self.kernel_size, s=self.stride)
if self.lp:
dout = float_quantize(dout, exp=self.g_exp, man=self.g_man, rounding="nearest")
return dout
def extra_repr(self):
return super(MaxPool2d, self).extra_repr() + 'kernel_size={}, stride={}, lp={}'.format(self.kernel_size, self.stride, self.lp)
class BatchNorm(nn.Module):
def __init__(self, num_features, batch_size=128, eps=1e-5, m=0.1, lr=0.1, momentum=0.9, affine=True, use_relu=True, relu_inplace=True):
super(BatchNorm, self).__init__()
self.num_features = num_features
self.eps = eps
self.m = m
self.affine = affine
self.training = True
self.batch_size = batch_size
# running statistics
self.running_mean = torch.zeros(num_features).cuda()
self.running_var = torch.ones(num_features).cuda()
# affine transformation
self.weight = torch.Tensor(num_features)
self.bias = torch.Tensor(num_features)
# initialize the weights and bias
init.ones_(self.weight)
init.zeros_(self.bias)
# gradient accumulation
self.w_grad = torch.zeros_like(self.weight).cuda()
self.b_grad = torch.zeros_like(self.bias).cuda()
# SGD with momentum
self.momentum = momentum
self.lr = lr
self.w_vel = torch.zeros_like(self.weight).cuda()
self.b_vel = torch.zeros_like(self.bias).cuda()
def zero_grad(self):
self.w_grad.fill_(0.)
self.b_grad.fill_(0.)
def forward(self, input:Tensor):
self.input = input.cuda()
# if self.training:
# self.mean = self.input.mean([0,2,3])
# self.var = self.input.var([0,2,3])
# self.std = torch.sqrt(self.var + self.eps)
# # update running statistics
# self.running_mean = self.momentum * self.mean + (1 - self.m) * self.running_mean
# self.running_var = self.momentum * self.std + (1 - self.m) * self.running_var
# else:
# self.mean = self.running_mean
# self.var = self.running_var
# self.std = torch.sqrt(self.var + self.eps)
self.mean = self.input.mean([0,2,3])
self.var = self.input.var([0,2,3])
self.std = torch.sqrt(self.var + self.eps)
# update running statistics
self.running_mean = self.momentum * self.mean + (1 - self.m) * self.running_mean
self.running_var = self.momentum * self.std + (1 - self.m) * self.running_var
self.inv_std = 1 / (self.std[None, :, None, None])
self.xmu = self.input - self.mean[None, :, None, None]
self.xhat = self.xmu.mul(self.inv_std)
if self.affine:
self.output = self.xhat * self.weight[None, :, None, None] + self.bias[None, :, None, None]
return self.output
def feed_backward(self, output_grad):
self.b_grad = output_grad.sum(dim=0).sum([1,2])
self.w_grad = output_grad.mul(self.xhat).sum(dim=0).sum([1,2])
ppc = self.input.size(2) * self.input.size(3)
dinv_std = self.xmu
dinvvar = (1.0 / (2.0 * torch.sqrt(1/self.var[None, :, None, None]))) * dinv_std
dvar = (-1.0 / (self.var[None, :, None, None] ** 2)) * dinvvar
ddenominator = (self.input - self.mean[None, :, None, None]) * (2 * (ppc - 1) / ppc ** 2) * dvar
dcentered = torch.sqrt(1/self.var)
dnumerator = (1.0 - 1.0 / ppc) * dcentered[None, :, None, None]
dX = ddenominator + dnumerator
dout = dX * output_grad
return dout
def weight_update(self):
self.w_vel = self.momentum * self.w_vel + self.w_grad
self.b_vel = self.momentum * self.b_vel + self.b_grad
self.weight_old = self.weight.clone()
self.bias_old = self.bias.clone()
self.weight -= self.lr * self.w_vel
self.bias -= self.lr * self.b_vel
def extra_repr(self):
return super(BatchNorm, self).extra_repr() + 'num_features={}, eps={}'.format(self.num_features, self.eps)
class ReLU(nn.Module):
def __init__(self, inplace=True):
super(ReLU, self).__init__()
self.inplace = inplace
def forward(self, input):
self.output = F.relu(input, inplace=self.inplace)
return self.output
def feed_backward(self, output_grad):
relu_mask = torch.ceil(torch.clamp(self.output, min=0, max=1))
dout = output_grad * relu_mask
return dout
class MSE(nn.Module):
def __init__(self, num_classes, low_precision=False):
super(MSE, self).__init__()
self.num_classes = int(num_classes)
self.exp = 5
self.man = 2
self.lp = low_precision
self.name = 'MSELoss'
self.type = 'lossFunc'
def feed_forward(self, output, target):
self.batch = output.size(0)
self.output = output
self.target = target
assert self.num_classes == output.size(1), "Number of classes and the output dim must be identical"
loss = F.mse_loss(output, target)
if self.lp:
loss = float_quantize(loss, exp=self.exp, man=self.man, rounding="nearest")
return output, loss
def feed_backward(self):
"""
evaluate the gradient w.r.t output
"""
dout = 2 * (self.output - self.target) / (self.output.size(0)*self.output.size(1))
# output gradient
if self.lp:
dout = float_quantize(dout, exp=self.exp, man=self.man, rounding="nearest")
return dout
def weight_grad(self, groups=0):
pass
def apply_weight_grad(self, learning_rate=1.0, momentum=0.5,
batch_size=100, last_group=False):
pass
def extra_repr(self):
return super(MSE, self).extra_repr() + 'lp={}'.format(self.lp)
| [
"torch.nn.functional.linear",
"torch.nn.functional.conv2d",
"torch.nn.functional.mse_loss",
"numpy.sqrt",
"torch.nn.init.ones_",
"torch.Tensor",
"torch.sqrt",
"torch.nn.init.zeros_",
"qtorch.quant.float_quantize",
"torch.matmul",
"torch.flip",
"torch.nn.functional.relu",
"torch.zeros_like",
... | [((1376, 1397), 'numpy.sqrt', 'np.sqrt', (['(2.0 / fan_in)'], {}), '(2.0 / fan_in)\n', (1383, 1397), True, 'import numpy as np\n'), ((4549, 4580), 'torch.flip', 'torch.flip', (['self.weight', '[2, 3]'], {}), '(self.weight, [2, 3])\n', (4559, 4580), False, 'import torch\n'), ((4642, 4715), 'torch.nn.functional.conv2d', 'F.conv2d', (['output_grad', 'weight_t'], {'stride': 'self.stride', 'padding': 'self.padding'}), '(output_grad, weight_t, stride=self.stride, padding=self.padding)\n', (4650, 4715), True, 'import torch.nn.functional as F\n'), ((6957, 7000), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (in_features + out_features))'], {}), '(6.0 / (in_features + out_features))\n', (6964, 7000), True, 'import numpy as np\n'), ((8198, 8242), 'torch.nn.functional.linear', 'F.linear', (['self.input', 'self.weight', 'self.bias'], {}), '(self.input, self.weight, self.bias)\n', (8206, 8242), True, 'import torch.nn.functional as F\n'), ((8715, 8759), 'torch.matmul', 'torch.matmul', (['out_grad_transpose', 'self.input'], {}), '(out_grad_transpose, self.input)\n', (8727, 8759), False, 'import torch\n'), ((8879, 8923), 'torch.matmul', 'torch.matmul', (['out_gradient', 'self.weight.data'], {}), '(out_gradient, self.weight.data)\n', (8891, 8923), False, 'import torch\n'), ((11586, 11612), 'torch.Tensor', 'torch.Tensor', (['num_features'], {}), '(num_features)\n', (11598, 11612), False, 'import torch\n'), ((11633, 11659), 'torch.Tensor', 'torch.Tensor', (['num_features'], {}), '(num_features)\n', (11645, 11659), False, 'import torch\n'), ((11711, 11734), 'torch.nn.init.ones_', 'init.ones_', (['self.weight'], {}), '(self.weight)\n', (11721, 11734), False, 'from torch.nn import init\n'), ((11743, 11765), 'torch.nn.init.zeros_', 'init.zeros_', (['self.bias'], {}), '(self.bias)\n', (11754, 11765), False, 'from torch.nn import init\n'), ((12979, 13010), 'torch.sqrt', 'torch.sqrt', (['(self.var + self.eps)'], {}), '(self.var + self.eps)\n', (12989, 13010), False, 'import torch\n'), ((14123, 
14147), 'torch.sqrt', 'torch.sqrt', (['(1 / self.var)'], {}), '(1 / self.var)\n', (14133, 14147), False, 'import torch\n'), ((14977, 15012), 'torch.nn.functional.relu', 'F.relu', (['input'], {'inplace': 'self.inplace'}), '(input, inplace=self.inplace)\n', (14983, 15012), True, 'import torch.nn.functional as F\n'), ((15784, 15810), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['output', 'target'], {}), '(output, target)\n', (15794, 15810), True, 'import torch.nn.functional as F\n'), ((2587, 2676), 'qtorch.quant.float_quantize', 'float_quantize', (['self.weight.data'], {'exp': 'self.w_exp', 'man': 'self.w_man', 'rounding': '"""nearest"""'}), "(self.weight.data, exp=self.w_exp, man=self.w_man, rounding=\n 'nearest')\n", (2601, 2676), False, 'from qtorch.quant import float_quantize\n'), ((2697, 2775), 'qtorch.quant.float_quantize', 'float_quantize', (['self.input'], {'exp': 'self.x_exp', 'man': 'self.x_man', 'rounding': '"""nearest"""'}), "(self.input, exp=self.x_exp, man=self.x_man, rounding='nearest')\n", (2711, 2775), False, 'from qtorch.quant import float_quantize\n'), ((3031, 3065), 'torch.zeros_like', 'torch.zeros_like', (['self.weight.data'], {}), '(self.weight.data)\n', (3047, 3065), False, 'import torch\n'), ((4016, 4092), 'qtorch.quant.float_quantize', 'float_quantize', (['self.out'], {'exp': 'self.x_exp', 'man': 'self.x_man', 'rounding': '"""nearest"""'}), "(self.out, exp=self.x_exp, man=self.x_man, rounding='nearest')\n", (4030, 4092), False, 'from qtorch.quant import float_quantize\n'), ((4790, 4862), 'qtorch.quant.float_quantize', 'float_quantize', (['dout'], {'exp': 'self.g_exp', 'man': 'self.g_man', 'rounding': '"""nearest"""'}), "(dout, exp=self.g_exp, man=self.g_man, rounding='nearest')\n", (4804, 4862), False, 'from qtorch.quant import float_quantize\n'), ((5545, 5621), 'torch.nn.functional.conv2d', 'F.conv2d', (['input_i_t', 'output_grad_t'], {'stride': 'self.stride', 'padding': 'self.padding'}), '(input_i_t, output_grad_t, stride=self.stride, 
padding=self.padding)\n', (5553, 5621), True, 'import torch.nn.functional as F\n'), ((5909, 5987), 'qtorch.quant.float_quantize', 'float_quantize', (['self.w_vel'], {'exp': 'self.g_exp', 'man': 'self.g_man', 'rounding': '"""nearest"""'}), "(self.w_vel, exp=self.g_exp, man=self.g_man, rounding='nearest')\n", (5923, 5987), False, 'from qtorch.quant import float_quantize\n'), ((7880, 7969), 'qtorch.quant.float_quantize', 'float_quantize', (['self.weight.data'], {'exp': 'self.w_exp', 'man': 'self.w_man', 'rounding': '"""nearest"""'}), "(self.weight.data, exp=self.w_exp, man=self.w_man, rounding=\n 'nearest')\n", (7894, 7969), False, 'from qtorch.quant import float_quantize\n'), ((7989, 8076), 'qtorch.quant.float_quantize', 'float_quantize', (['self.bias.data'], {'exp': 'self.w_exp', 'man': 'self.w_man', 'rounding': '"""nearest"""'}), "(self.bias.data, exp=self.w_exp, man=self.w_man, rounding=\n 'nearest')\n", (8003, 8076), False, 'from qtorch.quant import float_quantize\n'), ((8097, 8175), 'qtorch.quant.float_quantize', 'float_quantize', (['self.input'], {'exp': 'self.x_exp', 'man': 'self.x_man', 'rounding': '"""nearest"""'}), "(self.input, exp=self.x_exp, man=self.x_man, rounding='nearest')\n", (8111, 8175), False, 'from qtorch.quant import float_quantize\n'), ((8286, 8362), 'qtorch.quant.float_quantize', 'float_quantize', (['self.out'], {'exp': 'self.y_exp', 'man': 'self.y_man', 'rounding': '"""nearest"""'}), "(self.out, exp=self.y_exp, man=self.y_man, rounding='nearest')\n", (8300, 8362), False, 'from qtorch.quant import float_quantize\n'), ((8963, 9035), 'qtorch.quant.float_quantize', 'float_quantize', (['dout'], {'exp': 'self.g_exp', 'man': 'self.g_man', 'rounding': '"""nearest"""'}), "(dout, exp=self.g_exp, man=self.g_man, rounding='nearest')\n", (8977, 9035), False, 'from qtorch.quant import float_quantize\n'), ((10737, 10809), 'qtorch.quant.float_quantize', 'float_quantize', (['dout'], {'exp': 'self.g_exp', 'man': 'self.g_man', 'rounding': '"""nearest"""'}), 
"(dout, exp=self.g_exp, man=self.g_man, rounding='nearest')\n", (10751, 10809), False, 'from qtorch.quant import float_quantize\n'), ((15118, 15156), 'torch.clamp', 'torch.clamp', (['self.output'], {'min': '(0)', 'max': '(1)'}), '(self.output, min=0, max=1)\n', (15129, 15156), False, 'import torch\n'), ((15850, 15918), 'qtorch.quant.float_quantize', 'float_quantize', (['loss'], {'exp': 'self.exp', 'man': 'self.man', 'rounding': '"""nearest"""'}), "(loss, exp=self.exp, man=self.man, rounding='nearest')\n", (15864, 15918), False, 'from qtorch.quant import float_quantize\n'), ((16219, 16287), 'qtorch.quant.float_quantize', 'float_quantize', (['dout'], {'exp': 'self.exp', 'man': 'self.man', 'rounding': '"""nearest"""'}), "(dout, exp=self.exp, man=self.man, rounding='nearest')\n", (16233, 16287), False, 'from qtorch.quant import float_quantize\n'), ((1890, 1919), 'torch.zeros_like', 'torch.zeros_like', (['self.weight'], {}), '(self.weight)\n', (1906, 1919), False, 'import torch\n'), ((2039, 2068), 'torch.zeros_like', 'torch.zeros_like', (['self.weight'], {}), '(self.weight)\n', (2055, 2068), False, 'import torch\n'), ((5317, 5395), 'torch.nn.functional.conv2d', 'F.conv2d', (['input_i_t', 'output_grad_i_t'], {'stride': 'self.stride', 'padding': 'self.padding'}), '(input_i_t, output_grad_i_t, stride=self.stride, padding=self.padding)\n', (5325, 5395), True, 'import torch.nn.functional as F\n'), ((5443, 5513), 'qtorch.quant.float_quantize', 'float_quantize', (['dw'], {'exp': 'self.y_exp', 'man': 'self.y_man', 'rounding': '"""nearest"""'}), "(dw, exp=self.y_exp, man=self.y_man, rounding='nearest')\n", (5457, 5513), False, 'from qtorch.quant import float_quantize\n'), ((7332, 7361), 'torch.zeros_like', 'torch.zeros_like', (['self.weight'], {}), '(self.weight)\n', (7348, 7361), False, 'import torch\n'), ((7391, 7418), 'torch.zeros_like', 'torch.zeros_like', (['self.bias'], {}), '(self.bias)\n', (7407, 7418), False, 'import torch\n'), ((7538, 7567), 'torch.zeros_like', 
'torch.zeros_like', (['self.weight'], {}), '(self.weight)\n', (7554, 7567), False, 'import torch\n'), ((7596, 7623), 'torch.zeros_like', 'torch.zeros_like', (['self.bias'], {}), '(self.bias)\n', (7612, 7623), False, 'import torch\n'), ((11439, 11464), 'torch.zeros', 'torch.zeros', (['num_features'], {}), '(num_features)\n', (11450, 11464), False, 'import torch\n'), ((11499, 11523), 'torch.ones', 'torch.ones', (['num_features'], {}), '(num_features)\n', (11509, 11523), False, 'import torch\n'), ((11821, 11850), 'torch.zeros_like', 'torch.zeros_like', (['self.weight'], {}), '(self.weight)\n', (11837, 11850), False, 'import torch\n'), ((11880, 11907), 'torch.zeros_like', 'torch.zeros_like', (['self.bias'], {}), '(self.bias)\n', (11896, 11907), False, 'import torch\n'), ((12019, 12048), 'torch.zeros_like', 'torch.zeros_like', (['self.weight'], {}), '(self.weight)\n', (12035, 12048), False, 'import torch\n'), ((12077, 12104), 'torch.zeros_like', 'torch.zeros_like', (['self.bias'], {}), '(self.bias)\n', (12093, 12104), False, 'import torch\n'), ((1670, 1695), 'torch.zeros', 'torch.zeros', (['out_channels'], {}), '(out_channels)\n', (1681, 1695), False, 'import torch\n'), ((3250, 3284), 'torch.zeros_like', 'torch.zeros_like', (['self.weight.data'], {}), '(self.weight.data)\n', (3266, 3284), False, 'import torch\n'), ((3454, 3570), 'torch.nn.functional.conv2d', 'F.conv2d', (['self.input', '(self.weight.data * mask)', 'self.bias', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(self.input, self.weight.data * mask, self.bias, self.stride, self.\n padding, self.dilation, self.groups)\n', (3462, 3570), True, 'import torch.nn.functional as F\n'), ((4948, 4977), 'torch.zeros_like', 'torch.zeros_like', (['self.weight'], {}), '(self.weight)\n', (4964, 4977), False, 'import torch\n'), ((7257, 7282), 'torch.zeros', 'torch.zeros', (['out_features'], {}), '(out_features)\n', (7268, 7282), False, 'import torch\n'), ((13861, 13906), 'torch.sqrt', 'torch.sqrt', 
(['(1 / self.var[None, :, None, None])'], {}), '(1 / self.var[None, :, None, None])\n', (13871, 13906), False, 'import torch\n'), ((1418, 1482), 'torch.empty', 'torch.empty', (['out_channels', 'in_channels', 'kernel_size', 'kernel_size'], {}), '(out_channels, in_channels, kernel_size, kernel_size)\n', (1429, 1482), False, 'import torch\n'), ((3734, 3809), 'qtorch.quant.float_quantize', 'float_quantize', (['y_total'], {'exp': 'self.y_exp', 'man': 'self.y_man', 'rounding': '"""nearest"""'}), "(y_total, exp=self.y_exp, man=self.y_man, rounding='nearest')\n", (3748, 3809), False, 'from qtorch.quant import float_quantize\n'), ((7022, 7060), 'torch.empty', 'torch.empty', (['out_features', 'in_features'], {}), '(out_features, in_features)\n', (7033, 7060), False, 'import torch\n'), ((1565, 1590), 'torch.empty', 'torch.empty', (['out_channels'], {}), '(out_channels)\n', (1576, 1590), False, 'import torch\n'), ((7148, 7173), 'torch.empty', 'torch.empty', (['out_features'], {}), '(out_features)\n', (7159, 7173), False, 'import torch\n')] |
from __future__ import division
import numpy as np
import scipy.stats as st
from numpy.testing import assert_array_almost_equal
from tensorprob import (
Exponential,
MigradOptimizer,
Mix2,
Mix3,
MixN,
Model,
Normal,
Parameter,
Poisson
)
def test_mix2_fit():
with Model() as model:
mu = Parameter()
sigma = Parameter(lower=1)
a = Parameter(lower=0)
f = Parameter(lower=0, upper=1)
X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
X2 = Exponential(a, bounds=[(-np.inf, 8), (10, np.inf)])
X12 = Mix2(f, X1, X2, bounds=[(6, 17), (18, 36)])
model.observed(X12)
model.initialize({
mu: 23,
sigma: 1.2,
a: 0.2,
f: 0.3,
})
# Generate some data to fit
np.random.seed(42)
exp_data = np.random.exponential(10, 200000)
exp_data = exp_data[(exp_data < 8) | (10 < exp_data)]
# Include the data blinded by the Mix2 bounds as we use the len(norm_data)
norm_data = np.random.normal(19, 2, 100000)
norm_data = norm_data[
((6 < norm_data) & (norm_data < 17)) |
((18 < norm_data) & (norm_data < 21)) |
((22 < norm_data) & (norm_data < 36))
]
data = np.concatenate([exp_data, norm_data])
data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
result = model.fit(data)
# Check the fit was successful
assert result.success
assert abs(model.state[mu] - 19) < 5e-3
assert abs(model.state[sigma] - 2) < 5e-3
assert abs(model.state[a] - 0.1) < 5e-4
assert abs(model.state[f] - (len(norm_data)/len(data))) < 5e-4
def test_mix2_fit_with_mix2_input():
with Model() as model:
mu = Parameter()
sigma = Parameter(lower=1, upper=4)
a = Parameter(lower=0.06)
b = Parameter(lower=0)
f_1 = Parameter(lower=0, upper=1)
f_2 = Parameter(lower=0, upper=1)
X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
X2 = Exponential(a, bounds=[(-np.inf, 8), (10, 27), (31, np.inf)])
X12 = Mix2(f_1, X1, X2, bounds=[(6, 17), (18, 36)])
X3 = Exponential(b)
X123 = Mix2(f_2, X12, X3, bounds=[(6, 17), (18, 36)])
model.observed(X123)
model.initialize({
mu: 23,
sigma: 1.2,
a: 0.2,
b: 0.04,
f_1: 0.3,
f_2: 0.4
})
# Generate some data to fit
np.random.seed(42)
exp_1_data = np.random.exponential(10, 200000)
exp_1_data = exp_1_data[
(6 < exp_1_data) &
((exp_1_data < 8) | (10 < exp_1_data)) &
((exp_1_data < 17) | (18 < exp_1_data)) &
((exp_1_data < 27) | (31 < exp_1_data)) &
(exp_1_data < 36)
]
exp_2_data = np.random.exponential(20, 200000)
exp_2_data = exp_2_data[
(6 < exp_2_data) &
((exp_2_data < 17) | (18 < exp_2_data)) &
(exp_2_data < 36)
]
# Include the data blinded by the Mix2 bounds as we use the len(norm_data)
norm_data = np.random.normal(19, 2, 100000)
norm_data = norm_data[
((6 < norm_data) & (norm_data < 17)) |
((18 < norm_data) & (norm_data < 21)) |
((22 < norm_data) & (norm_data < 36))
]
data = np.concatenate([exp_1_data, exp_2_data, norm_data])
data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
result = model.fit(data)
# Check the fit was successful
assert result.success
assert abs(model.state[mu] - 19) < 3e-2
assert abs(model.state[sigma] - 2) < 1e-3
assert abs(model.state[a] - 0.1) < 1e-3
assert abs(model.state[b] - 0.05) < 3e-4
assert abs(model.state[f_1] - (len(norm_data)/(len(exp_1_data)+len(norm_data)))) < 5e-3
assert abs(model.state[f_2] - ((len(exp_1_data)+len(norm_data))/len(data))) < 5e-4
# Check if we can access the individual components
xs = np.linspace(0, 41, 1001)
def allowed_point(x, bounds):
@np.vectorize
def allowed_point(x):
for l, u in bounds:
if l < x and x < u:
return 1
return 0
return allowed_point(x)
# Normal
bounds = [(6, 17), (18, 21), (22, 36)]
out1 = st.norm.pdf(xs, model.state[mu], model.state[sigma]) * allowed_point(xs, bounds)
integral = sum(
st.norm.cdf(u, model.state[mu], model.state[sigma]) -
st.norm.cdf(l, model.state[mu], model.state[sigma])
for l, u in bounds
)
out1 *= model.state[f_1] * model.state[f_2] / integral
out2 = model[X1].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
# Exponential 1
bounds = [(6, 8), (10, 17), (18, 27), (31, 36)]
out1 = st.expon.pdf(xs, 0, 1/model.state[a]) * allowed_point(xs, bounds)
integral = sum(
st.expon.cdf(u, 0, 1/model.state[a]) -
st.expon.cdf(l, 0, 1/model.state[a])
for l, u in bounds
)
out1 *= (1-model.state[f_1]) * model.state[f_2] / integral
out2 = model[X2].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
# Exponential 2
bounds = [(6, 17), (18, 36)]
out1 = st.expon.pdf(xs, 0, 1/model.state[b]) * allowed_point(xs, bounds)
integral = sum(
st.expon.cdf(u, 0, 1/model.state[b]) -
st.expon.cdf(l, 0, 1/model.state[b])
for l, u in bounds
)
out1 *= (1-model.state[f_2]) / integral
out2 = model[X3].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
def test_mix3_fit():
with Model() as model:
mu = Parameter()
sigma = Parameter(lower=1, upper=4)
a = Parameter(lower=0.06)
b = Parameter(lower=0)
f_1 = Parameter(lower=0, upper=1)
f_2 = Parameter(lower=0, upper=1)
X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
X2 = Exponential(a, bounds=[(-np.inf, 8), (10, 27), (31, np.inf)])
X3 = Exponential(b)
X123 = Mix3(f_1, f_2, X1, X2, X3, bounds=[(6, 17), (18, 36)])
model.observed(X123)
model.initialize({
mu: 23,
sigma: 1.2,
a: 0.2,
b: 0.04,
f_1: 0.3,
f_2: 0.4
})
# Generate some data to fit
np.random.seed(42)
exp_1_data = np.random.exponential(10, 200000)
exp_1_data = exp_1_data[
(6 < exp_1_data) &
((exp_1_data < 8) | (10 < exp_1_data)) &
((exp_1_data < 17) | (18 < exp_1_data)) &
((exp_1_data < 27) | (31 < exp_1_data)) &
(exp_1_data < 36)
]
exp_2_data = np.random.exponential(20, 200000)
exp_2_data = exp_2_data[
(6 < exp_2_data) &
((exp_2_data < 17) | (18 < exp_2_data)) &
(exp_2_data < 36)
]
# Include the data blinded by the Mix2 bounds as we use the len(norm_data)
norm_data = np.random.normal(19, 2, 100000)
norm_data = norm_data[
((6 < norm_data) & (norm_data < 17)) |
((18 < norm_data) & (norm_data < 21)) |
((22 < norm_data) & (norm_data < 36))
]
data = np.concatenate([exp_1_data, exp_2_data, norm_data])
data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
result = model.fit(data)
# Check the fit was successful
assert result.success
assert abs(model.state[mu] - 19) < 3e-2
assert abs(model.state[sigma] - 2) < 1e-3
assert abs(model.state[a] - 0.1) < 1e-3
assert abs(model.state[b] - 0.05) < 3e-4
assert abs(model.state[f_1] - (len(norm_data)/(len(exp_1_data)+len(norm_data)))) < 5e-3
assert abs(model.state[f_2] - ((len(exp_1_data)+len(norm_data))/len(data))) < 5e-4
# Check if we can access the individual components
xs = np.linspace(0, 41, 1001)
def allowed_point(x, bounds):
@np.vectorize
def allowed_point(x):
for l, u in bounds:
if l < x and x < u:
return 1
return 0
return allowed_point(x)
# Normal
bounds = [(6, 17), (18, 21), (22, 36)]
out1 = st.norm.pdf(xs, model.state[mu], model.state[sigma]) * allowed_point(xs, bounds)
integral = sum(
st.norm.cdf(u, model.state[mu], model.state[sigma]) -
st.norm.cdf(l, model.state[mu], model.state[sigma])
for l, u in bounds
)
out1 *= model.state[f_1] * model.state[f_2] / integral
out2 = model[X1].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
# Exponential 1
bounds = [(6, 8), (10, 17), (18, 27), (31, 36)]
out1 = st.expon.pdf(xs, 0, 1/model.state[a]) * allowed_point(xs, bounds)
integral = sum(
st.expon.cdf(u, 0, 1/model.state[a]) -
st.expon.cdf(l, 0, 1/model.state[a])
for l, u in bounds
)
out1 *= (1-model.state[f_1]) * model.state[f_2] / integral
out2 = model[X2].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
# Exponential 2
bounds = [(6, 17), (18, 36)]
out1 = st.expon.pdf(xs, 0, 1/model.state[b]) * allowed_point(xs, bounds)
integral = sum(
st.expon.cdf(u, 0, 1/model.state[b]) -
st.expon.cdf(l, 0, 1/model.state[b])
for l, u in bounds
)
out1 *= (1-model.state[f_2]) / integral
out2 = model[X3].pdf(xs)
assert_array_almost_equal(out1, out2, 11)
def test_mixn_fit():
    """Fit a 3-component MixN (Normal + two Exponentials) to blinded data.

    Each component carries its own blinding bounds on top of the
    mixture-level bounds (6, 17) + (18, 36); the fitted parameters and the
    per-component pdfs are checked against manually renormalised
    scipy.stats distributions.
    """
    with Model() as model:
        mu = Parameter()
        sigma = Parameter(lower=1, upper=4)
        a = Parameter(lower=0.06)
        b = Parameter(lower=0)
        f_1 = Parameter(lower=0, upper=1)
        f_2 = Parameter(lower=0, upper=1)
        # Per-component bounds blind part of each distribution's support.
        X1 = Normal(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])
        X2 = Exponential(a, bounds=[(-np.inf, 8), (10, 27), (31, np.inf)])
        X3 = Exponential(b)
        X123 = MixN([f_1, f_2], [X1, X2, X3], bounds=[(6, 17), (18, 36)])
        model.observed(X123)
    model.initialize({
        mu: 23,
        sigma: 1.2,
        a: 0.2,
        b: 0.04,
        f_1: 0.3,
        f_2: 0.4
    })
    # Generate some data to fit.  NOTE: the exact order of the np.random
    # calls after the seed matters for reproducing the asserted values.
    np.random.seed(42)
    exp_1_data = np.random.exponential(10, 200000)
    # Keep only points inside both the mixture bounds and X2's bounds.
    exp_1_data = exp_1_data[
        (6 < exp_1_data) &
        ((exp_1_data < 8) | (10 < exp_1_data)) &
        ((exp_1_data < 17) | (18 < exp_1_data)) &
        ((exp_1_data < 27) | (31 < exp_1_data)) &
        (exp_1_data < 36)
    ]
    exp_2_data = np.random.exponential(20, 200000)
    # X3 has no component bounds, so only the mixture bounds apply here.
    exp_2_data = exp_2_data[
        (6 < exp_2_data) &
        ((exp_2_data < 17) | (18 < exp_2_data)) &
        (exp_2_data < 36)
    ]
    # Include the data blinded by the Mix2 bounds as we use the len(norm_data)
    norm_data = np.random.normal(19, 2, 100000)
    norm_data = norm_data[
        ((6 < norm_data) & (norm_data < 17)) |
        ((18 < norm_data) & (norm_data < 21)) |
        ((22 < norm_data) & (norm_data < 36))
    ]
    data = np.concatenate([exp_1_data, exp_2_data, norm_data])
    data = data[((6 < data) & (data < 17)) | ((18 < data) & (data < 36))]
    result = model.fit(data)
    # Check the fit was successful
    assert result.success
    assert abs(model.state[mu] - 19) < 3e-2
    assert abs(model.state[sigma] - 2) < 1e-3
    assert abs(model.state[a] - 0.1) < 1e-3
    assert abs(model.state[b] - 0.05) < 3e-4
    # Fitted fractions should match the generated sample proportions.
    assert abs(model.state[f_1] - (len(norm_data)/(len(exp_1_data)+len(norm_data)))) < 5e-3
    assert abs(model.state[f_2] - ((len(exp_1_data)+len(norm_data))/len(data))) < 5e-4
    # Check if we can access the individual components
    xs = np.linspace(0, 41, 1001)
    def allowed_point(x, bounds):
        # Elementwise indicator: 1 where x lies strictly inside any of the
        # (lower, upper) intervals, else 0.  The inner def deliberately
        # shadows the outer name.
        @np.vectorize
        def allowed_point(x):
            for l, u in bounds:
                if l < x and x < u:
                    return 1
            return 0
        return allowed_point(x)
    # Normal
    bounds = [(6, 17), (18, 21), (22, 36)]
    out1 = st.norm.pdf(xs, model.state[mu], model.state[sigma]) * allowed_point(xs, bounds)
    # Renormalise over the visible (unblinded) intervals only.
    integral = sum(
        st.norm.cdf(u, model.state[mu], model.state[sigma]) -
        st.norm.cdf(l, model.state[mu], model.state[sigma])
        for l, u in bounds
    )
    out1 *= model.state[f_1] * model.state[f_2] / integral
    out2 = model[X1].pdf(xs)
    assert_array_almost_equal(out1, out2, 11)
    # Exponential 1
    bounds = [(6, 8), (10, 17), (18, 27), (31, 36)]
    out1 = st.expon.pdf(xs, 0, 1/model.state[a]) * allowed_point(xs, bounds)
    integral = sum(
        st.expon.cdf(u, 0, 1/model.state[a]) -
        st.expon.cdf(l, 0, 1/model.state[a])
        for l, u in bounds
    )
    out1 *= (1-model.state[f_1]) * model.state[f_2] / integral
    out2 = model[X2].pdf(xs)
    assert_array_almost_equal(out1, out2, 11)
    # Exponential 2
    bounds = [(6, 17), (18, 36)]
    out1 = st.expon.pdf(xs, 0, 1/model.state[b]) * allowed_point(xs, bounds)
    integral = sum(
        st.expon.cdf(u, 0, 1/model.state[b]) -
        st.expon.cdf(l, 0, 1/model.state[b])
        for l, u in bounds
    )
    out1 *= (1-model.state[f_2]) / integral
    out2 = model[X3].pdf(xs)
    assert_array_almost_equal(out1, out2, 11)
def test_mix2_extended():
    """Extended (yield-aware) fit of a Mix2 observed together with a
    Poisson count whose rate is the sum of the two component yields
    N1 and N2; the fitted yields and combined pdf are then checked.
    """
    np.random.seed(0)
    exp_data = np.random.exponential(10, 20000)
    exp_data = exp_data[(6 < exp_data) & (exp_data < 36)]
    norm1_data = np.random.normal(19, 2, 10000)
    norm1_data = norm1_data[(6 < norm1_data) & (norm1_data < 36)]
    data = np.concatenate([exp_data, norm1_data])
    data = data[((6 < data) & (data < 36))]
    with Model() as model:
        mu = Parameter()
        sigma = Parameter(lower=1)
        a = Parameter(lower=0)
        N1 = Parameter(lower=0)  # yield of the normal component (see assertion below)
        N2 = Parameter(lower=0)  # yield of the exponential component
        N = Poisson(N1+N2)
        X1 = Normal(mu, sigma)
        X2 = Exponential(a)
        # The mixture fraction is derived from the yields rather than
        # being a free parameter.
        X12 = Mix2(N1/(N1+N2), X1, X2, bounds=[(6, 36)])
        model.observed(X12, N)
    model.initialize({
        mu: 23,
        sigma: 1.2,
        a: 0.2,
        N1: len(data)/5,
        N2: len(data)*4/5
    })
    # NOTE(review): the second fit argument (len(data) repeated per event)
    # appears to be the observed count paired with the Poisson — confirm
    # against Model.fit's signature.
    result = model.fit(data, np.ones_like(data)*len(data), optimizer=MigradOptimizer())
    assert result.success
    assert abs(model.state[mu] - 19) < 3e-2
    assert abs(model.state[sigma] - 2) < 3e-2
    assert abs(model.state[a] - 0.1) < 1e-3
    # Yields should agree with the generated sample sizes to within the
    # Poisson uncertainty (sqrt of the count).
    assert abs(model.state[N1] - len(norm1_data)) < np.sqrt(len(norm1_data))
    assert abs(model.state[N2] - len(exp_data)) < np.sqrt(len(exp_data))
    # Check if the pdf is correct
    xs = np.linspace(0, 41, 101)
    def allowed_point(x, bounds):
        # Elementwise indicator: 1 where x lies strictly inside any of the
        # (lower, upper) intervals, else 0.  The inner def deliberately
        # shadows the outer name.
        @np.vectorize
        def allowed_point(x):
            for l, u in bounds:
                if l < x and x < u:
                    return 1
            return 0
        return allowed_point(x)
    # Normal component, truncated and renormalised to (6, 36).
    out1a = st.norm.pdf(xs, model.state[mu], model.state[sigma]) * allowed_point(xs, [(6, 36)])
    integral = st.norm.cdf(36, model.state[mu], model.state[sigma])
    integral -= st.norm.cdf(6, model.state[mu], model.state[sigma])
    out1a *= model.state[N1] / (model.state[N1]+model.state[N2]) / integral
    # Exponential component, truncated and renormalised to (6, 36).
    out1b = st.expon.pdf(xs, 0, 1/model.state[a]) * allowed_point(xs, [(6, 36)])
    integral = st.expon.cdf(36, 0, 1/model.state[a]) - st.expon.cdf(6, 0, 1/model.state[a])
    out1b *= model.state[N2] / (model.state[N1]+model.state[N2]) / integral
    out1 = out1a + out1b
    out2 = model.pdf(xs, None)
    assert_array_almost_equal(out1, out2, 16)
| [
"scipy.stats.expon.pdf",
"numpy.random.exponential",
"scipy.stats.norm.cdf",
"numpy.testing.assert_array_almost_equal",
"tensorprob.Exponential",
"tensorprob.Poisson",
"tensorprob.Normal",
"numpy.linspace",
"numpy.random.seed",
"numpy.concatenate",
"numpy.random.normal",
"tensorprob.Mix2",
"... | [((809, 827), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (823, 827), True, 'import numpy as np\n'), ((844, 877), 'numpy.random.exponential', 'np.random.exponential', (['(10)', '(200000)'], {}), '(10, 200000)\n', (865, 877), True, 'import numpy as np\n'), ((1032, 1063), 'numpy.random.normal', 'np.random.normal', (['(19)', '(2)', '(100000)'], {}), '(19, 2, 100000)\n', (1048, 1063), True, 'import numpy as np\n'), ((1250, 1287), 'numpy.concatenate', 'np.concatenate', (['[exp_data, norm_data]'], {}), '([exp_data, norm_data])\n', (1264, 1287), True, 'import numpy as np\n'), ((2432, 2450), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2446, 2450), True, 'import numpy as np\n'), ((2469, 2502), 'numpy.random.exponential', 'np.random.exponential', (['(10)', '(200000)'], {}), '(10, 200000)\n', (2490, 2502), True, 'import numpy as np\n'), ((2758, 2791), 'numpy.random.exponential', 'np.random.exponential', (['(20)', '(200000)'], {}), '(20, 200000)\n', (2779, 2791), True, 'import numpy as np\n'), ((3026, 3057), 'numpy.random.normal', 'np.random.normal', (['(19)', '(2)', '(100000)'], {}), '(19, 2, 100000)\n', (3042, 3057), True, 'import numpy as np\n'), ((3244, 3295), 'numpy.concatenate', 'np.concatenate', (['[exp_1_data, exp_2_data, norm_data]'], {}), '([exp_1_data, exp_2_data, norm_data])\n', (3258, 3295), True, 'import numpy as np\n'), ((3885, 3909), 'numpy.linspace', 'np.linspace', (['(0)', '(41)', '(1001)'], {}), '(0, 41, 1001)\n', (3896, 3909), True, 'import numpy as np\n'), ((4565, 4606), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['out1', 'out2', '(11)'], {}), '(out1, out2, 11)\n', (4590, 4606), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5000, 5041), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['out1', 'out2', '(11)'], {}), '(out1, out2, 11)\n', (5025, 5041), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5397, 5438), 
'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['out1', 'out2', '(11)'], {}), '(out1, out2, 11)\n', (5422, 5438), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6148, 6166), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (6162, 6166), True, 'import numpy as np\n'), ((6185, 6218), 'numpy.random.exponential', 'np.random.exponential', (['(10)', '(200000)'], {}), '(10, 200000)\n', (6206, 6218), True, 'import numpy as np\n'), ((6474, 6507), 'numpy.random.exponential', 'np.random.exponential', (['(20)', '(200000)'], {}), '(20, 200000)\n', (6495, 6507), True, 'import numpy as np\n'), ((6742, 6773), 'numpy.random.normal', 'np.random.normal', (['(19)', '(2)', '(100000)'], {}), '(19, 2, 100000)\n', (6758, 6773), True, 'import numpy as np\n'), ((6960, 7011), 'numpy.concatenate', 'np.concatenate', (['[exp_1_data, exp_2_data, norm_data]'], {}), '([exp_1_data, exp_2_data, norm_data])\n', (6974, 7011), True, 'import numpy as np\n'), ((7601, 7625), 'numpy.linspace', 'np.linspace', (['(0)', '(41)', '(1001)'], {}), '(0, 41, 1001)\n', (7612, 7625), True, 'import numpy as np\n'), ((8281, 8322), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['out1', 'out2', '(11)'], {}), '(out1, out2, 11)\n', (8306, 8322), False, 'from numpy.testing import assert_array_almost_equal\n'), ((8716, 8757), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['out1', 'out2', '(11)'], {}), '(out1, out2, 11)\n', (8741, 8757), False, 'from numpy.testing import assert_array_almost_equal\n'), ((9113, 9154), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['out1', 'out2', '(11)'], {}), '(out1, out2, 11)\n', (9138, 9154), False, 'from numpy.testing import assert_array_almost_equal\n'), ((9868, 9886), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (9882, 9886), True, 'import numpy as np\n'), ((9905, 9938), 'numpy.random.exponential', 'np.random.exponential', 
(['(10)', '(200000)'], {}), '(10, 200000)\n', (9926, 9938), True, 'import numpy as np\n'), ((10194, 10227), 'numpy.random.exponential', 'np.random.exponential', (['(20)', '(200000)'], {}), '(20, 200000)\n', (10215, 10227), True, 'import numpy as np\n'), ((10462, 10493), 'numpy.random.normal', 'np.random.normal', (['(19)', '(2)', '(100000)'], {}), '(19, 2, 100000)\n', (10478, 10493), True, 'import numpy as np\n'), ((10680, 10731), 'numpy.concatenate', 'np.concatenate', (['[exp_1_data, exp_2_data, norm_data]'], {}), '([exp_1_data, exp_2_data, norm_data])\n', (10694, 10731), True, 'import numpy as np\n'), ((11321, 11345), 'numpy.linspace', 'np.linspace', (['(0)', '(41)', '(1001)'], {}), '(0, 41, 1001)\n', (11332, 11345), True, 'import numpy as np\n'), ((12001, 12042), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['out1', 'out2', '(11)'], {}), '(out1, out2, 11)\n', (12026, 12042), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12436, 12477), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['out1', 'out2', '(11)'], {}), '(out1, out2, 11)\n', (12461, 12477), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12833, 12874), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['out1', 'out2', '(11)'], {}), '(out1, out2, 11)\n', (12858, 12874), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12907, 12924), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (12921, 12924), True, 'import numpy as np\n'), ((12940, 12972), 'numpy.random.exponential', 'np.random.exponential', (['(10)', '(20000)'], {}), '(10, 20000)\n', (12961, 12972), True, 'import numpy as np\n'), ((13049, 13079), 'numpy.random.normal', 'np.random.normal', (['(19)', '(2)', '(10000)'], {}), '(19, 2, 10000)\n', (13065, 13079), True, 'import numpy as np\n'), ((13158, 13196), 'numpy.concatenate', 'np.concatenate', (['[exp_data, norm1_data]'], {}), '([exp_data, norm1_data])\n', 
(13172, 13196), True, 'import numpy as np\n'), ((14173, 14196), 'numpy.linspace', 'np.linspace', (['(0)', '(41)', '(101)'], {}), '(0, 41, 101)\n', (14184, 14196), True, 'import numpy as np\n'), ((14546, 14598), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['(36)', 'model.state[mu]', 'model.state[sigma]'], {}), '(36, model.state[mu], model.state[sigma])\n', (14557, 14598), True, 'import scipy.stats as st\n'), ((14615, 14666), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['(6)', 'model.state[mu]', 'model.state[sigma]'], {}), '(6, model.state[mu], model.state[sigma])\n', (14626, 14666), True, 'import scipy.stats as st\n'), ((15056, 15097), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['out1', 'out2', '(16)'], {}), '(out1, out2, 16)\n', (15081, 15097), False, 'from numpy.testing import assert_array_almost_equal\n'), ((307, 314), 'tensorprob.Model', 'Model', ([], {}), '()\n', (312, 314), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((338, 349), 'tensorprob.Parameter', 'Parameter', ([], {}), '()\n', (347, 349), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((366, 384), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(1)'}), '(lower=1)\n', (375, 384), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((397, 415), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)'}), '(lower=0)\n', (406, 415), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((428, 455), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)', 'upper': '(1)'}), '(lower=0, upper=1)\n', (437, 455), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((470, 525), 'tensorprob.Normal', 'Normal', (['mu', 'sigma'], {'bounds': '[(-np.inf, 
21), (22, np.inf)]'}), '(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])\n', (476, 525), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((539, 590), 'tensorprob.Exponential', 'Exponential', (['a'], {'bounds': '[(-np.inf, 8), (10, np.inf)]'}), '(a, bounds=[(-np.inf, 8), (10, np.inf)])\n', (550, 590), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((605, 648), 'tensorprob.Mix2', 'Mix2', (['f', 'X1', 'X2'], {'bounds': '[(6, 17), (18, 36)]'}), '(f, X1, X2, bounds=[(6, 17), (18, 36)])\n', (609, 648), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((1703, 1710), 'tensorprob.Model', 'Model', ([], {}), '()\n', (1708, 1710), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((1734, 1745), 'tensorprob.Parameter', 'Parameter', ([], {}), '()\n', (1743, 1745), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((1762, 1789), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(1)', 'upper': '(4)'}), '(lower=1, upper=4)\n', (1771, 1789), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((1802, 1823), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0.06)'}), '(lower=0.06)\n', (1811, 1823), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((1836, 1854), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)'}), '(lower=0)\n', (1845, 1854), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((1869, 1896), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)', 'upper': '(1)'}), '(lower=0, upper=1)\n', (1878, 1896), False, 
'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((1911, 1938), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)', 'upper': '(1)'}), '(lower=0, upper=1)\n', (1920, 1938), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((1953, 2008), 'tensorprob.Normal', 'Normal', (['mu', 'sigma'], {'bounds': '[(-np.inf, 21), (22, np.inf)]'}), '(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])\n', (1959, 2008), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((2022, 2083), 'tensorprob.Exponential', 'Exponential', (['a'], {'bounds': '[(-np.inf, 8), (10, 27), (31, np.inf)]'}), '(a, bounds=[(-np.inf, 8), (10, 27), (31, np.inf)])\n', (2033, 2083), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((2098, 2143), 'tensorprob.Mix2', 'Mix2', (['f_1', 'X1', 'X2'], {'bounds': '[(6, 17), (18, 36)]'}), '(f_1, X1, X2, bounds=[(6, 17), (18, 36)])\n', (2102, 2143), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((2158, 2172), 'tensorprob.Exponential', 'Exponential', (['b'], {}), '(b)\n', (2169, 2172), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((2188, 2234), 'tensorprob.Mix2', 'Mix2', (['f_2', 'X12', 'X3'], {'bounds': '[(6, 17), (18, 36)]'}), '(f_2, X12, X3, bounds=[(6, 17), (18, 36)])\n', (2192, 2234), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((4215, 4267), 'scipy.stats.norm.pdf', 'st.norm.pdf', (['xs', 'model.state[mu]', 'model.state[sigma]'], {}), '(xs, model.state[mu], model.state[sigma])\n', (4226, 4267), True, 'import scipy.stats as st\n'), ((4691, 4730), 'scipy.stats.expon.pdf', 'st.expon.pdf', 
(['xs', '(0)', '(1 / model.state[a])'], {}), '(xs, 0, 1 / model.state[a])\n', (4703, 4730), True, 'import scipy.stats as st\n'), ((5107, 5146), 'scipy.stats.expon.pdf', 'st.expon.pdf', (['xs', '(0)', '(1 / model.state[b])'], {}), '(xs, 0, 1 / model.state[b])\n', (5119, 5146), True, 'import scipy.stats as st\n'), ((5471, 5478), 'tensorprob.Model', 'Model', ([], {}), '()\n', (5476, 5478), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((5502, 5513), 'tensorprob.Parameter', 'Parameter', ([], {}), '()\n', (5511, 5513), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((5530, 5557), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(1)', 'upper': '(4)'}), '(lower=1, upper=4)\n', (5539, 5557), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((5570, 5591), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0.06)'}), '(lower=0.06)\n', (5579, 5591), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((5604, 5622), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)'}), '(lower=0)\n', (5613, 5622), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((5637, 5664), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)', 'upper': '(1)'}), '(lower=0, upper=1)\n', (5646, 5664), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((5679, 5706), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)', 'upper': '(1)'}), '(lower=0, upper=1)\n', (5688, 5706), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((5721, 5776), 'tensorprob.Normal', 'Normal', (['mu', 'sigma'], {'bounds': '[(-np.inf, 21), 
(22, np.inf)]'}), '(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])\n', (5727, 5776), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((5790, 5851), 'tensorprob.Exponential', 'Exponential', (['a'], {'bounds': '[(-np.inf, 8), (10, 27), (31, np.inf)]'}), '(a, bounds=[(-np.inf, 8), (10, 27), (31, np.inf)])\n', (5801, 5851), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((5865, 5879), 'tensorprob.Exponential', 'Exponential', (['b'], {}), '(b)\n', (5876, 5879), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((5896, 5950), 'tensorprob.Mix3', 'Mix3', (['f_1', 'f_2', 'X1', 'X2', 'X3'], {'bounds': '[(6, 17), (18, 36)]'}), '(f_1, f_2, X1, X2, X3, bounds=[(6, 17), (18, 36)])\n', (5900, 5950), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((7931, 7983), 'scipy.stats.norm.pdf', 'st.norm.pdf', (['xs', 'model.state[mu]', 'model.state[sigma]'], {}), '(xs, model.state[mu], model.state[sigma])\n', (7942, 7983), True, 'import scipy.stats as st\n'), ((8407, 8446), 'scipy.stats.expon.pdf', 'st.expon.pdf', (['xs', '(0)', '(1 / model.state[a])'], {}), '(xs, 0, 1 / model.state[a])\n', (8419, 8446), True, 'import scipy.stats as st\n'), ((8823, 8862), 'scipy.stats.expon.pdf', 'st.expon.pdf', (['xs', '(0)', '(1 / model.state[b])'], {}), '(xs, 0, 1 / model.state[b])\n', (8835, 8862), True, 'import scipy.stats as st\n'), ((9187, 9194), 'tensorprob.Model', 'Model', ([], {}), '()\n', (9192, 9194), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((9218, 9229), 'tensorprob.Parameter', 'Parameter', ([], {}), '()\n', (9227, 9229), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), 
((9246, 9273), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(1)', 'upper': '(4)'}), '(lower=1, upper=4)\n', (9255, 9273), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((9286, 9307), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0.06)'}), '(lower=0.06)\n', (9295, 9307), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((9320, 9338), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)'}), '(lower=0)\n', (9329, 9338), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((9353, 9380), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)', 'upper': '(1)'}), '(lower=0, upper=1)\n', (9362, 9380), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((9395, 9422), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)', 'upper': '(1)'}), '(lower=0, upper=1)\n', (9404, 9422), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((9437, 9492), 'tensorprob.Normal', 'Normal', (['mu', 'sigma'], {'bounds': '[(-np.inf, 21), (22, np.inf)]'}), '(mu, sigma, bounds=[(-np.inf, 21), (22, np.inf)])\n', (9443, 9492), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((9506, 9567), 'tensorprob.Exponential', 'Exponential', (['a'], {'bounds': '[(-np.inf, 8), (10, 27), (31, np.inf)]'}), '(a, bounds=[(-np.inf, 8), (10, 27), (31, np.inf)])\n', (9517, 9567), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((9581, 9595), 'tensorprob.Exponential', 'Exponential', (['b'], {}), '(b)\n', (9592, 9595), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, 
Poisson\n'), ((9612, 9670), 'tensorprob.MixN', 'MixN', (['[f_1, f_2]', '[X1, X2, X3]'], {'bounds': '[(6, 17), (18, 36)]'}), '([f_1, f_2], [X1, X2, X3], bounds=[(6, 17), (18, 36)])\n', (9616, 9670), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((11651, 11703), 'scipy.stats.norm.pdf', 'st.norm.pdf', (['xs', 'model.state[mu]', 'model.state[sigma]'], {}), '(xs, model.state[mu], model.state[sigma])\n', (11662, 11703), True, 'import scipy.stats as st\n'), ((12127, 12166), 'scipy.stats.expon.pdf', 'st.expon.pdf', (['xs', '(0)', '(1 / model.state[a])'], {}), '(xs, 0, 1 / model.state[a])\n', (12139, 12166), True, 'import scipy.stats as st\n'), ((12543, 12582), 'scipy.stats.expon.pdf', 'st.expon.pdf', (['xs', '(0)', '(1 / model.state[b])'], {}), '(xs, 0, 1 / model.state[b])\n', (12555, 12582), True, 'import scipy.stats as st\n'), ((13251, 13258), 'tensorprob.Model', 'Model', ([], {}), '()\n', (13256, 13258), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((13282, 13293), 'tensorprob.Parameter', 'Parameter', ([], {}), '()\n', (13291, 13293), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((13310, 13328), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(1)'}), '(lower=1)\n', (13319, 13328), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((13341, 13359), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)'}), '(lower=0)\n', (13350, 13359), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((13374, 13392), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)'}), '(lower=0)\n', (13383, 13392), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((13406, 
13424), 'tensorprob.Parameter', 'Parameter', ([], {'lower': '(0)'}), '(lower=0)\n', (13415, 13424), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((13437, 13453), 'tensorprob.Poisson', 'Poisson', (['(N1 + N2)'], {}), '(N1 + N2)\n', (13444, 13453), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((13466, 13483), 'tensorprob.Normal', 'Normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (13472, 13483), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((13497, 13511), 'tensorprob.Exponential', 'Exponential', (['a'], {}), '(a)\n', (13508, 13511), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((13526, 13572), 'tensorprob.Mix2', 'Mix2', (['(N1 / (N1 + N2))', 'X1', 'X2'], {'bounds': '[(6, 36)]'}), '(N1 / (N1 + N2), X1, X2, bounds=[(6, 36)])\n', (13530, 13572), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((14447, 14499), 'scipy.stats.norm.pdf', 'st.norm.pdf', (['xs', 'model.state[mu]', 'model.state[sigma]'], {}), '(xs, model.state[mu], model.state[sigma])\n', (14458, 14499), True, 'import scipy.stats as st\n'), ((14756, 14795), 'scipy.stats.expon.pdf', 'st.expon.pdf', (['xs', '(0)', '(1 / model.state[a])'], {}), '(xs, 0, 1 / model.state[a])\n', (14768, 14795), True, 'import scipy.stats as st\n'), ((14840, 14879), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['(36)', '(0)', '(1 / model.state[a])'], {}), '(36, 0, 1 / model.state[a])\n', (14852, 14879), True, 'import scipy.stats as st\n'), ((14880, 14918), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['(6)', '(0)', '(1 / model.state[a])'], {}), '(6, 0, 1 / model.state[a])\n', (14892, 14918), True, 'import scipy.stats as st\n'), ((13759, 13777), 'numpy.ones_like', 'np.ones_like', 
(['data'], {}), '(data)\n', (13771, 13777), True, 'import numpy as np\n'), ((13799, 13816), 'tensorprob.MigradOptimizer', 'MigradOptimizer', ([], {}), '()\n', (13814, 13816), False, 'from tensorprob import Exponential, MigradOptimizer, Mix2, Mix3, MixN, Model, Normal, Parameter, Poisson\n'), ((4324, 4375), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['u', 'model.state[mu]', 'model.state[sigma]'], {}), '(u, model.state[mu], model.state[sigma])\n', (4335, 4375), True, 'import scipy.stats as st\n'), ((4386, 4437), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['l', 'model.state[mu]', 'model.state[sigma]'], {}), '(l, model.state[mu], model.state[sigma])\n', (4397, 4437), True, 'import scipy.stats as st\n'), ((4785, 4823), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['u', '(0)', '(1 / model.state[a])'], {}), '(u, 0, 1 / model.state[a])\n', (4797, 4823), True, 'import scipy.stats as st\n'), ((4832, 4870), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['l', '(0)', '(1 / model.state[a])'], {}), '(l, 0, 1 / model.state[a])\n', (4844, 4870), True, 'import scipy.stats as st\n'), ((5201, 5239), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['u', '(0)', '(1 / model.state[b])'], {}), '(u, 0, 1 / model.state[b])\n', (5213, 5239), True, 'import scipy.stats as st\n'), ((5248, 5286), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['l', '(0)', '(1 / model.state[b])'], {}), '(l, 0, 1 / model.state[b])\n', (5260, 5286), True, 'import scipy.stats as st\n'), ((8040, 8091), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['u', 'model.state[mu]', 'model.state[sigma]'], {}), '(u, model.state[mu], model.state[sigma])\n', (8051, 8091), True, 'import scipy.stats as st\n'), ((8102, 8153), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['l', 'model.state[mu]', 'model.state[sigma]'], {}), '(l, model.state[mu], model.state[sigma])\n', (8113, 8153), True, 'import scipy.stats as st\n'), ((8501, 8539), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['u', '(0)', '(1 / model.state[a])'], {}), '(u, 0, 1 / model.state[a])\n', (8513, 8539), True, 
'import scipy.stats as st\n'), ((8548, 8586), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['l', '(0)', '(1 / model.state[a])'], {}), '(l, 0, 1 / model.state[a])\n', (8560, 8586), True, 'import scipy.stats as st\n'), ((8917, 8955), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['u', '(0)', '(1 / model.state[b])'], {}), '(u, 0, 1 / model.state[b])\n', (8929, 8955), True, 'import scipy.stats as st\n'), ((8964, 9002), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['l', '(0)', '(1 / model.state[b])'], {}), '(l, 0, 1 / model.state[b])\n', (8976, 9002), True, 'import scipy.stats as st\n'), ((11760, 11811), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['u', 'model.state[mu]', 'model.state[sigma]'], {}), '(u, model.state[mu], model.state[sigma])\n', (11771, 11811), True, 'import scipy.stats as st\n'), ((11822, 11873), 'scipy.stats.norm.cdf', 'st.norm.cdf', (['l', 'model.state[mu]', 'model.state[sigma]'], {}), '(l, model.state[mu], model.state[sigma])\n', (11833, 11873), True, 'import scipy.stats as st\n'), ((12221, 12259), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['u', '(0)', '(1 / model.state[a])'], {}), '(u, 0, 1 / model.state[a])\n', (12233, 12259), True, 'import scipy.stats as st\n'), ((12268, 12306), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['l', '(0)', '(1 / model.state[a])'], {}), '(l, 0, 1 / model.state[a])\n', (12280, 12306), True, 'import scipy.stats as st\n'), ((12637, 12675), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['u', '(0)', '(1 / model.state[b])'], {}), '(u, 0, 1 / model.state[b])\n', (12649, 12675), True, 'import scipy.stats as st\n'), ((12684, 12722), 'scipy.stats.expon.cdf', 'st.expon.cdf', (['l', '(0)', '(1 / model.state[b])'], {}), '(l, 0, 1 / model.state[b])\n', (12696, 12722), True, 'import scipy.stats as st\n')] |
import os
import numpy as np
from sklearn.svm import SVC, LinearSVC
from sklearn.metrics import classification_report
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
import argparse
import gc
from utils import *
from sklearn.model_selection import StratifiedKFold
# ---------------------------------------------------------------------------
# Baseline classifiers (SVC / LinearSVC / RF / LR / DT) on hand-crafted
# protein features (aac / dpc / ctd / pseaac1 / pseaac2, or all concatenated).
# Runs 5-fold stratified CV on the training split and evaluates every fold
# model on the independent split, appending metrics to text files under
# <data>/<file>_record/baseline/<feature>_<classifier>/.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='ml_features_classifier')
parser.add_argument('--feature', type=str, default='dpc')  # aac, dpc, ctd, pseaac1, pseaac2, all
parser.add_argument('--classify', type=str, default='linearsvc')  # LR, DT, RF, linearsvc, svc
parser.add_argument('--file', type=str, default='VFG-2706-iid')
parser.add_argument('--signal', type=int, default=13)  # 13, 23, 33, 43, 53 (not used below)
args = parser.parse_args()

data_dir = os.getcwd() + "/data/"
features_dir = data_dir + args.file + "_features/"
class_name_dir = data_dir + args.file + "_train_class_name"
class_name = load_class_name(class_name_dir)
record_dir = data_dir + args.file + "_record/baseline/" + args.feature + "_" + args.classify + "/"
sig = args.feature + "_" + args.classify
if not os.path.exists(record_dir):
    os.makedirs(record_dir)


def _load_split(split):
    """Return (features, labels) for one split ('train' or 'indep').

    For --feature=all the five descriptor matrices are concatenated
    column-wise; labels are read from the aac file (the descriptor files
    share the same sample order).
    """
    if args.feature == 'all':
        base = np.load(features_dir + split + "_aac_ml.npz", allow_pickle=True)
        data, labels = base['data'], base['labels']
        for feat in ('dpc', 'ctd', 'pseaac1', 'pseaac2'):
            extra = np.load(features_dir + split + "_" + feat + "_ml.npz", allow_pickle=True)['data']
            data = np.concatenate((data, extra), axis=1)
    else:
        npz = np.load(features_dir + split + "_" + args.feature + "_ml.npz", allow_pickle=True)
        data, labels = npz['data'], npz['labels']
    # BUG FIX: the original kept ``map(int, labels)`` -- in Python 3 that is a
    # one-shot iterator, which breaks StratifiedKFold.split and the repeated
    # np.array(train_label)[idx] indexing below.  Materialise a list of ints.
    return data, [int(l) for l in labels]


def _write_scores(fh, y_true, y_pred):
    """Append micro- and macro-averaged precision / recall / F1 to ``fh``."""
    for avg in ('micro', 'macro'):
        fh.write('{}\nprecision is: {:.4f}\nRecall is: {:.4f}\nF1_score is: {:.4f}\n'.format(
            avg.capitalize(),
            metrics.precision_score(y_true, y_pred, average=avg),
            metrics.recall_score(y_true, y_pred, average=avg),
            metrics.f1_score(y_true, y_pred, average=avg)))


def _build_model(name, random_state):
    """Instantiate the classifier selected by --classify.

    BUG FIX: the original referenced ``svm.SVC`` although only ``SVC`` is
    imported (``from sklearn.svm import SVC, LinearSVC``), so --classify=svc
    raised NameError.
    """
    if name == 'svc':
        return SVC(random_state=random_state, decision_function_shape='ovr')  # rbf kernel
    if name == 'linearsvc':
        return LinearSVC(random_state=random_state)
    if name == 'RF':
        return RandomForestClassifier(random_state=random_state, n_estimators=100)
    if name == 'LR':
        return LogisticRegression(random_state=random_state)
    if name == 'DT':
        return DecisionTreeClassifier(random_state=random_state)
    # The original silently left model=None here and crashed later on .fit().
    raise ValueError('unknown classifier: {}'.format(name))


train_data, train_label = _load_split('train')
# Scale features to [0, 1]; the scaler is fitted on train only and reused
# for the independent split (no test-set leakage).
scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
scaler.fit(train_data)
train_data = scaler.transform(train_data)

indep_feature_data, indep_feature_label = _load_split('indep')
indep_feature_data = scaler.transform(indep_feature_data)

random_state = np.random.RandomState(0)
f_train = open(record_dir + sig + '_train.txt', 'a')
cv_outer = StratifiedKFold(n_splits=5, shuffle=True, random_state=43)
for j, (train, test) in enumerate(cv_outer.split(train_data, train_label)):
    print("Fold number: ", j)
    f_train.write("Fold number: {}\n".format(j))
    X_train, X_test = train_data[train], train_data[test]
    Y_train, Y_test = np.array(train_label)[train], np.array(train_label)[test]
    model = _build_model(args.classify, random_state)
    model.fit(X_train, Y_train)
    # The fold model used to be pickled; path kept for reference:
    # f_save_best_model_dir = record_dir + sig + '_bestmodel' + str(j + 1)
    # pickle.dump(model, open(f_save_best_model_dir, 'ab'))  # save model
    train_acc = model.score(X_train, Y_train)
    test_acc = model.score(X_test, Y_test)
    y_pred = model.predict(X_test)
    f_train.write('Best model, Training acc is {:.4f}\nval_acc is: {:.4f}\n'.format(train_acc, test_acc))
    _write_scores(f_train, Y_test, y_pred)
    # Independent test with this fold's model.
    with open(record_dir + sig + '_indep_bestmodel' + str(j + 1) + '.txt', 'a') as f_indep:
        indep_pred_labels = model.predict(indep_feature_data).tolist()
        indep_acc = metrics.accuracy_score(indep_feature_label, indep_pred_labels)
        indep_cla_report = classification_report(indep_feature_label, indep_pred_labels,
                                                target_names=list(class_name))
        f_indep.write('indep_acc is: {:.4f}\nClassfication report:\n{}\n'.format(indep_acc, indep_cla_report))
        _write_scores(f_indep, indep_feature_label, indep_pred_labels)
    del model
    gc.collect()
f_train.close()
print("finish\n")
| [
"sklearn.metrics.precision_score",
"sklearn.model_selection.StratifiedKFold",
"sklearn.metrics.recall_score",
"numpy.array",
"numpy.random.RandomState",
"os.path.exists",
"argparse.ArgumentParser",
"sklearn.tree.DecisionTreeClassifier",
"numpy.concatenate",
"sklearn.preprocessing.MinMaxScaler",
... | [((485, 546), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ml_features_classifier"""'}), "(description='ml_features_classifier')\n", (508, 546), False, 'import argparse\n'), ((2414, 2462), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (2440, 2462), False, 'from sklearn import preprocessing\n'), ((3849, 3873), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (3870, 3873), True, 'import numpy as np\n'), ((3945, 4003), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(43)'}), '(n_splits=5, shuffle=True, random_state=43)\n', (3960, 4003), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((921, 932), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (930, 932), False, 'import os\n'), ((1248, 1274), 'os.path.exists', 'os.path.exists', (['record_dir'], {}), '(record_dir)\n', (1262, 1274), False, 'import os\n'), ((1280, 1303), 'os.makedirs', 'os.makedirs', (['record_dir'], {}), '(record_dir)\n', (1291, 1303), False, 'import os\n'), ((1611, 1657), 'numpy.concatenate', 'np.concatenate', (['(train_data, dpc_data)'], {'axis': '(1)'}), '((train_data, dpc_data), axis=1)\n', (1625, 1657), True, 'import numpy as np\n'), ((1761, 1807), 'numpy.concatenate', 'np.concatenate', (['(train_data, ctd_data)'], {'axis': '(1)'}), '((train_data, ctd_data), axis=1)\n', (1775, 1807), True, 'import numpy as np\n'), ((1919, 1969), 'numpy.concatenate', 'np.concatenate', (['(train_data, pseaac1_data)'], {'axis': '(1)'}), '((train_data, pseaac1_data), axis=1)\n', (1933, 1969), True, 'import numpy as np\n'), ((2081, 2131), 'numpy.concatenate', 'np.concatenate', (['(train_data, pseaac2_data)'], {'axis': '(1)'}), '((train_data, pseaac2_data), axis=1)\n', (2095, 2131), True, 'import numpy as np\n'), ((2872, 2932), 'numpy.concatenate', 'np.concatenate', 
(['(indep_feature_data, indep_dpc_data)'], {'axis': '(1)'}), '((indep_feature_data, indep_dpc_data), axis=1)\n', (2886, 2932), True, 'import numpy as np\n'), ((3049, 3109), 'numpy.concatenate', 'np.concatenate', (['(indep_feature_data, indep_ctd_data)'], {'axis': '(1)'}), '((indep_feature_data, indep_ctd_data), axis=1)\n', (3063, 3109), True, 'import numpy as np\n'), ((3234, 3298), 'numpy.concatenate', 'np.concatenate', (['(indep_feature_data, indep_pseaac1_data)'], {'axis': '(1)'}), '((indep_feature_data, indep_pseaac1_data), axis=1)\n', (3248, 3298), True, 'import numpy as np\n'), ((3423, 3487), 'numpy.concatenate', 'np.concatenate', (['(indep_feature_data, indep_pseaac2_data)'], {'axis': '(1)'}), '((indep_feature_data, indep_pseaac2_data), axis=1)\n', (3437, 3487), True, 'import numpy as np\n'), ((5251, 5304), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['Y_test', 'y_pred'], {'average': '"""micro"""'}), "(Y_test, y_pred, average='micro')\n", (5271, 5304), False, 'from sklearn import metrics\n'), ((5327, 5383), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['Y_test', 'y_pred'], {'average': '"""micro"""'}), "(Y_test, y_pred, average='micro')\n", (5350, 5383), False, 'from sklearn import metrics\n'), ((5405, 5454), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['Y_test', 'y_pred'], {'average': '"""micro"""'}), "(Y_test, y_pred, average='micro')\n", (5421, 5454), False, 'from sklearn import metrics\n'), ((5476, 5529), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['Y_test', 'y_pred'], {'average': '"""macro"""'}), "(Y_test, y_pred, average='macro')\n", (5496, 5529), False, 'from sklearn import metrics\n'), ((5554, 5610), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['Y_test', 'y_pred'], {'average': '"""macro"""'}), "(Y_test, y_pred, average='macro')\n", (5577, 5610), False, 'from sklearn import metrics\n'), ((5634, 5683), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['Y_test', 'y_pred'], {'average': 
'"""macro"""'}), "(Y_test, y_pred, average='macro')\n", (5650, 5683), False, 'from sklearn import metrics\n'), ((6536, 6592), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['indep_feature_label', 'indep_pred_labels'], {}), '(indep_feature_label, indep_pred_labels)\n', (6552, 6592), False, 'from sklearn.metrics import confusion_matrix\n'), ((6609, 6671), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['indep_feature_label', 'indep_pred_labels'], {}), '(indep_feature_label, indep_pred_labels)\n', (6631, 6671), False, 'from sklearn import metrics\n'), ((6977, 7054), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['indep_feature_label', 'indep_pred_labels'], {'average': '"""micro"""'}), "(indep_feature_label, indep_pred_labels, average='micro')\n", (6997, 7054), False, 'from sklearn import metrics\n'), ((7077, 7162), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['indep_feature_label', 'indep_pred_labels'], {'average': '"""micro"""'}), "(indep_feature_label, indep_pred_labels, average='micro'\n )\n", (7100, 7162), False, 'from sklearn import metrics\n'), ((7179, 7252), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['indep_feature_label', 'indep_pred_labels'], {'average': '"""micro"""'}), "(indep_feature_label, indep_pred_labels, average='micro')\n", (7195, 7252), False, 'from sklearn import metrics\n'), ((7274, 7351), 'sklearn.metrics.recall_score', 'metrics.recall_score', (['indep_feature_label', 'indep_pred_labels'], {'average': '"""macro"""'}), "(indep_feature_label, indep_pred_labels, average='macro')\n", (7294, 7351), False, 'from sklearn import metrics\n'), ((7376, 7461), 'sklearn.metrics.precision_score', 'metrics.precision_score', (['indep_feature_label', 'indep_pred_labels'], {'average': '"""macro"""'}), "(indep_feature_label, indep_pred_labels, average='macro'\n )\n", (7399, 7461), False, 'from sklearn import metrics\n'), ((7480, 7553), 'sklearn.metrics.f1_score', 'metrics.f1_score', 
(['indep_feature_label', 'indep_pred_labels'], {'average': '"""macro"""'}), "(indep_feature_label, indep_pred_labels, average='macro')\n", (7496, 7553), False, 'from sklearn import metrics\n'), ((8189, 8201), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8199, 8201), False, 'import gc\n'), ((1348, 1409), 'numpy.load', 'np.load', (["(features_dir + 'train_aac_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'train_aac_ml.npz', allow_pickle=True)\n", (1355, 1409), True, 'import numpy as np\n'), ((1436, 1497), 'numpy.load', 'np.load', (["(features_dir + 'train_aac_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'train_aac_ml.npz', allow_pickle=True)\n", (1443, 1497), True, 'import numpy as np\n'), ((1524, 1585), 'numpy.load', 'np.load', (["(features_dir + 'train_dpc_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'train_dpc_ml.npz', allow_pickle=True)\n", (1531, 1585), True, 'import numpy as np\n'), ((1674, 1735), 'numpy.load', 'np.load', (["(features_dir + 'train_ctd_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'train_ctd_ml.npz', allow_pickle=True)\n", (1681, 1735), True, 'import numpy as np\n'), ((1828, 1893), 'numpy.load', 'np.load', (["(features_dir + 'train_pseaac1_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'train_pseaac1_ml.npz', allow_pickle=True)\n", (1835, 1893), True, 'import numpy as np\n'), ((1990, 2055), 'numpy.load', 'np.load', (["(features_dir + 'train_pseaac2_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'train_pseaac2_ml.npz', allow_pickle=True)\n", (1997, 2055), True, 'import numpy as np\n'), ((2174, 2252), 'numpy.load', 'np.load', (["(features_dir + 'train_' + args.feature + '_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'train_' + args.feature + '_ml.npz', allow_pickle=True)\n", (2181, 2252), True, 'import numpy as np\n'), ((2279, 2357), 'numpy.load', 'np.load', (["(features_dir + 'train_' + args.feature + '_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 
'train_' + args.feature + '_ml.npz', allow_pickle=True)\n", (2286, 2357), True, 'import numpy as np\n'), ((2588, 2649), 'numpy.load', 'np.load', (["(features_dir + 'indep_aac_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'indep_aac_ml.npz', allow_pickle=True)\n", (2595, 2649), True, 'import numpy as np\n'), ((2684, 2745), 'numpy.load', 'np.load', (["(features_dir + 'indep_aac_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'indep_aac_ml.npz', allow_pickle=True)\n", (2691, 2745), True, 'import numpy as np\n'), ((2777, 2838), 'numpy.load', 'np.load', (["(features_dir + 'indep_dpc_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'indep_dpc_ml.npz', allow_pickle=True)\n", (2784, 2838), True, 'import numpy as np\n'), ((2954, 3015), 'numpy.load', 'np.load', (["(features_dir + 'indep_ctd_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'indep_ctd_ml.npz', allow_pickle=True)\n", (2961, 3015), True, 'import numpy as np\n'), ((3135, 3200), 'numpy.load', 'np.load', (["(features_dir + 'indep_pseaac1_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'indep_pseaac1_ml.npz', allow_pickle=True)\n", (3142, 3200), True, 'import numpy as np\n'), ((3324, 3389), 'numpy.load', 'np.load', (["(features_dir + 'indep_pseaac2_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'indep_pseaac2_ml.npz', allow_pickle=True)\n", (3331, 3389), True, 'import numpy as np\n'), ((3520, 3598), 'numpy.load', 'np.load', (["(features_dir + 'indep_' + args.feature + '_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'indep_' + args.feature + '_ml.npz', allow_pickle=True)\n", (3527, 3598), True, 'import numpy as np\n'), ((3633, 3711), 'numpy.load', 'np.load', (["(features_dir + 'indep_' + args.feature + '_ml.npz')"], {'allow_pickle': '(True)'}), "(features_dir + 'indep_' + args.feature + '_ml.npz', allow_pickle=True)\n", (3640, 3711), True, 'import numpy as np\n'), ((4223, 4244), 'numpy.array', 'np.array', (['train_label'], {}), 
'(train_label)\n', (4231, 4244), True, 'import numpy as np\n'), ((4253, 4274), 'numpy.array', 'np.array', (['train_label'], {}), '(train_label)\n', (4261, 4274), True, 'import numpy as np\n'), ((4475, 4511), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (4484, 4511), False, 'from sklearn.svm import SVC, LinearSVC\n'), ((4560, 4627), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': 'random_state', 'n_estimators': '(100)'}), '(random_state=random_state, n_estimators=100)\n', (4582, 4627), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4676, 4721), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (4694, 4721), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4770, 4819), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': 'random_state'}), '(random_state=random_state)\n', (4792, 4819), False, 'from sklearn.tree import DecisionTreeClassifier\n')] |
import numpy as np
import sympy as sp
from scipy.misc import derivative
from prettytable import PrettyTable
import math
from math import *
def nuevosValoresa(ecua, derivadas, Ecuaciones, variables, var):
    """Apply one Newton-Raphson step to each equation, in order.

    Every equation ``ecua[i]`` and its derivative ``derivadas[i]`` are
    strings evaluated with ``math``'s namespace plus the current variable
    values in ``variables``.  The variable ``var[i]`` is then overwritten
    in place with ``x - f(x)/f'(x)``, so later equations already see the
    updated value.

    Returns (list of updated values, list of evaluated function values).
    """
    math_ns = vars(math)
    updated = []
    residuals = []
    for idx in range(Ecuaciones):
        name = f'{var[idx]}'
        f_val = eval(ecua[idx], math_ns, variables)
        df_val = eval(derivadas[idx], math_ns, variables)
        next_val = variables[name] - f_val / df_val
        variables[name] = next_val  # in-place update: visible to equation idx+1
        updated.append(next_val)
        residuals.append(f_val)
    return updated, residuals
def cambiarValores(dic_var, nuevos_valores, num_ecuaciones, var):
    """Overwrite the first ``num_ecuaciones`` variables of ``dic_var`` in place.

    ``dic_var[var[i]] = nuevos_valores[i]``; the mutated dict is also
    returned for convenience.
    """
    for idx in range(num_ecuaciones):
        key = str(var[idx])
        dic_var[key] = nuevos_valores[idx]
    return dic_var
def derivadaSimple(ecuaciones, variables, num_ecuaciones):
    """Symbolically differentiate each equation w.r.t. its own variable.

    ``ecuaciones[i]`` (a string sympy can parse) is differentiated with
    respect to ``variables[i]``; the resulting expressions are returned
    as strings.
    """
    def _partial(expr, name):
        # evaluate=True makes Derivative perform the differentiation eagerly.
        return str(sp.Derivative(expr, sp.Symbol(name), evaluate=True))

    return [_partial(ecuaciones[i], variables[i]) for i in range(num_ecuaciones)]
def efunciones(var, valor, Ecuaciones):
    """Pair the first ``Ecuaciones`` variable names with their initial values.

    Returns the dict {str(var[i]): valor[i]} used as the eval() namespace.
    """
    return {f'{var[i]}': valor[i] for i in range(Ecuaciones)}
def newtonModificado(ecua, var, valor, Ecuaciones, emax, N=50):
    """Modified multivariable Newton-Raphson solver.

    Iterates up to ``N-1`` times, updating each variable with its own
    1-D Newton step (each equation is differentiated only w.r.t. its
    paired variable), until the largest |f_i| drops below ``emax``.
    Prints a PrettyTable of the iterations, the partial derivatives and
    the final solution.

    Args:
        ecua: list of equation strings (math-module syntax, evaluated via eval).
        var: list of variable-name strings, one per equation.
        valor: list of initial values, one per variable.
        Ecuaciones: number of equations/variables.
        emax: absolute error tolerance on the residuals.
        N: maximum number of iterations (default 50).
    """
    # Build the iteration-table header: one (f_i=0, variable) column pair
    # per equation, plus iteration index and error columns.
    encabezados = []
    contenido = []
    encabezados.append("Iteracion")
    for i in var:
        encabezados.append(f'f{var.index(i)}=0')
        #encabezados.append(f'f')
        #print(var.index(i))
        encabezados.append(i)
    encabezados.append(f"Error")
    tabla = PrettyTable(encabezados)
    tabla.title = "METODO DE NEWTON RHAPSON MULTIVARIABLE MODIFICADO"
    # Initial values: {variable-name: value} namespace for eval().
    dicc_valores = efunciones(var, valor, Ecuaciones)
    # Symbolic partial derivatives df_i/d(var_i), as strings.
    derv_parciales = derivadaSimple(ecua, var, Ecuaciones)
    for k in range(1, N):
        # Re-seed the namespace from `valor` (updated at the end of each pass).
        variables = cambiarValores(dicc_valores, valor, Ecuaciones, var)
        derivadas_numericas = []  # NOTE(review): never used below
        # One Newton step per variable; mutates `variables` in place.
        nuevos_Valores, funcion_evaluada = nuevosValoresa(
            ecua, derv_parciales, Ecuaciones, variables,var)
        # Error = largest residual magnitude (max/min cover both signs).
        ea = abs(max(funcion_evaluada))
        eb = abs(min(funcion_evaluada))
        if ea > eb:
            error = ea
        elif eb >= ea:
            error = eb
        # Converged: stop before logging this iteration's row.
        if error < emax or error == 0:
            break
        # Append this iteration's row to the table.
        contenido = []
        contenido.append(k)
        for i in range(0, Ecuaciones):
            contenido.append("{0:.7f}".format(funcion_evaluada[i]))
            contenido.append("{0:.7f}".format(nuevos_Valores[i]))
        contenido.append("{0:.7f}".format(error))
        tabla.add_row(contenido)
        valor = nuevos_Valores
    # Build and print the partial-derivatives table.
    u = np.array(derv_parciales).T
    derivadas_p = PrettyTable()
    derivadas_p.title = "Derivadas parciales"
    der_res = []
    for j in range(0, Ecuaciones):
        der_res.append(f'df{j}/d{var[j]}')
    derivadas_p.add_row(u)
    derivadas_p.field_names = der_res
    print(derivadas_p)
    # print(f'{u}')
    print(tabla)
    print(f'Solucion del sistema: ')
    for i in range(0, Ecuaciones):
        print(f' {var[i]} = {"{0:.4f}".format(valor[i])}')
""" ecua = ['x**2-10*x+y**2+8', 'x*y**2+x-10*y+8']
var = ['x', 'y']
valori = [0.0, 0.0]
Ecuaciones = 2 """
""" ecua = ['x**2+x-y**2-1', 'y-sin(x**2)']
var = ['x', 'y']
valori = [0.0, 0.0]
Ecuaciones = 2 """
""" ecua = ['x**2+y**2+z**2-9', 'x*y*z-1', 'x+y-z**2']
var = ['x', 'y', 'z']
valori = [2.5, 0.2, 1.6]
Ecuaciones = 3 """
""" #no converge
ecua = ['x**2-625*y**2', '3*x-cos(y*z)-0.5', 'exp(-x*y)+20*z+(10*pi-3)/3']
var = ['x', 'y', 'z']
valori = [1, 1, 1]
Ecuaciones = 3
"""
""" ecua = ['3*x-cos(y*z)-0.5', 'x**2-625*y**2', 'exp(-x*y)+20*z+(10*pi-3)/3']
var = ['x', 'y', 'z']
valori = [1, 0.2, 1]
Ecuaciones = 3
newtonModificado(ecua, var, valori, Ecuaciones, 1e-3) """
| [
"prettytable.PrettyTable",
"numpy.array",
"sympy.Symbol",
"sympy.Derivative"
] | [((1972, 1996), 'prettytable.PrettyTable', 'PrettyTable', (['encabezados'], {}), '(encabezados)\n', (1983, 1996), False, 'from prettytable import PrettyTable\n'), ((3236, 3249), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (3247, 3249), False, 'from prettytable import PrettyTable\n'), ((1211, 1234), 'sympy.Symbol', 'sp.Symbol', (['variables[i]'], {}), '(variables[i])\n', (1220, 1234), True, 'import sympy as sp\n'), ((1253, 1301), 'sympy.Derivative', 'sp.Derivative', (['ecuaciones[i]', 'var'], {'evaluate': '(True)'}), '(ecuaciones[i], var, evaluate=True)\n', (1266, 1301), True, 'import sympy as sp\n'), ((3191, 3215), 'numpy.array', 'np.array', (['derv_parciales'], {}), '(derv_parciales)\n', (3199, 3215), True, 'import numpy as np\n')] |
from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
import torch.nn.functional as F
from hdbscan import HDBSCAN
from sklearn.cluster import KMeans, DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
import time
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
# from scipy.special import softmax
from abmt import datasets
from abmt import models
from abmt.trainers import ABMTTrainer
from abmt.evaluators import Evaluator, extract_features
from abmt.utils.data import IterLoader
from abmt.utils.data import transforms as T
from abmt.utils.data.sampler import RandomMultipleGallerySampler
from abmt.utils.data.preprocessor import Preprocessor
from abmt.utils.logging import Logger
from abmt.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
from abmt.utils.rerank import compute_jaccard_dist
# Module-level training state; best_mAP is reset and updated inside main_worker.
start_epoch = best_mAP = 0
def get_data(name, data_dir):
    """Instantiate the dataset registered under ``name``, rooted at data_dir/name."""
    return datasets.create(name, osp.join(data_dir, name))
def get_train_loader(dataset, height, width, batch_size, workers,
                     num_instances, iters, mutual=False):
    """Build an IterLoader over ``dataset.train`` with training augmentation.

    When ``num_instances > 0`` batches are drawn identity-balanced via
    RandomMultipleGallerySampler (DataLoader shuffling disabled); otherwise
    the DataLoader shuffles on its own.  ``mutual`` is forwarded to
    Preprocessor (two augmented views per sample when True).
    """
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    augmentation = T.Compose([
        T.Resize((height, width), interpolation=3),
        T.RandomHorizontalFlip(p=0.5),
        T.Pad(10),
        T.RandomCrop((height, width)),
        T.ToTensor(),
        T.Normalize(mean=imagenet_mean, std=imagenet_std),
        T.RandomErasing(probability=0.5, mean=imagenet_mean),
    ])
    train_set = dataset.train
    use_identity_sampler = num_instances > 0
    sampler = RandomMultipleGallerySampler(train_set, num_instances) if use_identity_sampler else None
    source = Preprocessor(train_set, root=dataset.images_dir,
                        transform=augmentation, mutual=mutual)
    loader = DataLoader(source,
                       batch_size=batch_size, num_workers=workers, sampler=sampler,
                       shuffle=not use_identity_sampler, pin_memory=True, drop_last=True)
    return IterLoader(loader, length=iters)
def get_test_loader(dataset, height, width, batch_size, workers, testset=None):
    """Deterministic (no augmentation) loader for feature extraction / eval.

    If ``testset`` is not given, the union of query and gallery is used.
    """
    eval_transform = T.Compose([
        T.Resize((height, width), interpolation=3),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),
    ])
    if testset is None:
        testset = list(set(dataset.query) | set(dataset.gallery))
    return DataLoader(
        Preprocessor(testset, root=dataset.images_dir, transform=eval_transform),
        batch_size=batch_size, num_workers=workers,
        shuffle=False, pin_memory=True)
def create_model(args):
    """Create the student network and its EMA teacher.

    Both nets are moved to GPU and wrapped in DataParallel.  Unless
    ``args.no_source`` is set, both are initialised from the
    source-pretrained checkpoint ``args.init_1``.  The teacher's parameters
    are detached so gradients never flow into it (EMA updates only).
    """
    def _build():
        # num_classes=1: the classifier head is replaced per epoch anyway.
        net = models.create(args.arch, num_features=args.features,
                            dropout=args.dropout, num_classes=1)
        net.cuda()
        return nn.DataParallel(net)

    model_1 = _build()
    model_1_ema = _build()
    if args.no_source:
        print('No source pre-training')
    else:
        initial_weights = load_checkpoint(args.init_1)
        copy_state_dict(initial_weights['state_dict'], model_1)
        copy_state_dict(initial_weights['state_dict'], model_1_ema)
    for param in model_1_ema.parameters():
        param.detach_()
    return model_1, model_1_ema
def main():
    """CLI entry point: parse args, optionally seed every RNG, run the worker."""
    args = parser.parse_args()
    if args.seed is not None:
        # Seed python, numpy and torch alike and make cuDNN deterministic
        # so runs are reproducible.
        for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
            seed_fn(args.seed)
        cudnn.deterministic = True
    main_worker(args)
def main_worker(args):
global start_epoch, best_mAP
cudnn.benchmark = True
sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))
print("==========\nArgs:{}\n==========".format(args))
# Create data loaders
iters = args.iters if (args.iters>0) else None
dataset_target = get_data(args.dataset_target, args.data_dir)
ori_train = dataset_target.train
if not args.no_source:
dataset_source = get_data(args.dataset_source, args.data_dir)
test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)
# Create model
model_1, model_1_ema = create_model(args)
# Evaluator
evaluator_1_ema = Evaluator(model_1_ema)
best_mAP = 0
for nc in range(args.epochs):
cluster_loader = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers,
testset=dataset_target.train)
dict_f, _ = extract_features(model_1_ema, cluster_loader, print_freq=50)
cf_1 = torch.stack(list(dict_f.values()))
# DBSCAN cluster
if args.no_source:
rerank_dist = compute_jaccard_dist(cf_1, lambda_value=0, source_features=None,
use_gpu=False).numpy()
else:
cluster_loader_source = get_test_loader(dataset_source, args.height, args.width, args.batch_size,
args.workers, testset=dataset_source.train)
dict_f_source, _ = extract_features(model_1_ema, cluster_loader_source, print_freq=50)
cf_1_source = torch.stack(list(dict_f_source.values()))
rerank_dist = compute_jaccard_dist(cf_1, lambda_value=args.lambda_value, source_features=cf_1_source,
use_gpu=False).numpy()
del cf_1_source
tri_mat = np.triu(rerank_dist, 1) # tri_mat.dim=2
tri_mat = tri_mat[np.nonzero(tri_mat)] # tri_mat.dim=1
tri_mat = np.sort(tri_mat, axis=None)
top_num = np.round(args.rho * tri_mat.size).astype(int)
eps = tri_mat[:top_num].mean()
print('eps in cluster: {:.3f}'.format(eps))
print('Clustering and labeling...')
cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=-1)
labels = cluster.fit_predict(rerank_dist)
num_ids = len(set(labels)) -1
print('Epoch {} have {} training ids'.format(nc, num_ids))
# generate new dataset
labeled_ind, unlabeled_ind = [], []
for ind, label in enumerate(labels):
if label == -1:
unlabeled_ind.append(ind)
else:
labeled_ind.append(ind)
# print('Epoch {} have {} labeled samples and {} unlabeled samples'.format(nc + 1, len(labeled_ind), len(unlabeled_ind)))
cf_1 = cf_1.numpy()
centers = []
for id in range(num_ids):
centers.append(np.mean(cf_1[labels == id], axis=0))
centers = np.stack(centers, axis=0)
del cf_1, rerank_dist
model_1.module.classifier = nn.Linear(2048, num_ids, bias=False).cuda()
model_1_ema.module.classifier = nn.Linear(2048, num_ids, bias=False).cuda()
model_1.module.classifier_max = nn.Linear(2048, num_ids, bias=False).cuda()
model_1_ema.module.classifier_max = nn.Linear(2048, num_ids, bias=False).cuda()
model_1.module.classifier.weight.data.copy_(
torch.from_numpy(normalize(centers[:, :2048], axis=1)).float().cuda())
model_1_ema.module.classifier.weight.data.copy_(
torch.from_numpy(normalize(centers[:, :2048], axis=1)).float().cuda())
model_1.module.classifier_max.weight.data.copy_(
torch.from_numpy(normalize(centers[:, 2048:], axis=1)).float().cuda())
model_1_ema.module.classifier_max.weight.data.copy_(
torch.from_numpy(normalize(centers[:, 2048:], axis=1)).float().cuda())
del centers
target_label = labels
for i in range(len(dataset_target.train)):
dataset_target.train[i] = list(dataset_target.train[i])
dataset_target.train[i][1] = int(target_label[i])
dataset_target.train[i] = tuple(dataset_target.train[i])
# Optimizer
params = []
for key, value in model_1.named_parameters():
if not value.requires_grad:
continue
params += [{"params": [value], "lr": args.lr, "weight_decay": args.weight_decay}]
optimizer = torch.optim.Adam(params)
# Trainer
trainer = ABMTTrainer(model_1, model_1_ema, num_cluster=num_ids, alpha=args.alpha)
epoch = nc
# # DBSCAN
dataset_target.train = [ori_train[i] for i in labeled_ind]
print(len(dataset_target.train), 'are labeled.')
labeled_loader_target = get_train_loader(dataset_target, args.height, args.width,
args.batch_size, args.workers, args.num_instances, iters, mutual=True)
labeled_loader_target.new_epoch()
trainer.train(epoch, labeled_loader_target, optimizer,
print_freq=args.print_freq, train_iters=len(labeled_loader_target))
        # Persist a checkpoint of the EMA (teacher) model.  Note: `epoch` and
        # `args` are read from the enclosing training scope (closure), and
        # `save_checkpoint` additionally copies the file to model_best.pth.tar
        # when is_best is true — TODO confirm against the serialization helper.
        def save_model(model_ema, is_best, best_mAP, mid, num_ids):
            # `mid` distinguishes checkpoint files when several models are trained.
            save_checkpoint({
                'state_dict': model_ema.state_dict(),
                'epoch': epoch + 1,
                'best_mAP': best_mAP,
                'num_ids': num_ids
            }, is_best, fpath=osp.join(args.logs_dir, 'model'+str(mid)+'_checkpoint.pth.tar'))
if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):
print('Evaluating teacher net:')
cmc, mAP_1 = evaluator_1_ema.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
is_best = (mAP_1>best_mAP)
best_mAP = max(mAP_1, best_mAP)
save_model(model_1_ema, is_best, best_mAP, 1, num_ids)
dataset_target.train = ori_train
print ('Test on the best model.')
checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))
model_best = models.create(args.arch, num_features=args.features, dropout=args.dropout, num_classes=checkpoint['num_ids'])
model_best.cuda()
model_best = nn.DataParallel(model_best)
evaluator_best = Evaluator(model_best)
model_best.load_state_dict(checkpoint['state_dict'])
evaluator_best.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)
if __name__ == '__main__':
    # Command-line entry point: build the full argument set, then run main().
    parser = argparse.ArgumentParser(description="ABMT Training")
    # data: source/target datasets, batching and input geometry
    parser.add_argument('-dt', '--dataset-target', type=str, default='market1501',
                        choices=datasets.names())
    parser.add_argument('-ds', '--dataset-source', type=str, default='dukemtmc-reid',
                        choices=datasets.names())
    parser.add_argument('-b', '--batch-size', type=int, default=64)
    parser.add_argument('-j', '--workers', type=int, default=4)
    parser.add_argument('--height', type=int, default=256,
                        help="input height")
    parser.add_argument('--width', type=int, default=128,
                        help="input width")
    parser.add_argument('--num-instances', type=int, default=4,
                        help="each minibatch consist of "
                             "(batch_size // num_instances) identities, and "
                             "each identity has num_instances instances, "
                             "default: 0 (NOT USE)")
    # model: backbone architecture and head configuration
    parser.add_argument('-a', '--arch', type=str, default='resnet50',
                        choices=models.names())
    parser.add_argument('--features', type=int, default=0)
    parser.add_argument('--dropout', type=float, default=0)
    # optimizer: learning-rate / momentum / EMA hyper-parameters
    parser.add_argument('--lr', type=float, default=0.00035,
                        help="learning rate of new parameters, for pretrained "
                             "parameters it is 10 times smaller than this")
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--alpha', type=float, default=0.999)
    parser.add_argument('--moving-avg-momentum', type=float, default=0.9)
    parser.add_argument('--weight-decay', type=float, default=5e-4)
    parser.add_argument('--epochs', type=int, default=40)
    parser.add_argument('--iters', type=int, default=800)
    # training configs: initialization, seeding, logging cadence
    parser.add_argument('--no-source', action='store_true')
    parser.add_argument('--init-1', type=str, default='', metavar='PATH')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--print-freq', type=int, default=100)
    parser.add_argument('--eval-step', type=int, default=1)
    # path: data and log directories default to siblings of this script
    working_dir = osp.dirname(osp.abspath(__file__))
    parser.add_argument('--data-dir', type=str, metavar='PATH',
                        default=osp.join(working_dir, 'data'))
    parser.add_argument('--logs-dir', type=str, metavar='PATH',
                        default=osp.join(working_dir, 'logs'))
    # cluster: DBSCAN-related hyper-parameters
    parser.add_argument('--lambda_value', type=float, default=0.1,
                        help="balancing parameter, default: 0.1")
    parser.add_argument('--rho', type=float, default=2e-3,
                        help="rho percentage, default: 2e-3")
    main()
| [
"abmt.datasets.names",
"abmt.utils.data.sampler.RandomMultipleGallerySampler",
"abmt.utils.serialization.copy_state_dict",
"abmt.trainers.ABMTTrainer",
"sklearn.cluster.DBSCAN",
"numpy.mean",
"abmt.models.create",
"argparse.ArgumentParser",
"abmt.utils.data.transforms.RandomHorizontalFlip",
"numpy... | [((1086, 1110), 'os.path.join', 'osp.join', (['data_dir', 'name'], {}), '(data_dir, name)\n', (1094, 1110), True, 'import os.path as osp\n'), ((1125, 1152), 'abmt.datasets.create', 'datasets.create', (['name', 'root'], {}), '(name, root)\n', (1140, 1152), False, 'from abmt import datasets\n'), ((1314, 1380), 'abmt.utils.data.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1325, 1380), True, 'from abmt.utils.data import transforms as T\n'), ((2463, 2529), 'abmt.utils.data.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2474, 2529), True, 'from abmt.utils.data import transforms as T\n'), ((3078, 3171), 'abmt.models.create', 'models.create', (['args.arch'], {'num_features': 'args.features', 'dropout': 'args.dropout', 'num_classes': '(1)'}), '(args.arch, num_features=args.features, dropout=args.dropout,\n num_classes=1)\n', (3091, 3171), False, 'from abmt import models\n'), ((3187, 3280), 'abmt.models.create', 'models.create', (['args.arch'], {'num_features': 'args.features', 'dropout': 'args.dropout', 'num_classes': '(1)'}), '(args.arch, num_features=args.features, dropout=args.dropout,\n num_classes=1)\n', (3200, 3280), False, 'from abmt import models\n'), ((3334, 3358), 'torch.nn.DataParallel', 'nn.DataParallel', (['model_1'], {}), '(model_1)\n', (3349, 3358), False, 'from torch import nn\n'), ((3377, 3405), 'torch.nn.DataParallel', 'nn.DataParallel', (['model_1_ema'], {}), '(model_1_ema)\n', (3392, 3405), False, 'from torch import nn\n'), ((4706, 4728), 'abmt.evaluators.Evaluator', 'Evaluator', (['model_1_ema'], {}), '(model_1_ema)\n', (4715, 4728), False, 'from abmt.evaluators import Evaluator, extract_features\n'), ((10227, 10340), 'abmt.models.create', 'models.create', (['args.arch'], {'num_features': 
'args.features', 'dropout': 'args.dropout', 'num_classes': "checkpoint['num_ids']"}), "(args.arch, num_features=args.features, dropout=args.dropout,\n num_classes=checkpoint['num_ids'])\n", (10240, 10340), False, 'from abmt import models\n'), ((10376, 10403), 'torch.nn.DataParallel', 'nn.DataParallel', (['model_best'], {}), '(model_best)\n', (10391, 10403), False, 'from torch import nn\n'), ((10425, 10446), 'abmt.evaluators.Evaluator', 'Evaluator', (['model_best'], {}), '(model_best)\n', (10434, 10446), False, 'from abmt.evaluators import Evaluator, extract_features\n'), ((10654, 10706), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""ABMT Training"""'}), "(description='ABMT Training')\n", (10677, 10706), False, 'import argparse\n'), ((1872, 1926), 'abmt.utils.data.sampler.RandomMultipleGallerySampler', 'RandomMultipleGallerySampler', (['train_set', 'num_instances'], {}), '(train_set, num_instances)\n', (1900, 1926), False, 'from abmt.utils.data.sampler import RandomMultipleGallerySampler\n'), ((2847, 2921), 'abmt.utils.data.preprocessor.Preprocessor', 'Preprocessor', (['testset'], {'root': 'dataset.images_dir', 'transform': 'test_transformer'}), '(testset, root=dataset.images_dir, transform=test_transformer)\n', (2859, 2921), False, 'from abmt.utils.data.preprocessor import Preprocessor\n'), ((3506, 3534), 'abmt.utils.serialization.load_checkpoint', 'load_checkpoint', (['args.init_1'], {}), '(args.init_1)\n', (3521, 3534), False, 'from abmt.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict\n'), ((3543, 3598), 'abmt.utils.serialization.copy_state_dict', 'copy_state_dict', (["initial_weights['state_dict']", 'model_1'], {}), "(initial_weights['state_dict'], model_1)\n", (3558, 3598), False, 'from abmt.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict\n'), ((3607, 3666), 'abmt.utils.serialization.copy_state_dict', 'copy_state_dict', (["initial_weights['state_dict']", 'model_1_ema'], 
{}), "(initial_weights['state_dict'], model_1_ema)\n", (3622, 3666), False, 'from abmt.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict\n'), ((3853, 3875), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (3864, 3875), False, 'import random\n'), ((3884, 3909), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3898, 3909), True, 'import numpy as np\n'), ((3918, 3946), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3935, 3946), False, 'import torch\n'), ((4116, 4150), 'os.path.join', 'osp.join', (['args.logs_dir', '"""log.txt"""'], {}), "(args.logs_dir, 'log.txt')\n", (4124, 4150), True, 'import os.path as osp\n'), ((4987, 5047), 'abmt.evaluators.extract_features', 'extract_features', (['model_1_ema', 'cluster_loader'], {'print_freq': '(50)'}), '(model_1_ema, cluster_loader, print_freq=50)\n', (5003, 5047), False, 'from abmt.evaluators import Evaluator, extract_features\n'), ((5929, 5952), 'numpy.triu', 'np.triu', (['rerank_dist', '(1)'], {}), '(rerank_dist, 1)\n', (5936, 5952), True, 'import numpy as np\n'), ((6052, 6079), 'numpy.sort', 'np.sort', (['tri_mat'], {'axis': 'None'}), '(tri_mat, axis=None)\n', (6059, 6079), True, 'import numpy as np\n'), ((6297, 6360), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': 'eps', 'min_samples': '(4)', 'metric': '"""precomputed"""', 'n_jobs': '(-1)'}), "(eps=eps, min_samples=4, metric='precomputed', n_jobs=-1)\n", (6303, 6360), False, 'from sklearn.cluster import KMeans, DBSCAN\n'), ((7061, 7086), 'numpy.stack', 'np.stack', (['centers'], {'axis': '(0)'}), '(centers, axis=0)\n', (7069, 7086), True, 'import numpy as np\n'), ((8595, 8619), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {}), '(params)\n', (8611, 8619), False, 'import torch\n'), ((8657, 8729), 'abmt.trainers.ABMTTrainer', 'ABMTTrainer', (['model_1', 'model_1_ema'], {'num_cluster': 'num_ids', 'alpha': 'args.alpha'}), '(model_1, model_1_ema, 
num_cluster=num_ids, alpha=args.alpha)\n', (8668, 8729), False, 'from abmt.trainers import ABMTTrainer\n'), ((10163, 10208), 'os.path.join', 'osp.join', (['args.logs_dir', '"""model_best.pth.tar"""'], {}), "(args.logs_dir, 'model_best.pth.tar')\n", (10171, 10208), True, 'import os.path as osp\n'), ((12894, 12915), 'os.path.abspath', 'osp.abspath', (['__file__'], {}), '(__file__)\n', (12905, 12915), True, 'import os.path as osp\n'), ((1459, 1501), 'abmt.utils.data.transforms.Resize', 'T.Resize', (['(height, width)'], {'interpolation': '(3)'}), '((height, width), interpolation=3)\n', (1467, 1501), True, 'from abmt.utils.data import transforms as T\n'), ((1516, 1545), 'abmt.utils.data.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1538, 1545), True, 'from abmt.utils.data import transforms as T\n'), ((1560, 1569), 'abmt.utils.data.transforms.Pad', 'T.Pad', (['(10)'], {}), '(10)\n', (1565, 1569), True, 'from abmt.utils.data import transforms as T\n'), ((1584, 1613), 'abmt.utils.data.transforms.RandomCrop', 'T.RandomCrop', (['(height, width)'], {}), '((height, width))\n', (1596, 1613), True, 'from abmt.utils.data import transforms as T\n'), ((1628, 1640), 'abmt.utils.data.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1638, 1640), True, 'from abmt.utils.data import transforms as T\n'), ((1677, 1737), 'abmt.utils.data.transforms.RandomErasing', 'T.RandomErasing', ([], {'probability': '(0.5)', 'mean': '[0.485, 0.456, 0.406]'}), '(probability=0.5, mean=[0.485, 0.456, 0.406])\n', (1692, 1737), True, 'from abmt.utils.data import transforms as T\n'), ((2018, 2115), 'abmt.utils.data.preprocessor.Preprocessor', 'Preprocessor', (['train_set'], {'root': 'dataset.images_dir', 'transform': 'train_transformer', 'mutual': 'mutual'}), '(train_set, root=dataset.images_dir, transform=\n train_transformer, mutual=mutual)\n', (2030, 2115), False, 'from abmt.utils.data.preprocessor import Preprocessor\n'), ((2608, 2650), 
'abmt.utils.data.transforms.Resize', 'T.Resize', (['(height, width)'], {'interpolation': '(3)'}), '((height, width), interpolation=3)\n', (2616, 2650), True, 'from abmt.utils.data import transforms as T\n'), ((2665, 2677), 'abmt.utils.data.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (2675, 2677), True, 'from abmt.utils.data import transforms as T\n'), ((5563, 5630), 'abmt.evaluators.extract_features', 'extract_features', (['model_1_ema', 'cluster_loader_source'], {'print_freq': '(50)'}), '(model_1_ema, cluster_loader_source, print_freq=50)\n', (5579, 5630), False, 'from abmt.evaluators import Evaluator, extract_features\n'), ((5996, 6015), 'numpy.nonzero', 'np.nonzero', (['tri_mat'], {}), '(tri_mat)\n', (6006, 6015), True, 'import numpy as np\n'), ((10833, 10849), 'abmt.datasets.names', 'datasets.names', ([], {}), '()\n', (10847, 10849), False, 'from abmt import datasets\n'), ((10969, 10985), 'abmt.datasets.names', 'datasets.names', ([], {}), '()\n', (10983, 10985), False, 'from abmt import datasets\n'), ((11767, 11781), 'abmt.models.names', 'models.names', ([], {}), '()\n', (11779, 11781), False, 'from abmt import models\n'), ((13013, 13042), 'os.path.join', 'osp.join', (['working_dir', '"""data"""'], {}), "(working_dir, 'data')\n", (13021, 13042), True, 'import os.path as osp\n'), ((13140, 13169), 'os.path.join', 'osp.join', (['working_dir', '"""logs"""'], {}), "(working_dir, 'logs')\n", (13148, 13169), True, 'import os.path as osp\n'), ((6098, 6131), 'numpy.round', 'np.round', (['(args.rho * tri_mat.size)'], {}), '(args.rho * tri_mat.size)\n', (6106, 6131), True, 'import numpy as np\n'), ((7006, 7041), 'numpy.mean', 'np.mean', (['cf_1[labels == id]'], {'axis': '(0)'}), '(cf_1[labels == id], axis=0)\n', (7013, 7041), True, 'import numpy as np\n'), ((7155, 7191), 'torch.nn.Linear', 'nn.Linear', (['(2048)', 'num_ids'], {'bias': '(False)'}), '(2048, num_ids, bias=False)\n', (7164, 7191), False, 'from torch import nn\n'), ((7239, 7275), 'torch.nn.Linear', 
'nn.Linear', (['(2048)', 'num_ids'], {'bias': '(False)'}), '(2048, num_ids, bias=False)\n', (7248, 7275), False, 'from torch import nn\n'), ((7323, 7359), 'torch.nn.Linear', 'nn.Linear', (['(2048)', 'num_ids'], {'bias': '(False)'}), '(2048, num_ids, bias=False)\n', (7332, 7359), False, 'from torch import nn\n'), ((7411, 7447), 'torch.nn.Linear', 'nn.Linear', (['(2048)', 'num_ids'], {'bias': '(False)'}), '(2048, num_ids, bias=False)\n', (7420, 7447), False, 'from torch import nn\n'), ((5177, 5256), 'abmt.utils.rerank.compute_jaccard_dist', 'compute_jaccard_dist', (['cf_1'], {'lambda_value': '(0)', 'source_features': 'None', 'use_gpu': '(False)'}), '(cf_1, lambda_value=0, source_features=None, use_gpu=False)\n', (5197, 5256), False, 'from abmt.utils.rerank import compute_jaccard_dist\n'), ((5725, 5832), 'abmt.utils.rerank.compute_jaccard_dist', 'compute_jaccard_dist', (['cf_1'], {'lambda_value': 'args.lambda_value', 'source_features': 'cf_1_source', 'use_gpu': '(False)'}), '(cf_1, lambda_value=args.lambda_value, source_features=\n cf_1_source, use_gpu=False)\n', (5745, 5832), False, 'from abmt.utils.rerank import compute_jaccard_dist\n'), ((7538, 7574), 'sklearn.preprocessing.normalize', 'normalize', (['centers[:, :2048]'], {'axis': '(1)'}), '(centers[:, :2048], axis=1)\n', (7547, 7574), False, 'from sklearn.preprocessing import normalize\n'), ((7678, 7714), 'sklearn.preprocessing.normalize', 'normalize', (['centers[:, :2048]'], {'axis': '(1)'}), '(centers[:, :2048], axis=1)\n', (7687, 7714), False, 'from sklearn.preprocessing import normalize\n'), ((7819, 7855), 'sklearn.preprocessing.normalize', 'normalize', (['centers[:, 2048:]'], {'axis': '(1)'}), '(centers[:, 2048:], axis=1)\n', (7828, 7855), False, 'from sklearn.preprocessing import normalize\n'), ((7963, 7999), 'sklearn.preprocessing.normalize', 'normalize', (['centers[:, 2048:]'], {'axis': '(1)'}), '(centers[:, 2048:], axis=1)\n', (7972, 7999), False, 'from sklearn.preprocessing import normalize\n')] |
# Test results on all possible clustering methods using clustering results
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import silhouette_samples, silhouette_score, adjusted_rand_score
from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift
from model import AE, VAE, PVAE, PAE
from util_function import *
from graph_function import *
from benchmark_util import *
import argparse
# CLI: only the dataset name is configurable; all other paths and
# hyper-parameters are hard-coded in the functions below.
parser = argparse.ArgumentParser(description='Main entrance of scGNN')
parser.add_argument('--dataName', type=str, default='151507_cpm', help='dataName')
args = parser.parse_args()
def readGraph(name):
    """Read an edge-list CSV (header row first) into [(src, dst, weight), ...].

    Each data row is expected as "int,int,float"; extra columns are ignored.
    """
    with open(name) as handle:
        rows = handle.readlines()
    edges = []
    # The first row is the header, so start from the second.
    for row in rows[1:]:
        fields = row.strip().split(',')
        edges.append((int(fields[0]), int(fields[1]), float(fields[2])))
    return edges
def readpreprocess(dataName):
    """Load a precomputed scGNNsp embedding and its graph for `dataName`.

    Paths are hard-coded to a local workspace layout; `readSpatial` /
    `preprocessSpatial` are project helpers imported via the star imports
    at the top of the file.  Returns (embedding ndarray, edge list).
    """
    # Spatial coordinates are loaded and converted to a float32 tensor;
    # NOTE(review): `spatialMatrix` is not used after this point — presumably
    # kept for the commented-out generateAdj calls below.
    spatialMatrix = readSpatial('/Users/wangjue/workspace/scGNNsp/'+dataName+'/coords_array.npy')
    spatialMatrix = preprocessSpatial(spatialMatrix)
    spatialMatrix = torch.from_numpy(spatialMatrix)
    spatialMatrix = spatialMatrix.type(torch.FloatTensor)
    # df = pd.read_csv('/Users/wangjue/workspace/scGNNsp/outputdirMVKMm1-'+dataName+'_cpm/'+dataName+'_cpm_10_euclidean_STD_dummy_add_0.5_embedding.csv')
    df = pd.read_csv('/Users/wangjue/workspace/scGNNsp/outputdirS-'+dataName+'_cpm_0.3/'+dataName+'_cpm_8_euclidean_Grid_dummy_add_0.5_embedding.csv')
    # Keep only the embedding_* columns as the feature matrix.
    filter_col = [col for col in df if col.startswith('embedding') ]
    dfEX = df[filter_col]
    zOut = dfEX.to_numpy()
    # adjTarget, edgeList = generateAdj(zOut, graphType='spatialGrid', para='euclidean:8:Grid', adjTag=True, spatialMatrix = spatialMatrix)
    # adjSource, edgeList = generateAdj(zOut, graphType='KNNgraphStatsSingleThread', para='euclidean:10:STD', adjTag=True, spatialMatrix = None)
    edgeList = readGraph('/Users/wangjue/workspace/scGNNsp/outputdirS-'+dataName+'_cpm_0.3/'+dataName+'_cpm_8_euclidean_Grid_dummy_add_0.5_graph.csv')
    return zOut,edgeList
def readembedding(dataName,k,pe_type,skStr):
    """Load an scGNNsp embedding matrix and its graph for one run.

    The run is identified by dataset name, neighbor count `k`, positional
    encoding type, and spatial-kernel string; paths point at the cluster
    storage layout.  Returns (embedding ndarray, edge list).
    """
    # Both files share one long run prefix; build it once.
    run_prefix = ('/storage/htc/joshilab/wangjue/scGNNsp/outputdirH-'+dataName+'_0.3/'
                  +dataName+'_'+k+'_euclidean_NA_'+pe_type+'_add_0.5_intersect_'+skStr)
    frame = pd.read_csv(run_prefix+'_embedding.csv')
    # Only the embedding_* columns form the feature matrix.
    embedding_cols = [column for column in frame if column.startswith('embedding')]
    zOut = frame[embedding_cols].to_numpy()
    edgeList = readGraph(run_prefix+'_graph.csv')
    return zOut, edgeList
def clusteringMethod(zOut, edgeList, name, preK=5, resolution = 0.3):
    """Cluster the embedding `zOut` (or graph `edgeList`) with the named method.

    Parameters
    ----------
    zOut : ndarray, embedding matrix (cells x features)
    edgeList : list of (src, dst, weight) graph edges (used by Louvain/Leiden)
    name : one of 'Louvain', 'LouvainK', 'LouvainB', 'KMeans',
        'SpectralClustering', 'AffinityPropagation', 'AgglomerativeClustering',
        'AgglomerativeClusteringK', 'Birch', 'BirchN', 'MeanShift', 'OPTICS',
        'Leiden'
    preK : cluster count for the fixed-k methods
    resolution : shrink factor applied to the Louvain-derived k

    Returns the list of per-sample cluster labels; each branch also prints a
    tab-separated summary (method name and cluster count) without a newline.
    Raises ValueError for an unknown method name.
    """
    if name=='Louvain':
        listResult, size = generateLouvainCluster(edgeList)
        k = len(np.unique(listResult))
        print('Louvain\t'+str(k)+'\t', end='')
    elif name== 'LouvainK':
        # Louvain picks k, then KMeans refines the assignment on the embedding.
        listResult, size = generateLouvainCluster(edgeList)
        k = len(np.unique(listResult))
        # Shrink k by the resolution factor, but keep at least 2 clusters.
        k = int(k*resolution) if int(k*resolution) >= 3 else 2
        clustering = KMeans(n_clusters=k, random_state=0).fit(zOut)
        listResult = clustering.predict(zOut)
        # Check criteria: KMeans score plus L1/L2 intra-cluster distance sums.
        intraArr=clustering.transform(zOut)
        intraL1=np.sum(intraArr)
        intraL2=np.sum(intraArr**2)
        print(str(clustering.score(zOut))+'\t'+str(intraL1)+'\t'+str(intraL2))
    elif name == 'LouvainB':
        listResult, size = generateLouvainCluster(edgeList)
        k = len(np.unique(listResult))
        print('LouvainB\t'+str(k)+'\t', end='')
        k = int(k*resolution) if int(k*resolution) >= 3 else 2
        clustering = Birch(n_clusters=k).fit(zOut)
        listResult = clustering.predict(zOut)
    elif name == 'KMeans':
        clustering = KMeans(n_clusters=preK,random_state=0).fit(zOut)
        listResult = clustering.predict(zOut)
        print('KMeans\t'+str(len(set(listResult)))+'\t', end='')
    elif name == 'SpectralClustering':
        clustering = SpectralClustering(n_clusters=preK, assign_labels="discretize", random_state=0).fit(zOut)
        listResult = clustering.labels_.tolist()
        print('SpectralClustering\t'+str(len(set(listResult)))+'\t', end='')
    elif name == 'AffinityPropagation':
        clustering = AffinityPropagation().fit(zOut)
        listResult = clustering.predict(zOut)
        print('AffinityPropagation\t'+str(len(set(listResult)))+'\t', end='')
    elif name == 'AgglomerativeClustering':
        clustering = AgglomerativeClustering().fit(zOut)
        listResult = clustering.labels_.tolist()
        print('Agglo\t'+str(len(set(listResult)))+'\t', end='')
    elif name == 'AgglomerativeClusteringK':
        clustering = AgglomerativeClustering(n_clusters=preK).fit(zOut)
        listResult = clustering.labels_.tolist()
        print('AggloK\t'+str(len(set(listResult)))+'\t', end='')
    elif name == 'Birch':
        clustering = Birch(n_clusters=preK).fit(zOut)
        listResult = clustering.predict(zOut)
        print('Birch\t'+str(len(set(listResult)))+'\t', end='')
    elif name == 'BirchN':
        clustering = Birch(n_clusters=None).fit(zOut)
        listResult = clustering.predict(zOut)
        print('BirchN\t'+str(len(set(listResult)))+'\t', end='')
    elif name == 'MeanShift':
        clustering = MeanShift().fit(zOut)
        listResult = clustering.predict(zOut)
        print('MeanShift\t'+str(len(set(listResult)))+'\t', end='')
    elif name == 'OPTICS':
        clustering = OPTICS(min_samples=3, min_cluster_size=3).fit(zOut)
        # BUGFIX: sklearn's OPTICS has no predict(); the original call to
        # clustering.predict(zOut) raised AttributeError.  Use the fitted labels.
        listResult = clustering.labels_.tolist()
        print('OPTICS\t'+str(len(set(listResult)))+'\t', end='')
    elif name=='Leiden':
        listResult, size = generateLeidenCluster(edgeList)
        print('Leiden\t'+str(len(set(listResult)))+'\t', end='')
    else:
        # Previously an unknown name fell through to a NameError on listResult;
        # fail with an explicit message instead.
        raise ValueError('Unknown clustering method: '+str(name))
    return listResult
def plotMethod(zOut, edgeList, method):
    """Cluster with `method`, scatter-plot the result, and print the ARI.

    NOTE(review): relies on module-level globals `arr` (spatial coordinates)
    and `dataName`, which are only set in the commented-out "Original" section
    below — calling this in the current script state would raise NameError.
    """
    listResult = clusteringMethod(zOut, edgeList, method, preK=6, resolution = 0.3)
    listResult = pd.Series(listResult)
    color_labels = listResult.unique()
    # print(color_labels)
    # List of colors in the color palettes
    rgb_values = sns.color_palette("Set2", len(color_labels))
    # Map cluster labels to the colors
    color_map = dict(zip(color_labels, rgb_values))
    # Finally use the mapped values; one dot per spot at its spatial location
    plt.scatter(arr[:,0], arr[:,1], c=listResult.map(color_map), s=10)
    # plt.show()
    plt.savefig(str(dataName)+'_'+method+'.png')
    plt.close()
    # Compare against the annotated layer labels and report ARI.
    labelname = '/Users/wangjue/workspace/scGNNsp/'+dataName+'/label.csv'
    df = pd.read_csv(labelname)
    listBench = df['layer'].to_numpy().tolist()
    ari, ami, nmi, cs, fms, vms, hs = measureClusteringTrueLabel(listBench, listResult)
    print(ari)
# dataName = '151671'
# Original
# arr = np.load('/Users/wangjue/workspace/scGNNsp/'+dataName+'/coords_array.npy')
# zOut,edgeList = readpreprocess(dataName)
# # plotMethod(zOut, edgeList, 'Louvain')
# # plotMethod(zOut, edgeList, 'LouvainK')
# # plotMethod(zOut, edgeList, 'LouvainB')
# plotMethod(zOut, edgeList, 'KMeans')
# # plotMethod(zOut, edgeList, 'SpectralClustering')
# # plotMethod(zOut, edgeList, 'AffinityPropagation')
# # plotMethod(zOut, edgeList, 'AgglomerativeClustering')
# # plotMethod(zOut, edgeList, 'AgglomerativeClusteringK')
# # plotMethod(zOut, edgeList, 'Birch')
# # plotMethod(zOut, edgeList, 'BirchN')
# # plotMethod(zOut, edgeList, 'MeanShift')
# # # plotMethod(zOut, edgeList, 'OPTICS')
# # plotMethod(zOut, edgeList, 'Leiden')
# Parameter grids for the benchmark sweep.  Only args.dataName is actually
# iterated over below; dataNameList is kept for the commented "Single" sweep.
dataNameList = [
    '151507_cpm',
    '151508_cpm',
    '151509_cpm',
    '151510_cpm',
    '151669_cpm',
    '151670_cpm',
    '151671_cpm',
    '151672_cpm',
    '151673_cpm',
    '151674_cpm',
    '151675_cpm',
    '151676_cpm',
    '18-64_cpm',
    '2-5_cpm',
    '2-8_cpm',
    'T4857_cpm'
    ]
pe_typeList =['dummy','geom_lowf']
kList = ['10','50','100','200','500','1000','2000']
skStrList = ['8_Grid','16_GridEx',
             # '24_GridEx2','32_GridEx3',
             '40_GridEx4',
             # '48_GridEx5','56_GridEx6','64_GridEx7','72_GridEx8',
             '80_GridEx9',
             # '88_GridEx10','96_GridEx11','104_GridEx12','112_GridEx13',
             '120_GridEx14','160_GridEx19','200_GridEx24','240_GridEx29']

# For debug
# zOut,edgeList = readembedding(dataName='151671',k='',pe_type='',skStr='')
# clusteringMethod(zOut, edgeList, name='LouvainK', preK=5, resolution = 0.3)

# Single
# for dataName in dataNameList:
#     for pe_type in pe_typeList:
#         for k in kList:
#             for skStr in skStrList:
#                 #outputdirH-2-5_cpm_0.3/
#                 zOut,edgeList = readembedding(dataName,k,pe_type,skStr)
#                 clusteringMethod(zOut, edgeList, name='LouvainK', preK=5, resolution = 0.3)

# Sweep every (pe_type, k, skStr) combination for the dataset named on the CLI.
for pe_type in pe_typeList:
    for k in kList:
        for skStr in skStrList:
            #outputdirH-2-5_cpm_0.3/
            zOut,edgeList = readembedding(args.dataName,k,pe_type,skStr)
            clusteringMethod(zOut, edgeList, name='LouvainK', preK=5, resolution = 0.3)
| [
"pandas.Series",
"sklearn.cluster.KMeans",
"sklearn.cluster.SpectralClustering",
"sklearn.cluster.AgglomerativeClustering",
"numpy.unique",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.cluster.OPTICS",
"sklearn.cluster.AffinityPropagation",
"torch.from_numpy",
"matplotlib.pyplot.close"... | [((574, 635), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Main entrance of scGNN"""'}), "(description='Main entrance of scGNN')\n", (597, 635), False, 'import argparse\n'), ((1427, 1458), 'torch.from_numpy', 'torch.from_numpy', (['spatialMatrix'], {}), '(spatialMatrix)\n', (1443, 1458), False, 'import torch\n'), ((1681, 1838), 'pandas.read_csv', 'pd.read_csv', (["('/Users/wangjue/workspace/scGNNsp/outputdirS-' + dataName + '_cpm_0.3/' +\n dataName + '_cpm_8_euclidean_Grid_dummy_add_0.5_embedding.csv')"], {}), "('/Users/wangjue/workspace/scGNNsp/outputdirS-' + dataName +\n '_cpm_0.3/' + dataName +\n '_cpm_8_euclidean_Grid_dummy_add_0.5_embedding.csv')\n", (1692, 1838), True, 'import pandas as pd\n'), ((2472, 2666), 'pandas.read_csv', 'pd.read_csv', (["('/storage/htc/joshilab/wangjue/scGNNsp/outputdirH-' + dataName + '_0.3/' +\n dataName + '_' + k + '_euclidean_NA_' + pe_type + '_add_0.5_intersect_' +\n skStr + '_embedding.csv')"], {}), "('/storage/htc/joshilab/wangjue/scGNNsp/outputdirH-' + dataName +\n '_0.3/' + dataName + '_' + k + '_euclidean_NA_' + pe_type +\n '_add_0.5_intersect_' + skStr + '_embedding.csv')\n", (2483, 2666), True, 'import pandas as pd\n'), ((6813, 6834), 'pandas.Series', 'pd.Series', (['listResult'], {}), '(listResult)\n', (6822, 6834), True, 'import pandas as pd\n'), ((7269, 7280), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7278, 7280), True, 'import matplotlib.pyplot as plt\n'), ((7365, 7387), 'pandas.read_csv', 'pd.read_csv', (['labelname'], {}), '(labelname)\n', (7376, 7387), True, 'import pandas as pd\n'), ((3435, 3456), 'numpy.unique', 'np.unique', (['listResult'], {}), '(listResult)\n', (3444, 3456), True, 'import numpy as np\n'), ((3988, 4004), 'numpy.sum', 'np.sum', (['intraArr'], {}), '(intraArr)\n', (3994, 4004), True, 'import numpy as np\n'), ((4021, 4042), 'numpy.sum', 'np.sum', (['(intraArr ** 2)'], {}), '(intraArr ** 2)\n', (4027, 4042), True, 
'import numpy as np\n'), ((3609, 3630), 'numpy.unique', 'np.unique', (['listResult'], {}), '(listResult)\n', (3618, 3630), True, 'import numpy as np\n'), ((3810, 3846), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'k', 'random_state': '(0)'}), '(n_clusters=k, random_state=0)\n', (3816, 3846), False, 'from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift\n'), ((4225, 4246), 'numpy.unique', 'np.unique', (['listResult'], {}), '(listResult)\n', (4234, 4246), True, 'import numpy as np\n'), ((4380, 4399), 'sklearn.cluster.Birch', 'Birch', ([], {'n_clusters': 'k'}), '(n_clusters=k)\n', (4385, 4399), False, 'from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift\n'), ((4512, 4551), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'preK', 'random_state': '(0)'}), '(n_clusters=preK, random_state=0)\n', (4518, 4551), False, 'from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift\n'), ((4732, 4811), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', ([], {'n_clusters': 'preK', 'assign_labels': '"""discretize"""', 'random_state': '(0)'}), "(n_clusters=preK, assign_labels='discretize', random_state=0)\n", (4750, 4811), False, 'from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift\n'), ((5009, 5030), 'sklearn.cluster.AffinityPropagation', 'AffinityPropagation', ([], {}), '()\n', (5028, 5030), False, 'from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift\n'), ((5230, 5255), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], 
{}), '()\n', (5253, 5255), False, 'from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift\n'), ((5445, 5485), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'preK'}), '(n_clusters=preK)\n', (5468, 5485), False, 'from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift\n'), ((5657, 5679), 'sklearn.cluster.Birch', 'Birch', ([], {'n_clusters': 'preK'}), '(n_clusters=preK)\n', (5662, 5679), False, 'from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift\n'), ((5848, 5870), 'sklearn.cluster.Birch', 'Birch', ([], {'n_clusters': 'None'}), '(n_clusters=None)\n', (5853, 5870), False, 'from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift\n'), ((6043, 6054), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {}), '()\n', (6052, 6054), False, 'from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift\n'), ((6227, 6268), 'sklearn.cluster.OPTICS', 'OPTICS', ([], {'min_samples': '(3)', 'min_cluster_size': '(3)'}), '(min_samples=3, min_cluster_size=3)\n', (6233, 6268), False, 'from sklearn.cluster import KMeans, SpectralClustering, AffinityPropagation, AgglomerativeClustering, Birch, DBSCAN, FeatureAgglomeration, OPTICS, MeanShift\n')] |
import mediapipe as mp
import pandas as pd
import numpy as np
import cv2
mp_pose = mp.solutions.pose
# returns an angle value as a result of the given points
def calculate_angle(a, b, c):
    """Return the angle (degrees, 0-180) at vertex *b* formed by points a-b-c.

    Each point is any 2-element (x, y) sequence.
    """
    a, b, c = np.array(a), np.array(b), np.array(c)
    # Signed difference between the directions of rays b->c and b->a.
    radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
    degrees = np.abs(radians * 180.0 / np.pi)
    # Fold reflex angles back into the 0-180 range.
    return 360 - degrees if degrees > 180.0 else degrees
# return body part x,y value
def detection_body_part(landmarks, body_part_name):
    """Return [x, y, visibility] for the named MediaPipe pose landmark."""
    landmark = landmarks[mp_pose.PoseLandmark[body_part_name].value]
    return [landmark.x, landmark.y, landmark.visibility]
# return body_part, x, y as dataframe
def detection_body_parts(landmarks):
    """Collect every pose landmark into a DataFrame of (body_part, x, y)."""
    table = pd.DataFrame(columns=["body_part", "x", "y"])
    for row_idx, landmark_enum in enumerate(mp_pose.PoseLandmark):
        # Enum repr looks like "PoseLandmark.NOSE"; keep only the member name.
        part_name = str(landmark_enum).split(".")[1]
        x, y, _visibility = detection_body_part(landmarks, part_name)
        table.loc[row_idx] = part_name, x, y
    return table
def score_table(exercise, counter, status):
    """Render the activity name, rep counter and status onto the score panel.

    Pure GUI side effect: loads the panel image from a relative path and shows
    it in an OpenCV window.  NOTE(review): the local variable `score_table`
    shadows the function name.
    """
    score_table = cv2.imread("./images/score_table.png")
    # Activity label, e.g. "push-up" -> "push up"
    cv2.putText(score_table, "Activity : " + exercise.replace("-", " "),
                (10, 65), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (182, 158, 128), 2,
                cv2.LINE_AA)
    cv2.putText(score_table, "Counter : " + str(counter), (10, 100),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (182, 158, 128), 2, cv2.LINE_AA)
    cv2.putText(score_table, "Status : " + str(status), (10, 135),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (182, 158, 128), 2, cv2.LINE_AA)
    cv2.imshow("Score Table", score_table)
| [
"numpy.abs",
"cv2.imshow",
"numpy.array",
"numpy.arctan2",
"pandas.DataFrame",
"cv2.imread"
] | [((199, 210), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (207, 210), True, 'import numpy as np\n'), ((228, 239), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (236, 239), True, 'import numpy as np\n'), ((255, 266), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (263, 266), True, 'import numpy as np\n'), ((392, 423), 'numpy.abs', 'np.abs', (['(radians * 180.0 / np.pi)'], {}), '(radians * 180.0 / np.pi)\n', (398, 423), True, 'import numpy as np\n'), ((918, 963), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['body_part', 'x', 'y']"}), "(columns=['body_part', 'x', 'y'])\n", (930, 963), True, 'import pandas as pd\n'), ((1256, 1294), 'cv2.imread', 'cv2.imread', (['"""./images/score_table.png"""'], {}), "('./images/score_table.png')\n", (1266, 1294), False, 'import cv2\n'), ((1774, 1812), 'cv2.imshow', 'cv2.imshow', (['"""Score Table"""', 'score_table'], {}), "('Score Table', score_table)\n", (1784, 1812), False, 'import cv2\n'), ((289, 325), 'numpy.arctan2', 'np.arctan2', (['(c[1] - b[1])', '(c[0] - b[0])'], {}), '(c[1] - b[1], c[0] - b[0])\n', (299, 325), True, 'import numpy as np\n'), ((343, 379), 'numpy.arctan2', 'np.arctan2', (['(a[1] - b[1])', '(a[0] - b[0])'], {}), '(a[1] - b[1], a[0] - b[0])\n', (353, 379), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
def get_transformed_spatial_coordinates(filename: str,
                                        x_scale: float = 288.9,
                                        y_scale: float = 292.6,
                                        x_shift: float = -288.9,
                                        y_shift: float = -292.6):
    """Read spot coordinates and map them into image space.

    The first column of the tab-separated file encodes each spot as
    "<x>x<y>".  The raw coordinates are transformed affinely; the default
    scale/shift values reproduce the previously hard-coded image transform,
    but can now be overridden for other slides.

    :param filename: path to the tab-separated spatial transcriptomics file
    :param x_scale: multiplicative factor for the x coordinate
    :param y_scale: multiplicative factor for the y coordinate
    :param x_shift: offset added to the scaled x coordinate
    :param y_shift: offset added to the scaled y coordinate
    :return: pandas DataFrame with transformed columns 'x' and 'y'
    """
    df = pd.read_csv(filename, sep="\t")
    spatial_data = df.iloc[:, 0]
    # Each spot string is "<x>x<y>"; split on the literal 'x' separator.
    spatial_xy = [[float(c) for c in spot.split('x')] for spot in spatial_data]
    xy_coordinates = pd.DataFrame(spatial_xy, columns=['x', 'y'])
    # Affine transform into image coordinates.
    xy_coordinates['x'] = xy_coordinates['x'] * x_scale + x_shift
    xy_coordinates['y'] = xy_coordinates['y'] * y_scale + y_shift
    return xy_coordinates
def label_components(pca, bound):
    """Binarise a one-column component: values below ``bound`` become 1,
    all others become 0.

    :param pca: single-column pandas DataFrame of component values
    :param bound: threshold separating the two labels
    :return: pandas DataFrame of integer labels (0/1)
    """
    values = pca.to_numpy(copy=True)
    for row in range(len(pca)):
        values[row] = 1 if values[row] < bound else 0
    return pd.DataFrame(values).astype(int)
def match_labels(pred_labels, true_labels):
    """Align binary (0/1) predicted labels with the true labelling.

    Cluster labels are arbitrary, so predicted 0/1 may be swapped relative to
    the ground truth.  If fewer than half of the entries agree, the predicted
    labels are flipped.

    :param pred_labels: single-column DataFrame of predicted 0/1 labels
    :param true_labels: single-column DataFrame of true 0/1 labels
    :return: (pred, true) as flat numpy arrays with consistent labelling
    """
    true_np = true_labels.to_numpy(copy=True)
    pred_np = pred_labels.to_numpy(copy=True)
    # Count positions where prediction and truth already agree.
    agree = 0
    for t, p in zip(true_np, pred_np):
        if t == p:
            agree += 1
    # Majority disagreement means the 0/1 assignment is inverted: flip it.
    if agree < (len(true_np) / 2):
        for j in range(len(pred_np)):
            pred_np[j] = 0 if pred_np[j] == 1 else 1
    return np.transpose(pred_np).flatten(), np.transpose(true_np).flatten()
def percent_dropout(filename: str):
    """
    :param filename: file containing spatial transcriptomics data
    :return: percent dropout component as a pandas dataframe
    """
    df = pd.read_csv(filename, sep="\t")
    # Drop the spot-coordinate column so only gene counts remain.
    df = df.drop(df.columns[0], axis=1)
    # Count zero entries per spot (summed across genes, axis=1).
    # NOTE(review): the count is divided by df.shape[0] (number of rows);
    # a per-spot *fraction* of zero genes would divide by df.shape[1] —
    # confirm the intended denominator.
    percent_dropout_component = pd.DataFrame((df == 0).astype(int).sum(axis=1) / df.shape[0])
    return percent_dropout_component
def normalize_df(df):
    """
    Filter and preprocess spatial transcriptomics data.

    Steps: drop the spot-identifier column, keep genes (columns) with at
    least 15 non-zero entries, convert counts to per-spot proportions,
    log-transform with a pseudocount of 1, and z-score each gene.

    Improvements over the original: the dead ``nonzero_col_sum.to_numpy()``
    call (its return value was discarded) is removed, and the Python loop
    that built the column index list is replaced by a vectorized boolean
    mask selecting the same columns in the same order.

    :param df: pandas dataframe of spatial transcriptomics data
    :return: normalized pandas dataframe (default integer columns)
    """
    df = df.drop(df.columns[0], axis=1)
    # Filter genes with at least 15 non-zero entries.
    df = df.loc[:, (df > 0).sum(axis=0) >= 15]
    # Convert raw counts to per-spot proportions.
    sums = df.sum(axis=1)
    df = df.div(sums, axis=0)
    # Log-normalize data (pseudocount of 1 avoids log(0)).
    df = np.log(df + 1)
    # Standardize each gene to zero mean / unit variance.
    scalar = StandardScaler()
    df = scalar.fit_transform(df)
    df = pd.DataFrame(df)
    return df
def principal_PCA(filename: str):
    """
    :param filename: file containing spatial transcriptomics data
    :return: pandas dataframe holding the first principal component
    """
    raw = pd.read_csv(filename, sep="\t")
    normalized = normalize_df(raw)
    # Project onto the leading principal component only.
    model = PCA(n_components=1, svd_solver='full')
    projected = model.fit_transform(normalized)
    print("PCA Explained Variance: %s" % model.explained_variance_ratio_)
    # Save components to a DataFrame
    return pd.DataFrame(projected)
def get_precision_recall(true, pred):
    """Compute precision and recall for binary (0/1) label sequences.

    Bug fixes vs. the original: the false-negative condition was a
    copy-paste duplicate of the false-positive one (``true == 0 and
    pred == 1``), so fn never counted actual misses; it is now
    ``true == 1 and pred == 0``.  Recall also used the non-standard
    denominator ``fp + fn`` and now uses the standard ``tp + fn``.

    :param true: sequence of ground-truth 0/1 labels
    :param pred: sequence of predicted 0/1 labels
    :return: (precision, recall); each defaults to 1 when its denominator
        is zero (no positive predictions / no positive truths)
    """
    tp = fp = fn = tn = 0
    for i in range(len(true)):
        if true[i] == 1 and pred[i] == 1:
            tp += 1
        elif true[i] == 0 and pred[i] == 1:
            fp += 1
        elif true[i] == 1 and pred[i] == 0:
            fn += 1
        elif true[i] == 0 and pred[i] == 0:
            tn += 1
    precision = tp / (tp + fp) if (tp + fp) != 0 else 1
    recall = tp / (tp + fn) if (tp + fn) != 0 else 1
    return precision, recall
| [
"pandas.read_csv",
"sklearn.decomposition.PCA",
"numpy.log",
"sklearn.preprocessing.StandardScaler",
"pandas.DataFrame",
"numpy.transpose"
] | [((200, 231), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '"""\t"""'}), "(filename, sep='\\t')\n", (211, 231), True, 'import pandas as pd\n'), ((474, 518), 'pandas.DataFrame', 'pd.DataFrame', (['spatial_xy'], {'columns': "['x', 'y']"}), "(spatial_xy, columns=['x', 'y'])\n", (486, 518), True, 'import pandas as pd\n'), ((2359, 2390), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '"""\t"""'}), "(filename, sep='\\t')\n", (2370, 2390), True, 'import pandas as pd\n'), ((3205, 3215), 'numpy.log', 'np.log', (['df'], {}), '(df)\n', (3211, 3215), True, 'import numpy as np\n'), ((3232, 3248), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3246, 3248), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3294, 3310), 'pandas.DataFrame', 'pd.DataFrame', (['df'], {}), '(df)\n', (3306, 3310), True, 'import pandas as pd\n'), ((3530, 3561), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '"""\t"""'}), "(filename, sep='\\t')\n", (3541, 3561), True, 'import pandas as pd\n'), ((3619, 3657), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(1)', 'svd_solver': '"""full"""'}), "(n_components=1, svd_solver='full')\n", (3622, 3657), False, 'from sklearn.decomposition import PCA\n'), ((3837, 3865), 'pandas.DataFrame', 'pd.DataFrame', (['pca_components'], {}), '(pca_components)\n', (3849, 3865), True, 'import pandas as pd\n'), ((1039, 1063), 'pandas.DataFrame', 'pd.DataFrame', (['components'], {}), '(components)\n', (1051, 1063), True, 'import pandas as pd\n'), ((2094, 2115), 'numpy.transpose', 'np.transpose', (['pred_np'], {}), '(pred_np)\n', (2106, 2115), True, 'import numpy as np\n'), ((2127, 2148), 'numpy.transpose', 'np.transpose', (['true_np'], {}), '(true_np)\n', (2139, 2148), True, 'import numpy as np\n')] |
import copy
from typing import Callable, Tuple
import numpy as np
from odyssey.distribution import Distribution
from iliad.integrators.fields import softabs
from iliad.integrators.info import CoupledInfo
from iliad.integrators.terminal import cond
from iliad.integrators.states.coupled_state import CoupledState
from iliad.linalg import solve_psd, sqrtm
def phi_a(qn: np.ndarray, xn: np.ndarray, pn: np.ndarray, yn: np.ndarray, step_size: float, vector_field: Callable) -> Tuple[np.ndarray]:
    """Component map A: advance (x, p) by the vector field evaluated at (q, y);
    q and y are left unchanged."""
    velocity, force = vector_field(qn, yn)
    return qn, xn + step_size * velocity, pn + step_size * force, yn
def phi_b(qn: np.ndarray, xn: np.ndarray, pn: np.ndarray, yn: np.ndarray, step_size: float, vector_field: Callable) -> Tuple[np.ndarray]:
    """Component map B: advance (q, y) by the vector field evaluated at (x, p);
    x and p are left unchanged."""
    velocity, force = vector_field(xn, pn)
    return qn + step_size * velocity, xn, pn, yn + step_size * force
def phi_c(qn: np.ndarray, xn: np.ndarray, pn: np.ndarray, yn: np.ndarray, step_size: float, omega: float) -> Tuple[np.ndarray]:
    """Component map C: rotate the difference coordinates (q - x, p - y) by the
    angle 2*omega*step_size while leaving the sums (q + x, p + y) fixed."""
    theta = 2 * omega * step_size
    c, s = np.cos(theta), np.sin(theta)
    sums = np.vstack([qn + xn, pn + yn])
    dq, dp = qn - xn, pn - yn
    rotated = np.vstack((
        np.hstack((c * dq + s * dp)),
        np.hstack((-s * dq + c * dp))))
    plus = 0.5 * (sums + rotated).ravel()
    minus = 0.5 * (sums - rotated).ravel()
    (qn, pn), (xn, yn) = np.split(plus, 2), np.split(minus, 2)
    return qn, xn, pn, yn
def coupled_integrate(
        vector_field: Callable,
        zo: Tuple[np.ndarray],
        step_size: float,
        omega: float
) -> Tuple[np.ndarray]:
    """Explicit integrator for non-separable Hamiltonian dynamics.

    One integration step is the composition of the component maps
    phi_a / phi_b / phi_c.  When ``omega`` is positive, the two copies of
    phase space are additionally coupled through the rotation map phi_c;
    otherwise the middle phi_b takes a full step.

    Args:
        vector_field: Function returning the time derivatives of position
            and momentum.
        zo: Tuple (q, x, p, y) in the expanded phase space.
        step_size: Integration step size.
        omega: Binding strength between the two approximate solutions.

    Returns:
        Tuple (qn, xn, pn, yn) of terminal states.
    """
    half_step = step_size / 2.0
    state = phi_a(*zo, half_step, vector_field)
    if omega > 0:
        state = phi_b(*state, half_step, vector_field)
        state = phi_c(*state, step_size, omega)
        state = phi_b(*state, half_step, vector_field)
    else:
        state = phi_b(*state, step_size, vector_field)
    return phi_a(*state, half_step, vector_field)
def constraint(q: np.ndarray, x: np.ndarray) -> np.ndarray:
    """Holonomic constraint: the two position copies must coincide.

    Args:
        q: Original position variable.
        x: Expanded position variable.

    Returns:
        Element-wise difference q - x, zero exactly when the constraint
        is satisfied.
    """
    return np.subtract(q, x)
def loss(
        vector_field: Callable,
        zo: Tuple[np.ndarray],
        step_size: float,
        omega: float,
        mu: np.ndarray
) -> np.ndarray:
    """Constraint violation after one integrator step with Lagrange
    multipliers ``mu`` applied to the momenta.

    Args:
        vector_field: Function returning the time derivatives of position
            and momentum.
        zo: Tuple (q, x, p, y) in the expanded phase space.
        step_size: Integration step size.
        omega: Binding strength between the two approximate solutions.
        mu: Lagrange multipliers added to p and subtracted from y.

    Returns:
        c: Constraint violation at the integrator output.
        zn: The integrator output itself.
    """
    qo, xo, po, yo = zo
    zn = coupled_integrate(
        vector_field,
        (qo, xo, po + mu, yo - mu),
        step_size,
        omega
    )
    return constraint(zn[0], zn[1]), zn
def step(val, vector_field, zo, step_size, omega):
    """Single step of a Newton iteration to identify constraint-preserving Lagrange
    multipliers.
    """
    # Broyden's method: J approximates the Jacobian of the constraint with
    # respect to the multipliers, Jinv its inverse.
    mup, _, J, Jinv, cp, num_iters = val
    # Quasi-Newton update direction for the multipliers.
    Dx = -Jinv@cp
    mun = mup + Dx
    cn, aux = loss(vector_field, zo, step_size, omega, mun)
    Df = cn - cp
    # Update inverse using Sherman–Morrison formula.
    u = (Df - J@Dx) / (Dx@Dx)
    v = Dx
    J += np.outer(u, v)
    div = 1. + v@Jinv@u
    if np.abs(div) > 1e-10:
        Jinv -= (Jinv@np.outer(u, v)@Jinv) / div
    else:
        # Denominator numerically zero: reset both estimates to identity.
        num_mu = len(mun)
        J = np.eye(num_mu)
        Jinv = np.eye(num_mu)
    num_iters += 1
    return mun, aux, J, Jinv, cn, num_iters
def single_step(
        vector_field: Callable,
        state: CoupledState,
        info: CoupledInfo,
        step_size: float,
        omega: float,
        thresh: float,
        max_iters: int
) -> Tuple:
    """Use the explicit integrator in combination with Lagrange multipliers in
    order to satisfy the constraints that the position and momentum variables
    in the expanded phase space are equal along trajectories.
    Args:
        vector_field: A function returning the time derivatives of position and
            momentum.
        state: An object containing the position and momentum variables of the
            state in phase space.
        info: An object that keeps track of the number of fixed point iterations
            and whether or not integration has been successful.
        step_size: Integration step_size.
        omega: Binding strength between the two approximate solutions.
        thresh: Convergence tolerance for Newton's method to find Lagrange
            multipliers.
        max_iters: Maximum number of iterations.
    Returns:
        state: An augmented state object with the updated position and momentum
            and values for the log-posterior and metric and their gradients.
        info: An information object with the updated number of fixed point
            iterations and boolean indicator for successful integration.
    """
    qo, po = state.position, state.momentum
    # Duplicate (q, p) to form the expanded phase-space point.
    zo = (qo, qo, po, po)
    # Lagrange multipliers start at zero.
    mu = np.zeros_like(qo)
    # Decide whether or not to initialize the estimate of the Jacobian with the
    # identity matrix or with a finite-difference approximation of the
    # Jacobian.
    num_mu = len(mu)
    J = np.eye(num_mu)
    Jinv = np.eye(num_mu)
    # I think the correct course is to provide the auxiliary data. If the code
    # doesn't complete a single iteration, then the auxiliary data will
    # remain a vector of zeros, which is clearly incorrect.
    cn, aux = loss(vector_field, zo, step_size, omega, mu)
    val = (mu, aux, J, Jinv, cn, 1)
    # Iterate Broyden updates until the termination condition fires.
    while cond(val, thresh, max_iters):
        val = step(val, vector_field, zo, step_size, omega)
    mu, (qn, xn, pn, yn), J, Jinv, cn, num_iters = val
    # Compute whether or not integration was successful.
    success = np.max(np.abs(cn)) < thresh
    # Averaging the momentum variables is the projection to the cotangent
    # bundle of the manifold. The averaging of the position variables is not
    # necessary; they are equal under the constraint. However, averaging has a
    # nicer aesthetic when only approximate constraint satisfaction is
    # required.
    qm = 0.5*(qn + xn)
    pm = 0.5*(pn + yn)
    state.position, state.momentum = qm, pm
    info.num_iters += num_iters
    info.success &= success
    return state, info
def coupled_leapfrog(
        state: CoupledState,
        step_size: float,
        num_steps: int,
        distr: Distribution,
        vector_field: Callable,
        omega: float,
        thresh: float,
        max_iters: int
) -> Tuple[CoupledState, CoupledInfo]:
    """Coupled explicit leapfrog integrator with Lagrange multipliers enforcing
    reversibility and volume preservation.

    Repeatedly applies ``single_step`` for ``num_steps`` iterations on a copy
    of the input state, then refreshes the state's cached quantities from the
    distribution.

    Args:
        state: Position/momentum state in phase space.
        step_size: Integration step size.
        num_steps: Number of integration steps.
        distr: Distribution used to update the state's cached quantities.
        vector_field: Function returning the time derivatives of position
            and momentum.
        omega: Binding strength between the two approximate solutions.
        thresh: Convergence tolerance for the multiplier Newton iteration.
        max_iters: Maximum number of Newton iterations per step.

    Returns:
        The updated state and an info object with iteration counts and a
        success indicator.
    """
    state = copy.copy(state)
    info = CoupledInfo()
    for _ in range(num_steps):
        state, info = single_step(
            vector_field,
            state,
            info,
            step_size,
            omega,
            thresh,
            max_iters
        )
    state.update(distr)
    return state, info
| [
"numpy.abs",
"numpy.eye",
"iliad.integrators.info.CoupledInfo",
"numpy.hstack",
"iliad.integrators.terminal.cond",
"numpy.split",
"numpy.outer",
"numpy.vstack",
"numpy.cos",
"numpy.sin",
"copy.copy",
"numpy.zeros_like"
] | [((1035, 1064), 'numpy.cos', 'np.cos', (['(2 * omega * step_size)'], {}), '(2 * omega * step_size)\n', (1041, 1064), True, 'import numpy as np\n'), ((1071, 1100), 'numpy.sin', 'np.sin', (['(2 * omega * step_size)'], {}), '(2 * omega * step_size)\n', (1077, 1100), True, 'import numpy as np\n'), ((1107, 1136), 'numpy.vstack', 'np.vstack', (['[qn + xn, pn + yn]'], {}), '([qn + xn, pn + yn])\n', (1116, 1136), True, 'import numpy as np\n'), ((5144, 5158), 'numpy.outer', 'np.outer', (['u', 'v'], {}), '(u, v)\n', (5152, 5158), True, 'import numpy as np\n'), ((6887, 6904), 'numpy.zeros_like', 'np.zeros_like', (['qo'], {}), '(qo)\n', (6900, 6904), True, 'import numpy as np\n'), ((7101, 7115), 'numpy.eye', 'np.eye', (['num_mu'], {}), '(num_mu)\n', (7107, 7115), True, 'import numpy as np\n'), ((7127, 7141), 'numpy.eye', 'np.eye', (['num_mu'], {}), '(num_mu)\n', (7133, 7141), True, 'import numpy as np\n'), ((7458, 7486), 'iliad.integrators.terminal.cond', 'cond', (['val', 'thresh', 'max_iters'], {}), '(val, thresh, max_iters)\n', (7462, 7486), False, 'from iliad.integrators.terminal import cond\n'), ((9698, 9714), 'copy.copy', 'copy.copy', (['state'], {}), '(state)\n', (9707, 9714), False, 'import copy\n'), ((9726, 9739), 'iliad.integrators.info.CoupledInfo', 'CoupledInfo', ([], {}), '()\n', (9737, 9739), False, 'from iliad.integrators.info import CoupledInfo\n'), ((1383, 1400), 'numpy.split', 'np.split', (['qnpn', '(2)'], {}), '(qnpn, 2)\n', (1391, 1400), True, 'import numpy as np\n'), ((1402, 1419), 'numpy.split', 'np.split', (['xnyn', '(2)'], {}), '(xnyn, 2)\n', (1410, 1419), True, 'import numpy as np\n'), ((5190, 5201), 'numpy.abs', 'np.abs', (['div'], {}), '(div)\n', (5196, 5201), True, 'import numpy as np\n'), ((5308, 5322), 'numpy.eye', 'np.eye', (['num_mu'], {}), '(num_mu)\n', (5314, 5322), True, 'import numpy as np\n'), ((5338, 5352), 'numpy.eye', 'np.eye', (['num_mu'], {}), '(num_mu)\n', (5344, 5352), True, 'import numpy as np\n'), ((1204, 1240), 'numpy.hstack', 
'np.hstack', (['(cos * qnmxn + sin * pnmyn)'], {}), '(cos * qnmxn + sin * pnmyn)\n', (1213, 1240), True, 'import numpy as np\n'), ((1248, 1285), 'numpy.hstack', 'np.hstack', (['(-sin * qnmxn + cos * pnmyn)'], {}), '(-sin * qnmxn + cos * pnmyn)\n', (1257, 1285), True, 'import numpy as np\n'), ((7682, 7692), 'numpy.abs', 'np.abs', (['cn'], {}), '(cn)\n', (7688, 7692), True, 'import numpy as np\n'), ((5233, 5247), 'numpy.outer', 'np.outer', (['u', 'v'], {}), '(u, v)\n', (5241, 5247), True, 'import numpy as np\n')] |
"""
Author: Anonymous
Description:
Contains several features for analyzing and comparing the
performance across multiple experiments:
- perfloss : Performance w.r.t. test/train loss ratio and the
used AE architecture
- perfratio : Showing performance w.r.t. test, train loss and the
used AE architecture
- bd : Plots the behaviour coverage graph
- fit : Plots the fitness graph
"""
import os
import sys
import ast
import csv
import logging
import pickle
import glob
import argparse
import time
import numpy as np
import pandas as pd
import multiprocessing as mpi
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.markers as mmarkers
import matplotlib.ticker as ticker
from itertools import combinations
from functools import partial
from behaviour_representations.analysis import load_metadata, load_dataset
from behaviour_representations.utils.utils import timing
# Module-level logger for this analysis script.
logger = logging.getLogger(__name__)

# Command-line interface: input/output directories, plot selection, filtering.
parser = argparse.ArgumentParser()
parser.add_argument('-load', '--load_path',
                    default=None, required=True,
                    help="Path to directory to plot.")
parser.add_argument('-save', '--save_path',
                    default=None, required=False,
                    help="Path to directory where to save.")
parser.add_argument('-t', '--plot_type', nargs='+',
                    default=['bd'],  # , 'fit', 'perfloss', 'perfratio', 'perfl2'
                    help="Select plot type(s):\n"
                         "'perfloss'\n"
                         "'perfratio'\n"
                         "'perfl2'\n"
                         "'bd'\n"
                         "'fit'\n")
parser.add_argument('-f', '--filter_string',
                    default='',
                    help="Take into account experiments that contain this.")
def mscatter(x, y, ax=None, m=None, **kw):
    """Scatter plot supporting a per-point sequence of markers ``m``.

    Falls back to the current axes when ``ax`` is None; extra keyword
    arguments are forwarded to ``Axes.scatter``.  Per-point markers are only
    applied when ``m`` has one entry per point.
    """
    if not ax:
        ax = plt.gca()
    sc = ax.scatter(x, y, **kw)
    if (m is not None) and (len(m) == len(x)):
        marker_paths = []
        for marker in m:
            if isinstance(marker, mmarkers.MarkerStyle):
                style = marker
            else:
                style = mmarkers.MarkerStyle(marker)
            marker_paths.append(
                style.get_path().transformed(style.get_transform()))
        sc.set_paths(marker_paths)
    return sc
def _smooth(data, w_len=10000):
window = np.ones(w_len)/w_len
pad = np.ones(w_len//2)
data_pad = np.concatenate([pad*data[0], data, pad[:-1]*data[-1]])
data_smooth = np.convolve(data_pad, window, mode='valid')
assert len(data_smooth) == len(data), \
"data_smooth: {}; data: {}; smooth {}".format(
len(data_smooth), len(data), len(smooth))
return data_smooth
def load_bddata(filename):
    """Load a behaviour-descriptor CSV into a dict keyed by column name.

    Files that do not have exactly 8 columns are delegated to the legacy
    loader.  The experiment name is recovered from the filename; mixing
    ratios are only meaningful for '_mix_' / '_dde' experiments.
    """
    frame = pd.read_csv(filename)
    if frame.shape[1] != 8:
        return load_bddata_old(frame.values.T, filename)
    data_dict = dict(zip(frame.columns, frame.values.T))
    data_dict['name'] = filename.split('/')[-1][9:-4]
    if '_mix_' not in data_dict['name'] and '_dde' not in data_dict['name']:
        data_dict['ratios'] = None
    elif len(data_dict['ratios']) > 1:
        # The first ratio entry is a placeholder; copy the second over it.
        data_dict['ratios'][0] = data_dict['ratios'][1]
    return data_dict
def load_bddata_old(data, filename):
    """Legacy loader: parse a transposed array of raw CSV columns into the
    behaviour-descriptor dict (name, loop/iteration counters, coverage,
    fitness and optional mixing ratios).

    Args:
        data: transposed raw CSV columns (rows indexed 0..6).
        filename: original CSV path, used to recover the experiment name.

    Returns:
        dict with keys 'name', 'nloop', 'niter', 'nsmp', 'coverage',
        'fitness' and 'ratios'.
    """
    # data = pd.read_csv(filename, header=None)
    data_dict = {}
    # Get experiment name
    data_dict['name'] = filename.split('/')[-1][9:-4]
    # Get loop number
    data_dict['nloop'] = data[0]
    # Get iteration number
    data_dict['niter'] = data[1]
    # Get number of samples per iteration
    data_dict['nsmp'] = data[2]
    # Get behaviour descriptor lists (columns hold stringified Python
    # literals; parse them in parallel).
    with mpi.Pool(processes=7) as pool:
        data_nbd = list(pool.map(ast.literal_eval, data[3]))
        # data_dict['labs'] = list(pool.map(ast.literal_eval, data[4]))
        try:
            data_fit = list(pool.map(ast.literal_eval, data[4]))
        except:
            data_fit = None
        try:
            # data_ratios = list(pool.map(ast.literal_eval, data[5][1:]))
            data_ratios = list(pool.map(ast.literal_eval, data[6][1:]))
        except:
            data_ratios = None
    # [len(bds) for bds in data_nbd])
    # Coverage = number of discovered behaviours per loop.
    data_dict['coverage'] = np.array(list(map(len, data_nbd)))
    data_dict['fitness'] = np.array(list(map(max, data_fit))) \
        if data_fit is not None else None
    # Get mixing ratios if available
    if data_ratios is None or len(data_ratios)==0 \
            or '_mix_' not in data_dict['name']:
        data_dict['ratios'] = None
    else:
        tmp = np.array(data_ratios)
        assert np.min(tmp, axis=0)[1] > 0
        ratios = tmp[:,0]/tmp[:,1]
        # Prepend the first ratio so the series aligns with the loop count.
        data_dict['ratios'] = np.concatenate([[ratios[0]], ratios])
    return data_dict
def load_lossdata(filepath):
    """Extract the final test/train autoencoder losses for an experiment.

    ``filepath`` points at a file inside the experiment directory; the losses
    are read from ``saved_models/training_losses_param_ae.csv`` in that
    directory.  Returns (None, None) when the loss file is missing or empty,
    and a None test loss when its first recorded component is None.
    """
    exp_dir = '/'.join(filepath.split('/')[:-1])
    loss_csv = os.path.join(exp_dir,
                            'saved_models/training_losses_param_ae.csv')
    try:
        losses = pd.read_csv(loss_csv,
                            header=None, usecols=[1, 2], names=['test', 'train'])
    except:
        return None, None
    if len(losses['test'].values) == 0:
        return None, None
    final_test = ast.literal_eval(losses['test'].values[-1])
    final_test = None if final_test[0] is None else sum(final_test)
    final_train = sum(ast.literal_eval(losses['train'].values[-1]))
    return final_test, final_train
def load_metadata_info(filepath):
    """Extract the latent-dimension label, representation type and search
    algorithm from an experiment's metadata.

    Parameter-space ('ps_') experiments carry no learned representation and
    are reported as ('PS', 'PS', search_algo).
    """
    exp_dir = '/'.join(filepath.split('/')[:-1])
    metadata = load_metadata(exp_dir)
    search_algo = metadata['exploration']['normal']
    if 'ps_' in search_algo:
        return 'PS', 'PS', search_algo
    ae_cfg = metadata['training']['ae_param']
    if ae_cfg is not None:
        # Encode the AE architecture as 'AE-<w1>-<w2>-...'.
        representation_type = 'AE-' + '-'.join(
            str(layer[1]) for layer in ae_cfg['architecture'])
    else:
        representation_type = 'PCA'
    dim_latent = metadata['training']['dim_latent']
    return 'LD-{}'.format(dim_latent), representation_type, search_algo
def mpi_get_dist(ij, param_original):
    """Euclidean distance between the parameter vectors at index pair ``ij``.

    Multiprocessing-friendly helper: ``ij`` is an (i, j) pair of indices
    into ``param_original``.
    """
    first, second = param_original[ij[0]], param_original[ij[1]]
    return np.linalg.norm(first - second)
def load_get_l2dist(filepath):
    """ Extract the mean of the pairwise l2-dist of parameters in archive """
    # Move from the CSV file to its experiment directory.
    filepath = '/'.join(filepath.split('/')[:-1])
    dataset = load_dataset(filepath)
    param_original = dataset['param_original']
    del dataset
    # Mask of finite parameter dimensions, taken from the first row;
    # infinite entries mark unused dimensions and are excluded.
    non_inf = param_original[0]<np.inf
    # All unordered index pairs for the pairwise distances.
    comb_idx = list(combinations(range(len(param_original)), 2))
    param_flat = param_original[:, non_inf]
    # ### indexing - cannot fit in memory
    # try:
    #     mm = np.linalg.norm(param_flat[None,...]-param_flat[:,None,:], axis=2)
    #     mean_dist = np.triu(mm).sum() / (np.triu(mm)>0).sum()
    #     return mean_dist
    # except Exception as e:
    #     print("\nNUMPY VERSION FAILED:", e)
    ### parallelizing with multiprocessing
    with mpi.Pool(mpi.cpu_count()-1) as pool:
        dist_list = pool.map(partial(mpi_get_dist,
                                     param_original=param_flat), comb_idx)
    mean_dist = np.mean(dist_list)
    # ### bruteforce
    # dist_list = []
    # for i, pp in enumerate(param_original):
    #     focus_param = pp[non_inf]
    #     for j, qq in enumerate(param_original):
    #         if i != j:
    #             compare_param = qq[non_inf]
    #             dist_list.append(np.linalg.norm(focus_param-compare_param))
    # mean_dist = np.mean(dist_list)
    return mean_dist
################################################################################
def plot_performance_v_l2dist(refpath, graph_name, metric_name, metric_dim,
                              filter_string,
                              savepath=None, show_plots=False, spec_title=None,
                              img_format='jpg', dpi=300, **kwargs):
    """ Get all .csv data files of same experiment type and plot behaviour
    coverage against the mean pairwise L2-distance of archive parameters.

    Extracted per-experiment statistics are cached in a pickle next to
    ``refpath`` so repeated invocations skip the expensive extraction.
    """
    if len(filter_string): graph_name = graph_name+'__'+filter_string
    fig, ax = plt.subplots(1, 1, figsize=(5, 5))
    mlist = ['o', 'v', '^', '*','P', 's', 'X', '<', '>', 'p', 'D', 'd']
    colors = np.array(plt.rcParams['axes.prop_cycle'].by_key()['color'])
    fname = os.path.join(refpath, 'saved_performance_v_l2dist.pkl')
    if os.path.exists(fname):
        # Reuse cached extraction results.
        print("\nLOADING:", fname)
        with open(fname, "rb") as f:
            experiments_dict = pickle.load(f)
    else:
        # Organise experiments to consider
        experiments_dict = {}
        if '.csv' in refpath:
            # If plotting only one experiment
            show_xloop = True
            exp_name = '_'.join(refpath.split('/')[-2].split('__')[:2])
            experiments_dict[exp_name] = refpath
        else:
            # Organise plotting multiple experiments
            filter_include = []
            filter_exclude = []
            # '+'-separated filter terms; a leading '^' excludes the term.
            for fterm in filter_string.split('+'):
                if len(fterm) and fterm[0]=='^':
                    filter_exclude += glob.glob('{}/ENV_*{}*'.format(refpath,
                                                                     fterm[1:]))
                else:
                    filter_include += glob.glob('{}/ENV_*{}*'.format(refpath,
                                                                     fterm))
            filter_exp = np.setdiff1d(filter_include, filter_exclude)
            # Extract only AE-based experiments
            for d in filter_exp:
                # Define the filters
                exp_name = d.split('/')[-1].split('__')[2]
                # Group csv files accordimg to filters
                if '__S' not in d.split('/')[-1]:
                    csv_file = glob.glob(d+'/S*/ref_data_*.csv')
                else:
                    csv_file = glob.glob(d+'/ref_data_*.csv')
                if exp_name in experiments_dict.keys():
                    experiments_dict[exp_name]['csv'] += csv_file
                else:
                    experiments_dict[exp_name] = dict(csv=csv_file)
        # Load and plot points for each experiment
        experiments_to_plot = sorted(experiments_dict.keys(), reverse=True)
        n_exp = len(experiments_to_plot)
        print("\n\n=== starting: L2-DIST GRAPH ===\n- {}".format(
            '\n- '.join(experiments_to_plot)))
        all_points = []
        for i, klab in enumerate(experiments_to_plot):
            print("\n> Extracting performance ({}/{}): '{}'".format(
                i+1, n_exp, klab))
            # Get all seeds of this experiment and average values
            l2dist, num_bd = [], []
            for cv in experiments_dict[klab]['csv']:
                print("  > Loading:", cv)
                latent_dim, latent_type, search_algo = load_metadata_info(cv)
                data_dict = load_bddata(cv)
                if len(data_dict['coverage']):
                    # Final coverage value and archive parameter spread.
                    final_num_bd = data_dict['coverage'][-1]
                    num_bd.append(final_num_bd)
                    seedl2dist = load_get_l2dist(cv)
                    l2dist.append(seedl2dist)
                else:
                    print("  > EMPTY!")
                del data_dict
            if len(num_bd) == 0:
                print("> ALL EMPTY!")
                continue
            else:
                # Aggregate across seeds: median coverage, mean l2-distance.
                experiments_dict[klab]['num_bd'] = np.median(num_bd)
                experiments_dict[klab]['l2dist'] = np.mean(l2dist)
                experiments_dict[klab]['latent_type'] = latent_type
                experiments_dict[klab]['latent_dim'] = latent_dim
                experiments_dict[klab]['search_algo'] = search_algo
        # save experiments_dict
        with open(fname, "wb") as f:
            print("\nSAVING:", fname)
            pickle.dump(experiments_dict, f)
    # Plot graph
    total_l2 = [ed['l2dist'] for ed in experiments_dict.values()]
    total_nbd = [ed['num_bd'] for ed in experiments_dict.values()]
    l2min, l2max = min(total_l2), max(total_l2)
    bdmin, bdmax = min(total_nbd), max(total_nbd)
    total_ltype = [ed['latent_type'] for ed in experiments_dict.values()]
    total_ldim = [ed['latent_dim'] for ed in experiments_dict.values()]
    total_search = [ed['search_algo'] for ed in experiments_dict.values()]
    uniq_ltype = sorted(np.unique(total_ltype), reverse=True)
    uniq_ldim = sorted(np.unique(total_ldim), reverse=True)
    uniq_search = sorted(np.unique(total_search))
    # Visual encoding: size <- representation type, marker <- latent dim,
    # colour <- search algorithm, hatch marks PCA representations.
    szdict = dict(zip(uniq_ltype, (5*np.arange(1,len(uniq_ltype)+1))**2))
    mkdict = dict(zip(uniq_ldim, mlist[:len(uniq_ldim)]))
    expdict = dict(zip(uniq_search, colors[:len(uniq_search)]))
    plot_ltype = [szdict[rtl] for rtl in total_ltype]
    plot_ldim = [mkdict[rtd] for rtd in total_ldim]
    plot_search = [expdict[rts] for rts in total_search]
    plot_hatch = ['....' if 'PCA' in rtl else '' for rtl in total_ltype]
    # Plot experimant points
    for i in range(len(experiments_dict)):
        ax.scatter(total_l2[i], total_nbd[i],
                   s=plot_ltype[i], marker=plot_ldim[i], c=plot_search[i],
                   hatch=plot_hatch[i],
                   label=total_search[i],
                   edgecolor='k', lw=.4, alpha=0.5)
    # Plot lines to PS versions
    ax.set_xlim(0.1, 10*l2max)
    ax.set_ylim(0.8*bdmin, 1.05*bdmax)
    ps_search = [ek for ek in experiments_dict.keys() if 'ps_' in ek]
    for psexp in ps_search:
        sa = experiments_dict[psexp]['search_algo']
        xcoord = experiments_dict[psexp]['l2dist']
        ycoord = experiments_dict[psexp]['num_bd']
        # Add linear line
        ax.vlines(xcoord, ax.get_ylim()[0], ycoord, alpha=0.6, linestyles='--',
                  lw=1, colors=expdict[sa], zorder=0)
        # Add ps_mape line
        ax.hlines(ycoord, ax.get_xlim()[0], xcoord, alpha=0.6, linestyles='--',
                  lw=1, colors=expdict[sa], zorder=0)
    # Labels
    max_bd = np.prod(metric_dim)
    ylabel = 'discovered behaviours (max {})'.format(max_bd)
    xlabel = 'mean L2-distance'
    # Add labels
    num_exp = len(experiments_dict)
    plt.minorticks_on()
    ax.set_title('{} (total: {} experiments)'.format(graph_name, num_exp))
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_xscale("log", nonposx='clip')
    ax.grid(b=True, which='minor', alpha=0.2)
    ax.grid(b=True, which='major', alpha=0.5)
    # Add legends (dummy scatter handles per marker / size / colour).
    ldim_sorted = np.vstack([(plt.scatter([], [], c='k', marker=mkdict[ld]),
                   ld) for ld in sorted(mkdict.keys(), reverse=True)])
    ltype_sorted = np.vstack([(plt.scatter([], [], c='w', edgecolor='k',
                    s=szdict[lt], hatch='....' if 'PCA' in lt else ''), lt) \
                    for lt in sorted(szdict.keys(), reverse=True)])
    search_sorted = np.vstack([(plt.scatter([], [], c=expdict[lt]), lt) \
                     for lt in sorted(expdict.keys())])
    lgd_search = ax.legend(search_sorted[:,0], search_sorted[:,1],
        loc='upper left', bbox_to_anchor=(1., 1.01,), ncol=1)
    nsalg = 1 - len(search_sorted)*0.065
    lgd_ldim = ax.legend(ldim_sorted[:,0], ldim_sorted[:,1],
        loc='upper left', bbox_to_anchor=(1., nsalg,), ncol=1) #len(uniq_ldim))
    lgd_ltype = ax.legend(ltype_sorted[:,0], ltype_sorted[:,1],
        loc='upper left', bbox_to_anchor=(1.27, nsalg,), ncol=1) #len(uniq_ltype))
    ax.add_artist(lgd_ldim)
    ax.add_artist(lgd_ltype)
    ax.add_artist(lgd_search)
    # Save/show figure
    savepath = refpath if savepath is None else savepath
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    if type(spec_title)==int:
        plt.savefig('{}/peformance_v_l2dist_analysis_loop_{:05d}.{}'.format(
                        savepath, spec_title, img_format),
                    format=img_format, bbox_extra_artists=(lgd_ldim, lgd_ltype),
                    bbox_inches='tight', dpi=dpi)
    else:
        plt.savefig('{}/{}__peformance_v_l2dist_analysis.{}'.format(
                        savepath, graph_name, img_format),
                    format=img_format, bbox_extra_artists=(lgd_ldim, lgd_ltype),
                    # bbox_inches=mpl.transforms.Bbox([[0,0],[10,10.1]]),
                    bbox_inches='tight',
                    # pad_inches=0.3,
                    dpi=dpi)
    if show_plots:
        plt.show()
    else:
        plt.cla()
################################################################################
def plot_performance_v_ratio(refpath, graph_name, metric_name, metric_dim,
                             filter_string,
                             savepath=None, show_plots=False, spec_title=None,
                             img_format='jpg', dpi=300, **kwargs):
    """Plot behaviour coverage against the final test/train loss ratio.

    One subplot is drawn per latent-search strategy in ``search_list``.
    Each point is one AE-based experiment (statistics taken over its
    seeds): x = mean final test loss / mean final train loss,
    y = median number of discovered behaviours.  Marker shape encodes
    the latent dimension, marker size the latent type, and colour the
    final test loss.  The median coverage over the 'ps_mape' runs is
    drawn as a dashed green reference line on each subplot.  The figure
    is saved under ``savepath`` (``refpath`` if not given).
    ``metric_name`` is accepted for interface consistency but unused.
    """
    if len(filter_string): graph_name = graph_name+'__'+filter_string
    # nrow, ncol = 1, 4
    nrow, ncol = 2, 2
    fig, ax = plt.subplots(nrow, ncol, figsize=(5*ncol,5*nrow))
    ax = ax.reshape((nrow, ncol))
    cm = plt.cm.get_cmap('jet') # RdYlBu summer viridis_r
    # mdict = {2:'o', 5:'v', 10:'^', 20:'*', 50:'P', 100:'s'}
    mlist = ['o', 'v', '^', '*','P', 's', 'x', '<', '>', 'p', 'D', 'd']
    search_list = ['ls_mape_jacobian', 'ls_mape_standard',
                   'ls_mape_mix_region', 'ls_mape_mix_ucb']
    # Organise experiments to consider
    experiments_dict = {}
    if '.csv' in refpath:
        # If plotting only one experiment
        show_xloop = True
        exp_name = '_'.join(refpath.split('/')[-2].split('__')[:2])
        experiments_dict[exp_name] = refpath
    else:
        # Organise plotting multiple experiments.  A '+'-separated filter
        # term prefixed with '^' excludes matching directories.
        filter_include = []
        filter_exclude = []
        for fterm in filter_string.split('+'):
            if len(fterm) and fterm[0]=='^':
                filter_exclude += glob.glob('{}/ENV_*AE*{}*'.format(refpath,
                                                                  fterm[1:]))
            else:
                filter_include += glob.glob('{}/ENV_*AE*{}*'.format(refpath,
                                                                  fterm))
        filter_exp = np.setdiff1d(filter_include, filter_exclude)
        # Extract only AE-based experiments
        for d in filter_exp:
            # Define the filters
            exp_name = d.split('/')[-1].split('__')[2]
            # Group csv files according to filters
            if '__S' not in d.split('/')[-1]:
                csv_file = glob.glob(d+'/S*/ref_data_*.csv')
            else:
                csv_file = glob.glob(d+'/ref_data_*.csv')
            if exp_name in experiments_dict.keys():
                experiments_dict[exp_name]['csv'] += csv_file
            else:
                experiments_dict[exp_name] = dict(csv=csv_file)
    # Load and plot points for each experiment
    experiments_to_plot = sorted(experiments_dict.keys(), reverse=True)
    n_exp = len(experiments_to_plot)
    print("\n\n=== starting: ANALYSIS GRAPH ===\n- {}".format(
          '\n- '.join(experiments_to_plot)))
    all_points = []
    for i, klab in enumerate(experiments_to_plot):
        print("\n> Extracting performance ({}/{}): '{}'".format(
              i+1, n_exp, klab))
        # Get all seeds of this experiment and average values
        loss_test, loss_train, num_bd = [], [], []
        for cv in experiments_dict[klab]['csv']:
            print(" > Loading:", cv)
            latent_dim, latent_type, _ = load_metadata_info(cv)
            data_dict = load_bddata(cv)
            data_coverage = data_dict['coverage']
            final_test_loss, final_train_loss = load_lossdata(cv)
            if len(data_coverage) and final_test_loss is not None:
                loss_test.append(final_test_loss)
                loss_train.append(final_train_loss)
                final_num_bd = data_coverage[-1]
                num_bd.append(final_num_bd)
            else:
                print(" > EMPTY!")
        if len(num_bd) == 0 or len(loss_test) == 0:
            print("> ALL EMPTY!")
            continue
        else:
            # Aggregate over seeds: mean losses, median coverage.
            experiments_dict[klab]['loss_test'] = np.mean(loss_test)
            experiments_dict[klab]['loss_train'] = np.mean(loss_train)
            experiments_dict[klab]['num_bd'] = np.median(num_bd)
            experiments_dict[klab]['latent_type'] = latent_type
            experiments_dict[klab]['latent_dim'] = latent_dim
    # Extract ps_mape as a reference
    ps_vals = []
    for ps in glob.glob('{}/ENV_*ps_mape*/S*/ref_data_*.csv'.format(refpath)):
        data_dict = load_bddata(ps)
        ps_vals.append(data_dict['coverage'][-1])
    psmedian = np.median(ps_vals)
    # Plot separate graphs
    total_test = [ed['loss_test'] for ed in experiments_dict.values() \
                  if 'loss_test' in ed.keys()]
    total_train = [ed['loss_train'] for ed in experiments_dict.values() \
                   if 'loss_test' in ed.keys()]
    total_nbd = [ed['num_bd'] for ed in experiments_dict.values() \
                 if 'loss_test' in ed.keys()]
    tsmin, tsmax = min(total_test), max(total_test)
    trmin, trmax = min(total_train), max(total_train)
    bdmin, bdmax = min(total_nbd), max(total_nbd + [psmedian])
    total_ltype = [ed['latent_type'] for ed in experiments_dict.values() \
                   if 'loss_test' in ed.keys()]
    total_ldim = [ed['latent_dim'] for ed in experiments_dict.values() \
                  if 'loss_test' in ed.keys()]
    uniq_ltype = sorted(np.unique(total_ltype))
    uniq_ldim = sorted(np.unique(total_ldim))
    # Marker size per latent type, marker shape per latent dimension.
    szdict = dict(zip(uniq_ltype, (5*np.arange(1,len(uniq_ltype)+1))**2))
    mkdict = dict(zip(uniq_ldim, mlist[:len(uniq_ldim)]))
    for i, ss in enumerate(search_list):
        if sum([ss in ek for ek in experiments_dict.keys()])==0:
            continue
        # Extract values from dictionary
        plot_test = [vv['loss_test'] for kk, vv in experiments_dict.items() \
                     if ss in kk and 'loss_test' in vv.keys()] # if 'loss_test' in ed.keys()
        plot_train = [vv['loss_train'] for kk, vv in experiments_dict.items() \
                      if ss in kk and 'loss_test' in vv.keys()]
        plot_nbd = [vv['num_bd'] for kk, vv in experiments_dict.items() \
                    if ss in kk and 'loss_test' in vv.keys()]
        raw_ltype = [vv['latent_type'] for kk, vv in experiments_dict.items() \
                     if ss in kk and 'loss_test' in vv.keys()]
        raw_ldim = [vv['latent_dim'] for kk, vv in experiments_dict.items() \
                    if ss in kk and 'loss_test' in vv.keys()]
        plot_ltype = [szdict[rtl] for rtl in raw_ltype]
        plot_ldim = [mkdict[rtd] for rtd in raw_ldim]
        # x-axis value: final test loss divided by final train loss
        ratios = np.array(plot_test)/np.array(plot_train)
        scatter = mscatter(ratios, plot_nbd,
                           ax=ax[i//ncol, i%ncol],
                           cmap=cm, edgecolor='k', lw=.4, alpha=0.5,
                           vmin=tsmin, vmax=tsmax,
                           c=plot_test, s=plot_ltype, m=plot_ldim,
                           norm=mpl.colors.LogNorm())
        ax[i//ncol, i%ncol].set_xlim(min(0.9, 0.5*min(ratios)),
                                      max(1.1, 1.5*max(ratios)))
    # Fix figure
    if nrow == 1:
        fig.tight_layout(rect=[0.01, 0.1, 0.955, 0.92])
        cbar_ax = fig.add_axes([0.95, 0.21, 0.01, 0.65])
        # plt.subplots_adjust(wspace=-0.1)
        ty = 0.98
    else:
        fig.tight_layout(rect=[0.012, 0.075, 0.915, 0.97])
        cbar_ax = fig.add_axes([0.91, 0.115, 0.015, 0.82])
        plt.subplots_adjust(hspace=0.15)
        ty = 0.998
    # Labels
    max_bd = np.prod(metric_dim)
    clabel = 'test loss final'
    ylabel = 'discovered behaviours (max {})'.format(max_bd)
    xlabel = 'test / train loss ratio'
    # Add colorbar
    cbar = fig.colorbar(scatter, cax=cbar_ax, orientation='vertical') #, format='%.0e'
    cbar.ax.get_yaxis().labelpad = 15
    cbar.ax.set_ylabel(clabel, rotation=270)
    # cbar.ax.set_yscale("log", nonposy='clip')
    # Add labels
    num_exp = len(experiments_dict)
    fig.suptitle('{} (total: {} experiments)'.format(graph_name, num_exp),
                 fontsize=16, y=ty) # if nrow>1 else 1.012)
    plt.minorticks_on()
    for i, ss in enumerate(search_list):
        ax[i//ncol, i%ncol].set_title(ss)
        if nrow==1:
            ax[i//ncol, i%ncol].set_xlabel(xlabel)
            if i==0: ax[i//ncol, i%ncol].set_ylabel(ylabel)
        elif nrow>1:
            if not i%2: ax[i//ncol, i%ncol].set_ylabel(ylabel)
            if i>=ncol: ax[i//ncol, i%ncol].set_xlabel(xlabel)
        # ax[i//ncol, i%ncol].set_xlim(0, 1.1*rmax)
        ax[i//ncol, i%ncol].set_ylim(0.95*bdmin, 1.05*bdmax)
        ax[i//ncol, i%ncol].set_xscale("log") #, nonposx='clip')
        # ax[i//ncol, i%ncol].set_yscale("log", nonposy='clip')
        ax[i//ncol, i%ncol].grid(b=True, which='minor', alpha=0.2)
        ax[i//ncol, i%ncol].grid(b=True, which='major', alpha=0.5)
        # Mark ratio == 1 (test loss equals train loss)
        ax[i//ncol, i%ncol].vlines(1, *ax[i//ncol, i%ncol].get_ylim(),
                                    linestyles='--', lw=1, colors='gray', zorder=0)
        # Add ps_mape line
        ax[i//ncol, i%ncol].hlines(psmedian, *ax[i//ncol, i%ncol].get_xlim(),
                                    linestyles='--', lw=1, colors='green', zorder=0)
        # ax[i//ncol, i%ncol].set_aspect('equal', adjustable='box')
        # ax.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
    # Add legends (empty scatters just produce legend handles)
    ldim_sorted = np.vstack([(plt.scatter([], [], c='k', marker=mkdict[ld]),
                             ld) for ld in sorted(mkdict.keys())])
    ltype_sorted = np.vstack([(plt.scatter([], [], c='w', edgecolor='k',
                   s=szdict[lt]), lt) for lt in sorted(szdict.keys())])
    lgd_ldim = ax[-1,0].legend(ldim_sorted[:,0], ldim_sorted[:,1],
        loc='upper left', bbox_to_anchor=(0., -.12,), ncol=len(uniq_ldim))
    lgd_ltype = ax[-1,0].legend(ltype_sorted[:,0], ltype_sorted[:,1],
        loc='upper left', bbox_to_anchor=(0., -.2,), ncol=len(uniq_ltype))
    ax[-1,0].add_artist(lgd_ldim)
    ax[-1,0].add_artist(lgd_ltype)
    # Save/show figure
    savepath = refpath if savepath is None else savepath
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    if type(spec_title)==int:
        plt.savefig('{}/peformance_v_ratio_analysis_loop_{:05d}.{}'.format(
                    savepath, spec_title, img_format),
                    format=img_format, bbox_extra_artists=(lgd_ldim, lgd_ltype),
                    bbox_inches='tight', dpi=dpi)
    else:
        plt.savefig('{}/{}__peformance_v_ratio_analysis.{}'.format(
                    savepath, graph_name, img_format),
                    format=img_format, bbox_extra_artists=(lgd_ldim, lgd_ltype),
                    # bbox_inches=mpl.transforms.Bbox([[0,0],[10,10.1]]),
                    # bbox_inches='tight',
                    # pad_inches=0.3,
                    dpi=dpi)
    if show_plots:
        plt.show()
    else:
        plt.cla()
################################################################################
def plot_performance_v_loss(refpath, graph_name, metric_name, metric_dim,
                            filter_string,
                            savepath=None, show_plots=False, spec_title=None,
                            img_format='jpg', dpi=300, **kwargs):
    """Plot final test loss against final train loss per experiment.

    One subplot is drawn per latent-search strategy in ``search_list``.
    Each point is one AE-based experiment (statistics taken over its
    seeds): x = mean final test loss, y = mean final train loss, both on
    log scales with a dashed x == y reference line.  Marker shape
    encodes the latent dimension, marker size the latent type, and
    colour the median number of discovered behaviours; the shared
    colorbar gets an extra tick at the median 'ps_mape' coverage.  The
    figure is saved under ``savepath`` (``refpath`` if not given).
    ``metric_name`` is accepted for interface consistency but unused.
    """
    if len(filter_string): graph_name = graph_name+'__'+filter_string
    # nrow, ncol = 1, 4
    nrow, ncol = 2, 2
    fig, ax = plt.subplots(nrow, ncol, figsize=(5*ncol,5*nrow))
    ax = ax.reshape((nrow, ncol))
    cm = plt.cm.get_cmap('jet') # RdYlBu summer viridis_r
    # mdict = {2:'o', 5:'v', 10:'^', 20:'*', 50:'P', 100:'s'}
    mlist = ['o', 'v', '^', '*','P', 's', 'x', '<', '>', 'p', 'D', 'd']
    search_list = ['ls_mape_jacobian', 'ls_mape_standard',
                   'ls_mape_mix_region', 'ls_mape_mix_ucb']
    # Organise experiments to consider
    experiments_dict = {}
    if '.csv' in refpath:
        # If plotting only one experiment
        show_xloop = True
        exp_name = '_'.join(refpath.split('/')[-2].split('__')[:2])
        experiments_dict[exp_name] = refpath
    else:
        # Organise plotting multiple experiments.  A '+'-separated filter
        # term prefixed with '^' excludes matching directories.
        filter_include = []
        filter_exclude = []
        for fterm in filter_string.split('+'):
            if len(fterm) and fterm[0]=='^':
                filter_exclude += glob.glob('{}/ENV_*AE*{}*'.format(refpath,
                                                                  fterm[1:]))
            else:
                filter_include += glob.glob('{}/ENV_*AE*{}*'.format(refpath,
                                                                  fterm))
        filter_exp = np.setdiff1d(filter_include, filter_exclude)
        # Extract only AE-based experiments
        for d in filter_exp:
            # Define the filters
            exp_name = d.split('/')[-1].split('__')[2]
            # Group csv files according to filters
            if '__S' not in d.split('/')[-1]:
                csv_file = glob.glob(d+'/S*/ref_data_*.csv')
            else:
                csv_file = glob.glob(d+'/ref_data_*.csv')
            if exp_name in experiments_dict.keys():
                experiments_dict[exp_name]['csv'] += csv_file
            else:
                experiments_dict[exp_name] = dict(csv=csv_file)
    # Load and plot points for each experiment
    experiments_to_plot = sorted(experiments_dict.keys(), reverse=True)
    n_exp = len(experiments_to_plot)
    print("\n\n=== starting: ANALYSIS GRAPH ===\n- {}".format(
          '\n- '.join(experiments_to_plot)))
    for i, klab in enumerate(experiments_to_plot):
        print("\n> Extracting performance ({}/{}): '{}'".format(
              i+1, n_exp, klab))
        # Get all seeds of this experiment and average values
        loss_test, loss_train, num_bd = [], [], []
        for cv in experiments_dict[klab]['csv']:
            print(" > Loading:", cv)
            latent_dim, latent_type, _ = load_metadata_info(cv)
            data_dict = load_bddata(cv)
            data_coverage = data_dict['coverage']
            final_test_loss, final_train_loss = load_lossdata(cv)
            if len(data_coverage) and final_test_loss is not None:
                loss_test.append(final_test_loss)
                loss_train.append(final_train_loss)
                final_num_bd = data_coverage[-1]
                num_bd.append(final_num_bd)
            else:
                print(" > EMPTY!")
        if len(num_bd) == 0:
            print("> ALL EMPTY!")
            continue
        else:
            # Aggregate over seeds: mean losses, median coverage.
            experiments_dict[klab]['loss_test'] = np.mean(loss_test)
            experiments_dict[klab]['loss_train'] = np.mean(loss_train)
            experiments_dict[klab]['num_bd'] = np.median(num_bd) # np.mean(num_bd)
            experiments_dict[klab]['latent_type'] = latent_type
            experiments_dict[klab]['latent_dim'] = latent_dim
    # Extract ps_mape as a reference
    ps_vals = []
    for ps in glob.glob('{}/ENV_*ps_mape*/S*/ref_data_*.csv'.format(refpath)):
        data_dict = load_bddata(ps)
        ps_vals.append(data_dict['coverage'][-1])
    psmedian = np.median(ps_vals)
    # Plot separate graphs
    total_test = [ed['loss_test'] for ed in experiments_dict.values() \
                  if 'loss_test' in ed.keys()]
    total_train = [ed['loss_train'] for ed in experiments_dict.values() \
                   if 'loss_test' in ed.keys()]
    total_nbd = [ed['num_bd'] for ed in experiments_dict.values() \
                 if 'loss_test' in ed.keys()]
    tsmin, tsmax = min(total_test), max(total_test)
    trmin, trmax = min(total_train), max(total_train)
    cmin, cmax = min(total_nbd), max(total_nbd + [psmedian])
    total_ltype = [ed['latent_type'] for ed in experiments_dict.values() \
                   if 'loss_test' in ed.keys()]
    total_ldim = [ed['latent_dim'] for ed in experiments_dict.values() \
                  if 'loss_test' in ed.keys()]
    uniq_ltype = sorted(np.unique(total_ltype))
    uniq_ldim = sorted(np.unique(total_ldim))
    # Marker size per latent type, marker shape per latent dimension.
    szdict = dict(zip(uniq_ltype, (5*np.arange(1,len(uniq_ltype)+1))**2))
    mkdict = dict(zip(uniq_ldim, mlist[:len(uniq_ldim)]))
    for i, ss in enumerate(search_list):
        if sum([ss in ek for ek in experiments_dict.keys()])==0:
            continue
        # Extract values from dictionary
        plot_test = [vv['loss_test'] for kk, vv in experiments_dict.items() \
                     if ss in kk and 'loss_test' in vv.keys()]
        plot_train = [vv['loss_train'] for kk, vv in experiments_dict.items() \
                      if ss in kk and 'loss_test' in vv.keys()]
        plot_nbd = [vv['num_bd'] for kk, vv in experiments_dict.items() \
                    if ss in kk and 'loss_test' in vv.keys()]
        raw_ltype = [vv['latent_type'] for kk, vv in experiments_dict.items() \
                     if ss in kk and 'loss_test' in vv.keys()]
        raw_ldim = [vv['latent_dim'] for kk, vv in experiments_dict.items() \
                    if ss in kk and 'loss_test' in vv.keys()]
        plot_ltype = [szdict[rtl] for rtl in raw_ltype]
        plot_ldim = [mkdict[rtd] for rtd in raw_ldim]
        # Plot experiment points
        scatter = mscatter(plot_test, plot_train, ax=ax[i//ncol, i%ncol],
                           cmap=cm, edgecolor='k', lw=.4, alpha=0.5,
                           vmin=cmin, vmax=1.01*cmax,
                           c=plot_nbd, s=plot_ltype, m=plot_ldim)
    # Fix figure
    if nrow == 1:
        fig.tight_layout(rect=[0.01, 0.1, 0.955, 0.92])
        cbar_ax = fig.add_axes([0.95, 0.21, 0.01, 0.65])
        # plt.subplots_adjust(wspace=-0.1)
        ty = 0.98
    else:
        fig.tight_layout(rect=[0.012, 0.075, 0.915, 0.97])
        cbar_ax = fig.add_axes([0.91, 0.115, 0.015, 0.82])
        plt.subplots_adjust(hspace=0.15)
        ty = 0.995
    # Add colorbar; append an extra tick marking the ps_mape reference
    cbar = fig.colorbar(scatter, cax=cbar_ax, orientation='vertical')
    cbar.ax.get_yaxis().labelpad = 15
    max_bd = np.prod(metric_dim)
    cbar.ax.set_ylabel('discovered behaviours (max {})'.format(max_bd),
                       rotation=270)
    cticks = list(cbar.get_ticks())
    ctlbs = [t.get_text() for t in cbar.ax.get_yticklabels()]
    cbar.set_ticks(cticks + [psmedian])
    cbar.set_ticklabels(ctlbs + ['ps_mape'])
    # Add labels
    num_exp = len(experiments_dict)
    fig.suptitle('{} (total: {} experiments)'.format(graph_name, num_exp),
                 fontsize=16, y=ty) # if nrow>1 else 1.012)
    plt.minorticks_on()
    for i, ss in enumerate(search_list):
        ax[i//ncol, i%ncol].set_title(ss)
        if nrow==1:
            ax[i//ncol, i%ncol].set_xlabel('test loss final')
            if i==0: ax[i//ncol, i%ncol].set_ylabel('training loss final')
        elif nrow>1:
            if not i%2: ax[i//ncol, i%ncol].set_ylabel('training loss final')
            if i>=ncol: ax[i//ncol, i%ncol].set_xlabel('test loss final')
        ax[i//ncol, i%ncol].set_xscale("log", nonposx='clip')
        ax[i//ncol, i%ncol].set_yscale("log", nonposy='clip')
        ax[i//ncol, i%ncol].set_xlim(0.01*tsmin, 10*tsmax)
        ax[i//ncol, i%ncol].set_ylim(0.01*trmin, 10*trmax)
        ax[i//ncol, i%ncol].grid(b=True, which='minor', alpha=0.2)
        ax[i//ncol, i%ncol].grid(b=True, which='major', alpha=0.5)
        # Add dashed x == y reference line (no over/under-fitting)
        ax[i//ncol, i%ncol].plot(ax[i//ncol, i%ncol].get_xlim(),
                                  ax[i//ncol, i%ncol].get_xlim(),
                                  ls='--', lw=0.5, c='gray', zorder=0)
    # Add legends (empty scatters just produce legend handles)
    ldim_sorted = np.vstack([(plt.scatter([], [], c='k', marker=mkdict[ld]),
                             ld) for ld in sorted(mkdict.keys())])
    ltype_sorted = np.vstack([(plt.scatter([], [], c='w', edgecolor='k',
                   s=szdict[lt]), lt) for lt in sorted(szdict.keys())])
    lgd_ldim = ax[-1,0].legend(ldim_sorted[:,0], ldim_sorted[:,1],
        loc='upper left', bbox_to_anchor=(0., -.12,), ncol=len(uniq_ldim))
    lgd_ltype = ax[-1,0].legend(ltype_sorted[:,0], ltype_sorted[:,1],
        loc='upper left', bbox_to_anchor=(0., -.2,), ncol=len(uniq_ltype))
    ax[-1,0].add_artist(lgd_ldim)
    ax[-1,0].add_artist(lgd_ltype)
    # Save/show figure
    savepath = refpath if savepath is None else savepath
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    if type(spec_title)==int:
        plt.savefig('{}/peformance_v_loss_analysis_loop_{:05d}.{}'.format(
                    savepath, spec_title, img_format),
                    format=img_format, bbox_extra_artists=(lgd_ldim, lgd_ltype),
                    bbox_inches='tight', dpi=dpi)
    else:
        plt.savefig('{}/{}__peformance_v_loss_analysis.{}'.format(
                    savepath, graph_name, img_format),
                    format=img_format, bbox_extra_artists=(lgd_ldim, lgd_ltype),
                    # bbox_inches=mpl.transforms.Bbox([[0,0],[10,10.1]]),
                    # bbox_inches='tight',
                    # pad_inches=0.3,
                    dpi=dpi)
    if show_plots:
        plt.show()
    else:
        plt.cla()
################################################################################
def _get_klab(k):
if 'ls_mape_mix_region-AE' in k:
klab = 'PMS'
elif 'ls_mape_mix_region-PCA' in k:
klab = 'PMS-PCA'
elif 'ls_mape_standard' in k:
klab = 'PMS-no-jacobian'
elif 'ls_mape_dde' in k:
klab = 'DDE'
elif 'ps_mape_directional' in k:
klab = 'MAPE-IsoLineDD'
elif 'ps_nn_uniform' in k:
klab = 'ps_uniform'
elif 'ps_nn_glorot' in k:
klab = 'ps_glorot'
elif 'ps_mape' == k:
klab = 'MAPE-Iso'
else:
klab = k
return klab
def plot_bd_graph(refpath, graph_name, metric_name, metric_dim, graph_type,
                  filter_string, show_xloop=False,
                  savepath=None, show_plots=False, spec_title=None,
                  img_format='jpg', dpi=300, **kwargs):
    """Plot per-experiment behaviour-discovery curves (and mixing ratios).

    For every experiment under ``refpath`` matching ``filter_string``,
    the per-sample ``graph_type`` statistic ('coverage' or 'fitness') is
    plotted against the cumulative number of trial evaluations on the
    top axes; the median and 25/75-percentile band are shown when
    several seeds are available.  If the data contains mixing ratios,
    their mean +/- std is drawn on the bottom axes; otherwise the bottom
    axes are removed.  The figure is saved under ``savepath``
    (``refpath`` if not given).
    """
    if len(filter_string): graph_name = graph_name+'__'+filter_string
    # f, ax = plt.subplots(figsize=(15,10))
    f, ax = plt.subplots(2,1, figsize=(15,15),
                         gridspec_kw={'height_ratios': [3, 1]})
    color_list = np.array(plt.rcParams['axes.prop_cycle'].by_key()['color'])
    linestyle_list = ["-","--","-.",":"]
    experiments_dict = {}
    plot_ratio_list = []
    markerdict = {'PMS':'o',
                  'PMS-PCA': 's',
                  'PMS-no-jacobian': 'X',
                  'MAPE-DDE': '^'}
    # Organise experiments to plot
    if '.csv' in refpath:
        # If plotting only one experiment
        show_xloop = True
        exp_name = '_'.join(refpath.split('/')[-2].split('__')[:2])
        experiments_dict[exp_name] = refpath
    else:
        # Organise plotting multiple experiments.  A '+'-separated filter
        # term prefixed with '^' excludes matching directories.
        filter_include = []
        filter_exclude = []
        for fterm in filter_string.split('+'):
            if len(fterm) and fterm[0]=='^':
                filter_exclude += glob.glob('{}/ENV_*{}*'.format(refpath,
                                                               fterm[1:]))
            else:
                filter_include += glob.glob('{}/ENV_*{}*'.format(refpath,
                                                               fterm))
        filter_exp = np.setdiff1d(filter_include, filter_exclude)
        for d in filter_exp:
            # Define the filters
            exp_name = d.split('/')[-1].split('__')[2]
            # Group csv files according to filters
            if '__S' not in d.split('/')[-1]:
                csv_file = glob.glob(d+'/S*/ref_data_*.csv')
            else:
                csv_file = glob.glob(d+'/ref_data_*.csv')
            if exp_name in experiments_dict.keys():
                experiments_dict[exp_name] += csv_file
            else:
                experiments_dict[exp_name] = csv_file
    # Load and plot bd coverage line of each experiment
    experiments_to_plot = sorted(experiments_dict.keys(), reverse=True)
    n_exp = len(experiments_to_plot)
    print("\n\n=== starting: {} GRAPH ===\n- {}".format(
          graph_type.upper(), '\n- '.join(experiments_to_plot)))
    # remap experiment order
    # new_idx = np.array([6,5,4,7,2,3,0,1])
    # experiments_to_plot = np.array(experiments_to_plot)[new_idx]
    color_dict = {}
    max_nsmp = 0
    for i, k in enumerate(experiments_to_plot):
        # klab = k[8:] if 'uniform' in k else k
        print("\n> Plotting {} ({}/{}): '{}'".format(graph_type,i+1,n_exp,k))
        klab = k # _get_klab(k)
        color = color_list[i % len(color_list)]
        # linestyle = linestyle_list[i // len(color_list)]
        linestyle = ':' if 'PCA-' in klab else '-'
        color_dict[k] = color
        # Extract experiment data; per-iteration values are repeated
        # per-sample so curves share a common evaluation axis.
        plot_ratio = False
        num_smp, num_bd, mix_ratio, m_xloop = [], [], [], []
        for cv in experiments_dict[k]:
            print(" > Loading:", cv)
            data_dict = load_bddata(cv)
            data_nsmp = data_dict['nsmp'].astype(int)
            if len(data_nsmp):
                # niter == 0 marks the start of a new outer loop
                xl = np.where(data_dict['niter']==0)[0]
                # data_nsmp = data_dict['nsmp'].astype(int)
                m_xloop.append(xl * data_nsmp[xl])
                num_smp.append(np.arange(sum(data_nsmp)))
                num_bd.append(np.repeat(data_dict[graph_type], data_nsmp))
                if data_dict['ratios'] is not None:
                    plot_ratio = True
                    mix_ratio.append(np.repeat(data_dict['ratios'], data_nsmp))
            else:
                print(" > EMPTY!")
            del data_dict
        if sum([len(ns) for ns in num_smp]) == 0:
            print("> ALL EMPTY!")
            continue
        plot_ratio_list.append(plot_ratio)
        # Get median and quartiles if multiple experiments (seeds)
        if len(num_smp)>1:
            print(" >>>> merging", len(num_smp))
            out_bd_rng = [[], []]
            out_ratio_rng = [[], []]
            lens = sorted(np.unique([len(s) for s in num_bd]))
            if len(lens) > 1:
                # Seeds ran for different lengths: cut lists in chunks
                # based on data lengths so each chunk stacks cleanly
                cut_idx = list(zip([0]+lens[:-1], lens))
                # Organize chunks for behaviour discovery
                tmp_bd = []
                for ndb in num_bd:
                    tmp_bd.append([ndb[s:e] for s,e in cut_idx])
                # collect the appropriate chunks if non empty
                tmp_bd_chunks = []
                for i in range(len(lens)):
                    tmp_bd_chunks.append([t[i] for t in tmp_bd if len(t[i])])
                # stack chunks of same size and get their statistics
                out_bd = []
                for tc in tmp_bd_chunks:
                    q1, qm, q3 = np.percentile(np.vstack(tc), [25, 50, 75],
                                                 # [0, 50, 100],
                                                 interpolation='nearest', axis=0)
                    out_bd.extend(qm)
                    out_bd_rng[0].extend(q1)
                    out_bd_rng[1].extend(q3)
                out_bd = np.array(out_bd)
                # Organize chunks for mixing ratios
                if plot_ratio:
                    tmp_ratio = []
                    for mr in mix_ratio:
                        tmp_ratio.append([mr[s:e] for s,e in cut_idx])
                    # collect the appropriate chunks if non empty
                    tmp_mr_chunks = []
                    for i in range(len(lens)):
                        tmp_mr_chunks.append([t[i] for t in tmp_ratio \
                                              if len(t[i])])
                    # stack chunks of same size and get their statistics
                    out_ratio = []
                    for tmr in tmp_mr_chunks:
                        # q1, qm, q3 = np.percentile(np.vstack(tmr), [25, 50, 75],
                        #                            # [0, 50, 100],
                        #                            interpolation='nearest', axis=0)
                        qm = np.mean(np.vstack(tmr), axis=0)
                        std = np.std(np.vstack(tmr), axis=0)
                        q1, q3 = qm-std, qm+std
                        out_ratio.extend(qm)
                        out_ratio_rng[0].extend(q1)
                        out_ratio_rng[1].extend(q3)
                    out_ratio = np.array(out_ratio)
            else:
                # All seeds have equal length: stack and take stats directly
                out_bd_rng[0], out_bd, out_bd_rng[1] = \
                    np.percentile(np.vstack(num_bd), [25, 50, 75], # [0, 50, 100],
                                  interpolation='nearest', axis=0)
                if plot_ratio:
                    # out_ratio_rng[0], out_ratio, out_ratio_rng[1] = \
                    # np.percentile(np.vstack(mix_ratio), [25, 50, 75], # [0, 50, 100],
                    #               interpolation='nearest', axis=0)
                    out_ratio = np.mean(np.vstack(mix_ratio), axis=0)
                    ratio_std = np.std(np.vstack(mix_ratio), axis=0)
                    out_ratio_rng = [out_ratio-ratio_std, out_ratio+ratio_std]
            # Get x-axis length
            out_smp = np.array(num_smp[np.argmax([len(ns) for ns in num_smp])])
            # Get y-axis values
            out_bd_rng = np.array(out_bd_rng)
            ax[0].fill_between(out_smp, out_bd_rng[0], out_bd_rng[1],
                               color=color, alpha=0.2)
            if plot_ratio:
                out_ratio_rng = np.clip(out_ratio_rng, 0, 1)
                ax[1].fill_between(out_smp,
                                   _smooth(out_ratio_rng[0]),
                                   _smooth(out_ratio_rng[1]),
                                   color=color, alpha=0.2)
            # Get x-axis loop locations
            m_xloop = m_xloop[np.argmax(list(map(len, num_bd)))]
        else:
            out_bd = np.array(num_bd[0])
            out_smp = np.array(num_smp[0])
            if plot_ratio:
                out_ratio = np.array(mix_ratio[0])
        # Plot coverage curve of experiment type k
        if show_xloop:
            xloop = m_xloop[0]
            ax.plot(out_smp, out_bd,
                    alpha=0.5, label="__"+k, color='k', linestyle='--')
            [ax[0].axvline(ln, c='b', ls='--', alpha=0.2) for ln in xloop]
            # make plot nice
            ax[0].set_xticks(list(ax[0].get_xticks())[1:] \
                             +list(xloop) + list(xloop))
            xlabels = ax[0].get_xticks().astype(int).tolist()
            xlabels[-len(xloop):] = ['\n L{}'.format(i) \
                                     for i in range(len(xloop))]
            ax[0].set_xticklabels(xlabels)
            npts = len(out_smp)
            ax[0].set_xlim(-int(0.02*npts), npts+int(0.02*npts))
        else:
            if 'ls_mape_' in klab:
            # if klab in markerdict.keys():
                ax[0].scatter(m_xloop, out_bd[np.array(m_xloop)],
                              s=10, color=color)
                              # marker=markerdict[klab])
            ax[0].plot(out_smp, out_bd, label=klab, color=color,
                       linestyle=linestyle, alpha=0.5)
        # Check if mixing ratios plot is needed
        if plot_ratio:
            out_ratio = _smooth(out_ratio)
            if 'ls_mape_' in klab:
                ax[1].scatter(m_xloop, out_ratio[np.array(m_xloop)],
                              s=10, color=color, alpha=0.5)
            ax[1].plot(out_smp, out_ratio, label=klab, color=color,
                       linestyle=linestyle, alpha=0.5)
        # Align graphs
        if len(out_smp) > max_nsmp: max_nsmp = len(out_smp)
    # Add labels and legends
    legends = []
    ax[0].set_title(graph_name)
    # ax[0].set_ylabel('# {} ( /{})'.format(metric_name, np.prod(metric_dim)))
    max_bd = np.prod(metric_dim)
    if graph_type=='coverage':
        ax[0].set_ylabel('discovered behaviours (max {})'.format(max_bd))
    elif graph_type=='fitness':
        ax[0].set_ylabel('best behaviour fitness')
    ax[0].set_xlabel('total trial evaluations')
    ax[0].grid(which='minor', alpha=0.2)
    ax[0].grid(which='major', alpha=0.5)
    # ax[0].set_ylim(5000,7000)
    ax[0].set_xlim(right=int(1.01*max_nsmp))
    ax[0].ticklabel_format(style='sci', axis='x', scilimits=(0,0))
    h, l = ax[0].get_legend_handles_labels()
    legends.append(ax[0].legend(h, l, loc='upper left',
                                bbox_to_anchor=(1, 1), ncol=1))
    if np.any(plot_ratio_list): # and graph_type != 'fitness':
        ax[1].set_ylabel('mixing ratio (ps/nsmp)')
        ax[1].set_xlabel('total trial evaluations')
        ax[1].minorticks_on()
        ax[1].grid(which='minor', alpha=0.5, linestyle=':')
        ax[1].grid(which='major', alpha=0.5)
        ax[1].set_xlim(right=int(1.01*max_nsmp))
        ax[1].set_ylim(-0.03, 1.03)
        ax[1].set_yticks([0, 0.5, 1])
        ax[1].ticklabel_format(style='sci', axis='x', scilimits=(0,0))
        h, l = ax[1].get_legend_handles_labels()
        legends.append(ax[1].legend(h, l, loc='upper left',
                                    bbox_to_anchor=(1, 1), ncol=1))
    else:
        # No mixing-ratio data anywhere: drop the unused bottom axes
        f.delaxes(ax[1])
    # Save/show figure
    plt.subplots_adjust(hspace = 0.1)
    savepath = refpath if savepath is None else savepath
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    if type(spec_title)==int:
        plt.savefig('{}/bd_{}_loop_{:05d}.{}'.format(
                    savepath, graph_type, spec_title, img_format),
                    format=img_format, bbox_extra_artists=tuple(legends),
                    bbox_inches='tight', dpi=dpi)
    else:
        plt.savefig('{}/{}__bd_{}.{}'.format(
                    savepath, graph_name, graph_type, img_format),
                    format=img_format, bbox_extra_artists=tuple(legends),
                    bbox_inches='tight', dpi=dpi)
    if show_plots:
        plt.show()
    else:
        plt.cla()
################################################################################
if __name__ == "__main__":
    args = parser.parse_args()
    refpath = args.load_path
    # Map the experiment environment (inferred from the load-path name) to
    # its behaviour-metric settings: the metric grid dimensions and the
    # metric type used when labelling the plots.
    if 'hyperplane' in refpath or 'HYPERPLANE' in refpath:
        dim = 100
        metric_dim = [dim, dim]
        metric_name = 'simple_grid'
    elif 'striker' in refpath or 'STRIKER' in refpath:
        dim = 30
        metric_dim = [17, dim, dim]
        metric_name = 'contact_grid'
    elif 'bipedal_walker' in refpath or 'BIPEDAL_WALKER' in refpath:
        dim = 10
        metric_dim = [dim//2, dim**2, dim//2, dim//2]
        metric_name = 'gait_grid'
    elif 'bipedal_kicker' in refpath or 'BIPEDAL_KICKER' in refpath:
        dim = 10
        metric_dim = [5*dim, 2*dim**2]
        metric_name = 'simple_grid'
    elif 'quadruped_walker' in refpath or 'QUADRUPED_WALKER' in refpath:
        dim = 10
        metric_dim = [dim**2, dim**2]
        metric_name = 'simple_grid'
    elif 'quadruped_kicker' in refpath or 'QUADRUPED_KICKER' in refpath:
        dim = 30
        metric_dim = [17, dim, dim]
        metric_name = 'contact_grid'
    else:
        # Fail fast with a clear message instead of hitting a NameError on
        # metric_dim/metric_name further down.
        raise ValueError(
            "Cannot infer environment from load path: '{}'".format(refpath))
    # Graph title comes from the experiment directory name.
    idx = -2 if refpath[-1] == '/' else -1
    graph_name = refpath.split('/')[idx]
    # Dispatch the requested plot types (several may be combined).
    if 'perfl2' in args.plot_type:
        plot_performance_v_l2dist(refpath, graph_name, metric_name, metric_dim,
                                  filter_string=args.filter_string,
                                  savepath=args.save_path)
    if 'perfratio' in args.plot_type:
        plot_performance_v_ratio(refpath, graph_name, metric_name, metric_dim,
                                 filter_string=args.filter_string,
                                 savepath=args.save_path)
    if 'perfloss' in args.plot_type:
        plot_performance_v_loss(refpath, graph_name, metric_name, metric_dim,
                                filter_string=args.filter_string,
                                savepath=args.save_path)
    if 'bd' in args.plot_type:
        plot_bd_graph(refpath, graph_name, metric_name, metric_dim,
                      filter_string=args.filter_string, graph_type='coverage',
                      savepath=args.save_path)
    if 'fit' in args.plot_type:
        plot_bd_graph(refpath, graph_name, metric_name, metric_dim,
                      filter_string=args.filter_string, graph_type='fitness',
                      savepath=args.save_path)
| [
"logging.getLogger",
"numpy.prod",
"numpy.clip",
"numpy.convolve",
"pandas.read_csv",
"multiprocessing.cpu_count",
"numpy.array",
"numpy.linalg.norm",
"matplotlib.colors.LogNorm",
"behaviour_representations.analysis.load_metadata",
"numpy.mean",
"os.path.exists",
"numpy.repeat",
"argparse.... | [((794, 808), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (801, 808), True, 'import matplotlib as mpl\n'), ((1122, 1149), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1139, 1149), False, 'import logging\n'), ((1161, 1186), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1184, 1186), False, 'import argparse\n'), ((2642, 2661), 'numpy.ones', 'np.ones', (['(w_len // 2)'], {}), '(w_len // 2)\n', (2649, 2661), True, 'import numpy as np\n'), ((2675, 2733), 'numpy.concatenate', 'np.concatenate', (['[pad * data[0], data, pad[:-1] * data[-1]]'], {}), '([pad * data[0], data, pad[:-1] * data[-1]])\n', (2689, 2733), True, 'import numpy as np\n'), ((2748, 2791), 'numpy.convolve', 'np.convolve', (['data_pad', 'window'], {'mode': '"""valid"""'}), "(data_pad, window, mode='valid')\n", (2759, 2791), True, 'import numpy as np\n'), ((3016, 3037), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (3027, 3037), True, 'import pandas as pd\n'), ((5104, 5171), 'os.path.join', 'os.path.join', (['filepath', '"""saved_models/training_losses_param_ae.csv"""'], {}), "(filepath, 'saved_models/training_losses_param_ae.csv')\n", (5116, 5171), False, 'import os\n'), ((5437, 5478), 'ast.literal_eval', 'ast.literal_eval', (["data['test'].values[-1]"], {}), "(data['test'].values[-1])\n", (5453, 5478), False, 'import ast\n'), ((5823, 5846), 'behaviour_representations.analysis.load_metadata', 'load_metadata', (['filepath'], {}), '(filepath)\n', (5836, 5846), False, 'from behaviour_representations.analysis import load_metadata, load_dataset\n'), ((6470, 6513), 'numpy.linalg.norm', 'np.linalg.norm', (['(focus_param - compare_param)'], {}), '(focus_param - compare_param)\n', (6484, 6513), True, 'import numpy as np\n'), ((6687, 6709), 'behaviour_representations.analysis.load_dataset', 'load_dataset', (['filepath'], {}), '(filepath)\n', (6699, 6709), False, 'from 
behaviour_representations.analysis import load_metadata, load_dataset\n'), ((7455, 7473), 'numpy.mean', 'np.mean', (['dist_list'], {}), '(dist_list)\n', (7462, 7473), True, 'import numpy as np\n'), ((8357, 8391), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 5)'}), '(1, 1, figsize=(5, 5))\n', (8369, 8391), True, 'import matplotlib.pyplot as plt\n'), ((8550, 8605), 'os.path.join', 'os.path.join', (['refpath', '"""saved_performance_v_l2dist.pkl"""'], {}), "(refpath, 'saved_performance_v_l2dist.pkl')\n", (8562, 8605), False, 'import os\n'), ((8613, 8634), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (8627, 8634), False, 'import os\n'), ((14264, 14283), 'numpy.prod', 'np.prod', (['metric_dim'], {}), '(metric_dim)\n', (14271, 14283), True, 'import numpy as np\n'), ((14434, 14453), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (14451, 14453), True, 'import matplotlib.pyplot as plt\n'), ((17265, 17319), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrow', 'ncol'], {'figsize': '(5 * ncol, 5 * nrow)'}), '(nrow, ncol, figsize=(5 * ncol, 5 * nrow))\n', (17277, 17319), True, 'import matplotlib.pyplot as plt\n'), ((17358, 17380), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (17373, 17380), True, 'import matplotlib.pyplot as plt\n'), ((21061, 21079), 'numpy.median', 'np.median', (['ps_vals'], {}), '(ps_vals)\n', (21070, 21079), True, 'import numpy as np\n'), ((24273, 24292), 'numpy.prod', 'np.prod', (['metric_dim'], {}), '(metric_dim)\n', (24280, 24292), True, 'import numpy as np\n'), ((24856, 24875), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (24873, 24875), True, 'import matplotlib.pyplot as plt\n'), ((28237, 28291), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrow', 'ncol'], {'figsize': '(5 * ncol, 5 * nrow)'}), '(nrow, ncol, figsize=(5 * ncol, 5 * nrow))\n', (28249, 28291), True, 'import matplotlib.pyplot as 
plt\n'), ((28330, 28352), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (28345, 28352), True, 'import matplotlib.pyplot as plt\n'), ((32027, 32045), 'numpy.median', 'np.median', (['ps_vals'], {}), '(ps_vals)\n', (32036, 32045), True, 'import numpy as np\n'), ((35078, 35097), 'numpy.prod', 'np.prod', (['metric_dim'], {}), '(metric_dim)\n', (35085, 35097), True, 'import numpy as np\n'), ((35662, 35681), 'matplotlib.pyplot.minorticks_on', 'plt.minorticks_on', ([], {}), '()\n', (35679, 35681), True, 'import matplotlib.pyplot as plt\n'), ((39533, 39608), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(15, 15)', 'gridspec_kw': "{'height_ratios': [3, 1]}"}), "(2, 1, figsize=(15, 15), gridspec_kw={'height_ratios': [3, 1]})\n", (39545, 39608), True, 'import matplotlib.pyplot as plt\n'), ((49330, 49349), 'numpy.prod', 'np.prod', (['metric_dim'], {}), '(metric_dim)\n', (49337, 49349), True, 'import numpy as np\n'), ((49985, 50008), 'numpy.any', 'np.any', (['plot_ratio_list'], {}), '(plot_ratio_list)\n', (49991, 50008), True, 'import numpy as np\n'), ((50715, 50746), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.1)'}), '(hspace=0.1)\n', (50734, 50746), True, 'import matplotlib.pyplot as plt\n'), ((2085, 2094), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2092, 2094), True, 'import matplotlib.pyplot as plt\n'), ((2611, 2625), 'numpy.ones', 'np.ones', (['w_len'], {}), '(w_len)\n', (2618, 2625), True, 'import numpy as np\n'), ((3867, 3888), 'multiprocessing.Pool', 'mpi.Pool', ([], {'processes': '(7)'}), '(processes=7)\n', (3875, 3888), True, 'import multiprocessing as mpi\n'), ((4788, 4809), 'numpy.array', 'np.array', (['data_ratios'], {}), '(data_ratios)\n', (4796, 4809), True, 'import numpy as np\n'), ((4917, 4954), 'numpy.concatenate', 'np.concatenate', (['[[ratios[0]], ratios]'], {}), '([[ratios[0]], ratios])\n', (4931, 4954), True, 'import numpy as np\n'), ((5224, 
5299), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': 'None', 'usecols': '[1, 2]', 'names': "['test', 'train']"}), "(filename, header=None, usecols=[1, 2], names=['test', 'train'])\n", (5235, 5299), True, 'import pandas as pd\n'), ((5570, 5612), 'ast.literal_eval', 'ast.literal_eval', (["data['train'].values[-1]"], {}), "(data['train'].values[-1])\n", (5586, 5612), False, 'import ast\n'), ((12643, 12665), 'numpy.unique', 'np.unique', (['total_ltype'], {}), '(total_ltype)\n', (12652, 12665), True, 'import numpy as np\n'), ((12704, 12725), 'numpy.unique', 'np.unique', (['total_ldim'], {}), '(total_ldim)\n', (12713, 12725), True, 'import numpy as np\n'), ((12766, 12789), 'numpy.unique', 'np.unique', (['total_search'], {}), '(total_search)\n', (12775, 12789), True, 'import numpy as np\n'), ((15906, 15929), 'os.path.isdir', 'os.path.isdir', (['savepath'], {}), '(savepath)\n', (15919, 15929), False, 'import os\n'), ((15939, 15960), 'os.makedirs', 'os.makedirs', (['savepath'], {}), '(savepath)\n', (15950, 15960), False, 'import os\n'), ((16685, 16695), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16693, 16695), True, 'import matplotlib.pyplot as plt\n'), ((16714, 16723), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (16721, 16723), True, 'import matplotlib.pyplot as plt\n'), ((18493, 18537), 'numpy.setdiff1d', 'np.setdiff1d', (['filter_include', 'filter_exclude'], {}), '(filter_include, filter_exclude)\n', (18505, 18537), True, 'import numpy as np\n'), ((21980, 22002), 'numpy.unique', 'np.unique', (['total_ltype'], {}), '(total_ltype)\n', (21989, 22002), True, 'import numpy as np\n'), ((22027, 22048), 'numpy.unique', 'np.unique', (['total_ldim'], {}), '(total_ldim)\n', (22036, 22048), True, 'import numpy as np\n'), ((24195, 24227), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.15)'}), '(hspace=0.15)\n', (24214, 24227), True, 'import matplotlib.pyplot as plt\n'), ((26880, 26903), 'os.path.isdir', 
'os.path.isdir', (['savepath'], {}), '(savepath)\n', (26893, 26903), False, 'import os\n'), ((26913, 26934), 'os.makedirs', 'os.makedirs', (['savepath'], {}), '(savepath)\n', (26924, 26934), False, 'import os\n'), ((27659, 27669), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27667, 27669), True, 'import matplotlib.pyplot as plt\n'), ((27688, 27697), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (27695, 27697), True, 'import matplotlib.pyplot as plt\n'), ((29465, 29509), 'numpy.setdiff1d', 'np.setdiff1d', (['filter_include', 'filter_exclude'], {}), '(filter_include, filter_exclude)\n', (29477, 29509), True, 'import numpy as np\n'), ((32944, 32966), 'numpy.unique', 'np.unique', (['total_ltype'], {}), '(total_ltype)\n', (32953, 32966), True, 'import numpy as np\n'), ((32991, 33012), 'numpy.unique', 'np.unique', (['total_ldim'], {}), '(total_ldim)\n', (33000, 33012), True, 'import numpy as np\n'), ((34886, 34918), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.15)'}), '(hspace=0.15)\n', (34905, 34918), True, 'import matplotlib.pyplot as plt\n'), ((37656, 37679), 'os.path.isdir', 'os.path.isdir', (['savepath'], {}), '(savepath)\n', (37669, 37679), False, 'import os\n'), ((37689, 37710), 'os.makedirs', 'os.makedirs', (['savepath'], {}), '(savepath)\n', (37700, 37710), False, 'import os\n'), ((38433, 38443), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38441, 38443), True, 'import matplotlib.pyplot as plt\n'), ((38462, 38471), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (38469, 38471), True, 'import matplotlib.pyplot as plt\n'), ((40742, 40786), 'numpy.setdiff1d', 'np.setdiff1d', (['filter_include', 'filter_exclude'], {}), '(filter_include, filter_exclude)\n', (40754, 40786), True, 'import numpy as np\n'), ((50817, 50840), 'os.path.isdir', 'os.path.isdir', (['savepath'], {}), '(savepath)\n', (50830, 50840), False, 'import os\n'), ((50850, 50871), 'os.makedirs', 'os.makedirs', (['savepath'], {}), 
'(savepath)\n', (50861, 50871), False, 'import os\n'), ((51426, 51436), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (51434, 51436), True, 'import matplotlib.pyplot as plt\n'), ((51455, 51464), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (51462, 51464), True, 'import matplotlib.pyplot as plt\n'), ((7341, 7389), 'functools.partial', 'partial', (['mpi_get_dist'], {'param_original': 'param_flat'}), '(mpi_get_dist, param_original=param_flat)\n', (7348, 7389), False, 'from functools import partial\n'), ((8739, 8753), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8750, 8753), False, 'import pickle\n'), ((9658, 9702), 'numpy.setdiff1d', 'np.setdiff1d', (['filter_include', 'filter_exclude'], {}), '(filter_include, filter_exclude)\n', (9670, 9702), True, 'import numpy as np\n'), ((12116, 12148), 'pickle.dump', 'pickle.dump', (['experiments_dict', 'f'], {}), '(experiments_dict, f)\n', (12127, 12148), False, 'import pickle\n'), ((20536, 20554), 'numpy.mean', 'np.mean', (['loss_test'], {}), '(loss_test)\n', (20543, 20554), True, 'import numpy as np\n'), ((20606, 20625), 'numpy.mean', 'np.mean', (['loss_train'], {}), '(loss_train)\n', (20613, 20625), True, 'import numpy as np\n'), ((20673, 20690), 'numpy.median', 'np.median', (['num_bd'], {}), '(num_bd)\n', (20682, 20690), True, 'import numpy as np\n'), ((23339, 23358), 'numpy.array', 'np.array', (['plot_test'], {}), '(plot_test)\n', (23347, 23358), True, 'import numpy as np\n'), ((23359, 23379), 'numpy.array', 'np.array', (['plot_train'], {}), '(plot_train)\n', (23367, 23379), True, 'import numpy as np\n'), ((31485, 31503), 'numpy.mean', 'np.mean', (['loss_test'], {}), '(loss_test)\n', (31492, 31503), True, 'import numpy as np\n'), ((31555, 31574), 'numpy.mean', 'np.mean', (['loss_train'], {}), '(loss_train)\n', (31562, 31574), True, 'import numpy as np\n'), ((31622, 31639), 'numpy.median', 'np.median', (['num_bd'], {}), '(num_bd)\n', (31631, 31639), True, 'import numpy as np\n'), ((46766, 46786), 
'numpy.array', 'np.array', (['out_bd_rng'], {}), '(out_bd_rng)\n', (46774, 46786), True, 'import numpy as np\n'), ((47372, 47391), 'numpy.array', 'np.array', (['num_bd[0]'], {}), '(num_bd[0])\n', (47380, 47391), True, 'import numpy as np\n'), ((47414, 47434), 'numpy.array', 'np.array', (['num_smp[0]'], {}), '(num_smp[0])\n', (47422, 47434), True, 'import numpy as np\n'), ((2356, 2384), 'matplotlib.markers.MarkerStyle', 'mmarkers.MarkerStyle', (['marker'], {}), '(marker)\n', (2376, 2384), True, 'import matplotlib.markers as mmarkers\n'), ((4825, 4844), 'numpy.min', 'np.min', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (4831, 4844), True, 'import numpy as np\n'), ((7284, 7299), 'multiprocessing.cpu_count', 'mpi.cpu_count', ([], {}), '()\n', (7297, 7299), True, 'import multiprocessing as mpi\n'), ((11708, 11725), 'numpy.median', 'np.median', (['num_bd'], {}), '(num_bd)\n', (11717, 11725), True, 'import numpy as np\n'), ((11778, 11793), 'numpy.mean', 'np.mean', (['l2dist'], {}), '(l2dist)\n', (11785, 11793), True, 'import numpy as np\n'), ((14763, 14808), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'c': '"""k"""', 'marker': 'mkdict[ld]'}), "([], [], c='k', marker=mkdict[ld])\n", (14774, 14808), True, 'import matplotlib.pyplot as plt\n'), ((14922, 15019), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'c': '"""w"""', 'edgecolor': '"""k"""', 's': 'szdict[lt]', 'hatch': "('....' if 'PCA' in lt else '')"}), "([], [], c='w', edgecolor='k', s=szdict[lt], hatch='....' 
if \n 'PCA' in lt else '')\n", (14933, 15019), True, 'import matplotlib.pyplot as plt\n'), ((15143, 15177), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'c': 'expdict[lt]'}), '([], [], c=expdict[lt])\n', (15154, 15177), True, 'import matplotlib.pyplot as plt\n'), ((18823, 18858), 'glob.glob', 'glob.glob', (["(d + '/S*/ref_data_*.csv')"], {}), "(d + '/S*/ref_data_*.csv')\n", (18832, 18858), False, 'import glob\n'), ((18902, 18934), 'glob.glob', 'glob.glob', (["(d + '/ref_data_*.csv')"], {}), "(d + '/ref_data_*.csv')\n", (18911, 18934), False, 'import glob\n'), ((23697, 23717), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {}), '()\n', (23715, 23717), True, 'import matplotlib as mpl\n'), ((26161, 26206), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'c': '"""k"""', 'marker': 'mkdict[ld]'}), "([], [], c='k', marker=mkdict[ld])\n", (26172, 26206), True, 'import matplotlib.pyplot as plt\n'), ((26302, 26357), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'c': '"""w"""', 'edgecolor': '"""k"""', 's': 'szdict[lt]'}), "([], [], c='w', edgecolor='k', s=szdict[lt])\n", (26313, 26357), True, 'import matplotlib.pyplot as plt\n'), ((29795, 29830), 'glob.glob', 'glob.glob', (["(d + '/S*/ref_data_*.csv')"], {}), "(d + '/S*/ref_data_*.csv')\n", (29804, 29830), False, 'import glob\n'), ((29874, 29906), 'glob.glob', 'glob.glob', (["(d + '/ref_data_*.csv')"], {}), "(d + '/ref_data_*.csv')\n", (29883, 29906), False, 'import glob\n'), ((36932, 36977), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'c': '"""k"""', 'marker': 'mkdict[ld]'}), "([], [], c='k', marker=mkdict[ld])\n", (36943, 36977), True, 'import matplotlib.pyplot as plt\n'), ((37073, 37128), 'matplotlib.pyplot.scatter', 'plt.scatter', (['[]', '[]'], {'c': '"""w"""', 'edgecolor': '"""k"""', 's': 'szdict[lt]'}), "([], [], c='w', edgecolor='k', s=szdict[lt])\n", (37084, 37128), True, 'import matplotlib.pyplot as plt\n'), ((41028, 41063), 'glob.glob', 'glob.glob', 
(["(d + '/S*/ref_data_*.csv')"], {}), "(d + '/S*/ref_data_*.csv')\n", (41037, 41063), False, 'import glob\n'), ((41107, 41139), 'glob.glob', 'glob.glob', (["(d + '/ref_data_*.csv')"], {}), "(d + '/ref_data_*.csv')\n", (41116, 41139), False, 'import glob\n'), ((44561, 44577), 'numpy.array', 'np.array', (['out_bd'], {}), '(out_bd)\n', (44569, 44577), True, 'import numpy as np\n'), ((46973, 47001), 'numpy.clip', 'np.clip', (['out_ratio_rng', '(0)', '(1)'], {}), '(out_ratio_rng, 0, 1)\n', (46980, 47001), True, 'import numpy as np\n'), ((47490, 47512), 'numpy.array', 'np.array', (['mix_ratio[0]'], {}), '(mix_ratio[0])\n', (47498, 47512), True, 'import numpy as np\n'), ((10016, 10051), 'glob.glob', 'glob.glob', (["(d + '/S*/ref_data_*.csv')"], {}), "(d + '/S*/ref_data_*.csv')\n", (10025, 10051), False, 'import glob\n'), ((10103, 10135), 'glob.glob', 'glob.glob', (["(d + '/ref_data_*.csv')"], {}), "(d + '/ref_data_*.csv')\n", (10112, 10135), False, 'import glob\n'), ((42537, 42570), 'numpy.where', 'np.where', (["(data_dict['niter'] == 0)"], {}), "(data_dict['niter'] == 0)\n", (42545, 42570), True, 'import numpy as np\n'), ((42771, 42814), 'numpy.repeat', 'np.repeat', (['data_dict[graph_type]', 'data_nsmp'], {}), '(data_dict[graph_type], data_nsmp)\n', (42780, 42814), True, 'import numpy as np\n'), ((45859, 45878), 'numpy.array', 'np.array', (['out_ratio'], {}), '(out_ratio)\n', (45867, 45878), True, 'import numpy as np\n'), ((45988, 46005), 'numpy.vstack', 'np.vstack', (['num_bd'], {}), '(num_bd)\n', (45997, 46005), True, 'import numpy as np\n'), ((42943, 42984), 'numpy.repeat', 'np.repeat', (["data_dict['ratios']", 'data_nsmp'], {}), "(data_dict['ratios'], data_nsmp)\n", (42952, 42984), True, 'import numpy as np\n'), ((44243, 44256), 'numpy.vstack', 'np.vstack', (['tc'], {}), '(tc)\n', (44252, 44256), True, 'import numpy as np\n'), ((46418, 46438), 'numpy.vstack', 'np.vstack', (['mix_ratio'], {}), '(mix_ratio)\n', (46427, 46438), True, 'import numpy as np\n'), ((46487, 
46507), 'numpy.vstack', 'np.vstack', (['mix_ratio'], {}), '(mix_ratio)\n', (46496, 46507), True, 'import numpy as np\n'), ((48423, 48440), 'numpy.array', 'np.array', (['m_xloop'], {}), '(m_xloop)\n', (48431, 48440), True, 'import numpy as np\n'), ((48870, 48887), 'numpy.array', 'np.array', (['m_xloop'], {}), '(m_xloop)\n', (48878, 48887), True, 'import numpy as np\n'), ((45544, 45558), 'numpy.vstack', 'np.vstack', (['tmr'], {}), '(tmr)\n', (45553, 45558), True, 'import numpy as np\n'), ((45605, 45619), 'numpy.vstack', 'np.vstack', (['tmr'], {}), '(tmr)\n', (45614, 45619), True, 'import numpy as np\n')] |
from unittest import TestCase, mock
from unittest.mock import MagicMock
import numpy as np
from source.constants import Constants
from source.preprocessing.epoch import Epoch
from source.preprocessing.heart_rate.heart_rate_collection import HeartRateCollection
from source.preprocessing.heart_rate.heart_rate_feature_service import HeartRateFeatureService
class TestHeartRateFeatureService(TestCase):
@mock.patch('source.preprocessing.heart_rate.heart_rate_feature_service.pd')
def test_load(self, mock_pd):
mock_pd.read_csv.return_value = mock_return = MagicMock()
mock_return.values = expected_return = np.array([1, 2, 3, 4, 5])
actual_returned_value = HeartRateFeatureService.load("subjectA")
self.assertListEqual(expected_return.tolist(), actual_returned_value.tolist())
mock_pd.read_csv.assert_called_once_with(str(HeartRateFeatureService.get_path("subjectA")), delimiter=' ')
def test_get_path(self):
expected_path = Constants.FEATURE_FILE_PATH.joinpath("subjectA" + '_hr_feature.out')
self.assertEqual(expected_path, HeartRateFeatureService.get_path("subjectA"))
@mock.patch('source.preprocessing.heart_rate.heart_rate_feature_service.np')
def test_write(self, mock_np):
feature_to_write = np.array([1, 2, 3, 4])
subject_id = "subjectA"
HeartRateFeatureService.write(subject_id, feature_to_write)
mock_np.savetxt.assert_called_once_with(HeartRateFeatureService.get_path(subject_id), feature_to_write,
fmt='%f')
def test_get_window(self):
timestamps = np.array([-1000, -500, 32, 50, 60, 800, 1000])
epoch = Epoch(timestamp=55, index=120)
expected_indices_in_range = np.array([2, 3, 4])
actual_indices_in_range = HeartRateFeatureService.get_window(timestamps, epoch)
self.assertEqual(expected_indices_in_range.tolist(), actual_indices_in_range.tolist())
@mock.patch.object(HeartRateFeatureService, 'get_feature')
@mock.patch('source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateService')
def test_build_feature_array(self, mock_heart_rate_service, mock_get_feature):
subject_id = "subjectA"
data = np.array(
[[1, 10], [10, 220], [20, 0], [40, 500], [70, 200], [90, 0], [100, 0], [400, 4]])
motion_collection = HeartRateCollection(subject_id=subject_id, data=data)
mock_heart_rate_service.load_cropped.return_value = motion_collection
expected_features = [np.array([0.1]), np.array([0.2])]
mock_get_feature.side_effect = expected_features
expected_feature_array = np.array(expected_features)
valid_epochs = [Epoch(timestamp=4, index=1), Epoch(timestamp=50, index=2)]
returned_feature_array = HeartRateFeatureService.build(subject_id, valid_epochs)
self.assertEqual(expected_feature_array.tolist(), returned_feature_array.tolist())
| [
"source.constants.Constants.FEATURE_FILE_PATH.joinpath",
"source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateFeatureService.write",
"source.preprocessing.epoch.Epoch",
"unittest.mock.MagicMock",
"source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateFeatureService.load",
"nu... | [((409, 484), 'unittest.mock.patch', 'mock.patch', (['"""source.preprocessing.heart_rate.heart_rate_feature_service.pd"""'], {}), "('source.preprocessing.heart_rate.heart_rate_feature_service.pd')\n", (419, 484), False, 'from unittest import TestCase, mock\n'), ((1150, 1225), 'unittest.mock.patch', 'mock.patch', (['"""source.preprocessing.heart_rate.heart_rate_feature_service.np"""'], {}), "('source.preprocessing.heart_rate.heart_rate_feature_service.np')\n", (1160, 1225), False, 'from unittest import TestCase, mock\n'), ((1976, 2033), 'unittest.mock.patch.object', 'mock.patch.object', (['HeartRateFeatureService', '"""get_feature"""'], {}), "(HeartRateFeatureService, 'get_feature')\n", (1993, 2033), False, 'from unittest import TestCase, mock\n'), ((2039, 2138), 'unittest.mock.patch', 'mock.patch', (['"""source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateService"""'], {}), "(\n 'source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateService'\n )\n", (2049, 2138), False, 'from unittest import TestCase, mock\n'), ((573, 584), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (582, 584), False, 'from unittest.mock import MagicMock\n'), ((632, 657), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (640, 657), True, 'import numpy as np\n'), ((690, 730), 'source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateFeatureService.load', 'HeartRateFeatureService.load', (['"""subjectA"""'], {}), "('subjectA')\n", (718, 730), False, 'from source.preprocessing.heart_rate.heart_rate_feature_service import HeartRateFeatureService\n'), ((988, 1056), 'source.constants.Constants.FEATURE_FILE_PATH.joinpath', 'Constants.FEATURE_FILE_PATH.joinpath', (["('subjectA' + '_hr_feature.out')"], {}), "('subjectA' + '_hr_feature.out')\n", (1024, 1056), False, 'from source.constants import Constants\n'), ((1288, 1310), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1296, 1310), True, 
'import numpy as np\n'), ((1351, 1410), 'source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateFeatureService.write', 'HeartRateFeatureService.write', (['subject_id', 'feature_to_write'], {}), '(subject_id, feature_to_write)\n', (1380, 1410), False, 'from source.preprocessing.heart_rate.heart_rate_feature_service import HeartRateFeatureService\n'), ((1635, 1681), 'numpy.array', 'np.array', (['[-1000, -500, 32, 50, 60, 800, 1000]'], {}), '([-1000, -500, 32, 50, 60, 800, 1000])\n', (1643, 1681), True, 'import numpy as np\n'), ((1698, 1728), 'source.preprocessing.epoch.Epoch', 'Epoch', ([], {'timestamp': '(55)', 'index': '(120)'}), '(timestamp=55, index=120)\n', (1703, 1728), False, 'from source.preprocessing.epoch import Epoch\n'), ((1765, 1784), 'numpy.array', 'np.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (1773, 1784), True, 'import numpy as np\n'), ((1820, 1873), 'source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateFeatureService.get_window', 'HeartRateFeatureService.get_window', (['timestamps', 'epoch'], {}), '(timestamps, epoch)\n', (1854, 1873), False, 'from source.preprocessing.heart_rate.heart_rate_feature_service import HeartRateFeatureService\n'), ((2259, 2353), 'numpy.array', 'np.array', (['[[1, 10], [10, 220], [20, 0], [40, 500], [70, 200], [90, 0], [100, 0], [400, 4]\n ]'], {}), '([[1, 10], [10, 220], [20, 0], [40, 500], [70, 200], [90, 0], [100,\n 0], [400, 4]])\n', (2267, 2353), True, 'import numpy as np\n'), ((2391, 2444), 'source.preprocessing.heart_rate.heart_rate_collection.HeartRateCollection', 'HeartRateCollection', ([], {'subject_id': 'subject_id', 'data': 'data'}), '(subject_id=subject_id, data=data)\n', (2410, 2444), False, 'from source.preprocessing.heart_rate.heart_rate_collection import HeartRateCollection\n'), ((2676, 2703), 'numpy.array', 'np.array', (['expected_features'], {}), '(expected_features)\n', (2684, 2703), True, 'import numpy as np\n'), ((2822, 2877), 
'source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateFeatureService.build', 'HeartRateFeatureService.build', (['subject_id', 'valid_epochs'], {}), '(subject_id, valid_epochs)\n', (2851, 2877), False, 'from source.preprocessing.heart_rate.heart_rate_feature_service import HeartRateFeatureService\n'), ((1098, 1142), 'source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateFeatureService.get_path', 'HeartRateFeatureService.get_path', (['"""subjectA"""'], {}), "('subjectA')\n", (1130, 1142), False, 'from source.preprocessing.heart_rate.heart_rate_feature_service import HeartRateFeatureService\n'), ((1460, 1504), 'source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateFeatureService.get_path', 'HeartRateFeatureService.get_path', (['subject_id'], {}), '(subject_id)\n', (1492, 1504), False, 'from source.preprocessing.heart_rate.heart_rate_feature_service import HeartRateFeatureService\n'), ((2552, 2567), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (2560, 2567), True, 'import numpy as np\n'), ((2569, 2584), 'numpy.array', 'np.array', (['[0.2]'], {}), '([0.2])\n', (2577, 2584), True, 'import numpy as np\n'), ((2729, 2756), 'source.preprocessing.epoch.Epoch', 'Epoch', ([], {'timestamp': '(4)', 'index': '(1)'}), '(timestamp=4, index=1)\n', (2734, 2756), False, 'from source.preprocessing.epoch import Epoch\n'), ((2758, 2786), 'source.preprocessing.epoch.Epoch', 'Epoch', ([], {'timestamp': '(50)', 'index': '(2)'}), '(timestamp=50, index=2)\n', (2763, 2786), False, 'from source.preprocessing.epoch import Epoch\n'), ((872, 916), 'source.preprocessing.heart_rate.heart_rate_feature_service.HeartRateFeatureService.get_path', 'HeartRateFeatureService.get_path', (['"""subjectA"""'], {}), "('subjectA')\n", (904, 916), False, 'from source.preprocessing.heart_rate.heart_rate_feature_service import HeartRateFeatureService\n')] |
"""
## pyart radar object
pyart.core.radar
================
A general central radial scanning (or dwelling) instrument class.
.. autosummary::
:toctree: generated/
_rays_per_sweep_data_factory
_gate_data_factory
_gate_lon_lat_data_factory
_gate_altitude_data_factory
.. autosummary::
:toctree: generated/
:template: dev_template.rst
Radar
"""
# the code for the Radar object in this file was adapted from pyart by <NAME>. & <NAME>.
# https://github.com/ARM-DOE/pyart
from __future__ import print_function
import numpy as np
import sys
from ..configure.pyart_config import get_metadata
from ..configure.pyart_lazydict import LazyLoadDict
from .transforms import antenna_vectors_to_cartesian, cartesian_to_geographic
class Radar(object):
"""
A class for storing antenna coordinate radar data.
The structure of the Radar class is based on the CF/Radial Data file
format. Global attributes and variables (section 4.1 and 4.3) are
represented as a dictionary in the metadata attribute. Other required and
optional variables are represented as dictionaries in a attribute with the
    same name as the variable in the CF/Radial standard. When an optional
    attribute is not present, the attribute has a value of None. The data for a
given variable is stored in the dictionary under the 'data' key. Moment
field data is stored as a dictionary of dictionaries in the fields
attribute. Sub-convention variables are stored as a dictionary of
dictionaries under the meta_group attribute.
Refer to the attribute section for information on the parameters.
Attributes
----------
time : dict
Time at the center of each ray.
range : dict
Range to the center of each gate (bin).
fields : dict of dicts
Moment fields.
metadata : dict
Metadata describing the instrument and data.
scan_type : str
Type of scan, one of 'ppi', 'rhi', 'sector' or 'other'. If the scan
volume contains multiple sweep modes this should be 'other'.
latitude : dict
Latitude of the instrument.
longitude : dict
Longitude of the instrument.
altitude : dict
Altitude of the instrument, above sea level.
altitude_agl : dict or None
Altitude of the instrument above ground level. If not provided this
attribute is set to None, indicating this parameter not available.
sweep_number : dict
The number of the sweep in the volume scan, 0-based.
sweep_mode : dict
Sweep mode for each mode in the volume scan.
fixed_angle : dict
        Target angle for the sweep. Azimuth angle in RHI modes, elevation
angle in all other modes.
sweep_start_ray_index : dict
Index of the first ray in each sweep relative to the start of the
volume, 0-based.
sweep_end_ray_index : dict
Index of the last ray in each sweep relative to the start of the
volume, 0-based.
rays_per_sweep : LazyLoadDict
        Number of rays in each sweep. The data key of this attribute is
        created upon first access from the data in the sweep_start_ray_index and
sweep_end_ray_index attributes. If the sweep locations needs to be
modified, do this prior to accessing this attribute or use
:py:func:`init_rays_per_sweep` to reset the attribute.
target_scan_rate : dict or None
Intended scan rate for each sweep. If not provided this attribute is
set to None, indicating this parameter is not available.
rays_are_indexed : dict or None
Indication of whether ray angles are indexed to a regular grid in
each sweep. If not provided this attribute is set to None, indicating
ray angle spacing is not determined.
ray_angle_res : dict or None
If rays_are_indexed is not None, this provides the angular resolution
of the grid. If not provided or available this attribute is set to
None.
azimuth : dict
Azimuth of antenna, relative to true North. Azimuth angles are
recommended to be expressed in the range of [0, 360], but other
representations are not forbidden.
elevation : dict
Elevation of antenna, relative to the horizontal plane. Elevation
angles are recommended to be expressed in the range of [-180, 180],
but other representations are not forbidden.
gate_x, gate_y, gate_z : LazyLoadDict
Location of each gate in a Cartesian coordinate system assuming a
        standard atmosphere with a 4/3 Earth's radius model. The data keys of
        these attributes are created upon first access from the data in the
range, azimuth and elevation attributes. If these attributes are
changed use :py:func:`init_gate_x_y_z` to reset.
gate_longitude, gate_latitude : LazyLoadDict
Geographic location of each gate. The projection parameter(s) defined
in the `projection` attribute are used to perform an inverse map
projection from the Cartesian gate locations relative to the radar
location to longitudes and latitudes. If these attributes are changed
use :py:func:`init_gate_longitude_latitude` to reset the attributes.
projection : dic or str
Projection parameters defining the map projection used to transform
from Cartesian to geographic coordinates. The default dictionary sets
the 'proj' key to 'pyart_aeqd' indicating that the native Py-ART
azimuthal equidistant projection is used. This can be modified to
specify a valid pyproj.Proj projparams dictionary or string.
The special key '_include_lon_0_lat_0' is removed when interpreting
this dictionary. If this key is present and set to True, which is
required when proj='pyart_aeqd', then the radar longitude and
latitude will be added to the dictionary as 'lon_0' and 'lat_0'.
gate_altitude : LazyLoadDict
The altitude of each radar gate as calculated from the altitude of the
radar and the Cartesian z location of each gate. If this attribute
is changed use :py:func:`init_gate_altitude` to reset the attribute.
scan_rate : dict or None
Actual antenna scan rate. If not provided this attribute is set to
None, indicating this parameter is not available.
antenna_transition : dict or None
Flag indicating if the antenna is in transition, 1 = yes, 0 = no.
If not provided this attribute is set to None, indicating this
parameter is not available.
rotation : dict or None
The rotation angle of the antenna. The angle about the aircraft
longitudinal axis for a vertically scanning radar.
tilt : dict or None
The tilt angle with respect to the plane orthogonal (Z-axis) to
aircraft longitudinal axis.
roll : dict or None
The roll angle of platform, for aircraft right wing down is positive.
drift : dict or None
Drift angle of antenna, the angle between heading and track.
heading : dict or None
Heading (compass) angle, clockwise from north.
pitch : dict or None
Pitch angle of antenna, for aircraft nose up is positive.
georefs_applied : dict or None
Indicates whether the variables have had georeference calculation
applied. Leading to Earth-centric azimuth and elevation angles.
instrument_parameters : dict of dicts or None
Instrument parameters, if not provided this attribute is set to None,
        indicating these parameters are not available. This dictionary also
includes variables in the radar_parameters CF/Radial subconvention.
radar_calibration : dict of dicts or None
        Instrument calibration parameters. If not provided this attribute is
        set to None, indicating these parameters are not available.
ngates : int
Number of gates (bins) in a ray.
nrays : int
Number of rays in the volume.
nsweeps : int
        Number of sweeps in the volume.
"""
    def __init__(self, time, _range, fields, metadata, scan_type,
                 latitude, longitude, altitude,
                 sweep_number, sweep_mode, fixed_angle, sweep_start_ray_index,
                 sweep_end_ray_index,
                 azimuth, elevation,
                 altitude_agl=None,
                 target_scan_rate=None, rays_are_indexed=None,
                 ray_angle_res=None,
                 scan_rate=None, antenna_transition=None,
                 instrument_parameters=None,
                 radar_calibration=None,
                 rotation=None, tilt=None, roll=None, drift=None, heading=None,
                 pitch=None, georefs_applied=None,
                 ):
        """
        Initialize a Radar object.

        Each parameter is stored directly as the attribute of the same
        name documented in the class docstring (``_range`` is stored as
        ``self.range``).  Keyword arguments defaulting to None mark
        optional variables that may not be available.
        """
        # Time variables need a calendar; default to gregorian when the
        # caller did not provide one.  NOTE: this mutates the caller's dict.
        if 'calendar' not in time:
            time['calendar'] = 'gregorian'
        # Required coordinate variables, moment data and metadata.
        self.time = time
        self.range = _range
        self.fields = fields
        self.metadata = metadata
        self.scan_type = scan_type
        # Instrument location.
        self.latitude = latitude
        self.longitude = longitude
        self.altitude = altitude
        self.altitude_agl = altitude_agl  # optional
        # Sweep bookkeeping (ray indices are relative to the volume start).
        self.sweep_number = sweep_number
        self.sweep_mode = sweep_mode
        self.fixed_angle = fixed_angle
        self.sweep_start_ray_index = sweep_start_ray_index
        self.sweep_end_ray_index = sweep_end_ray_index
        self.target_scan_rate = target_scan_rate  # optional
        self.rays_are_indexed = rays_are_indexed  # optional
        self.ray_angle_res = ray_angle_res  # optional
        # Per-ray antenna pointing.
        self.azimuth = azimuth
        self.elevation = elevation
        self.scan_rate = scan_rate  # optional
        self.antenna_transition = antenna_transition  # optional
        # Moving-platform georeference angles (all optional).
        self.rotation = rotation  # optional
        self.tilt = tilt  # optional
        self.roll = roll  # optional
        self.drift = drift  # optional
        self.heading = heading  # optional
        self.pitch = pitch  # optional
        self.georefs_applied = georefs_applied  # optional
        self.instrument_parameters = instrument_parameters  # optional
        self.radar_calibration = radar_calibration  # optional
        # Volume dimensions derived from the coordinate variables.
        self.ngates = len(_range['data'])
        self.nrays = len(time['data'])
        self.nsweeps = len(sweep_number['data'])
        # Default projection: Py-ART native azimuthal equidistant; the
        # special key requests lon_0/lat_0 be filled in from the radar
        # location when the projection is used.
        self.projection = {'proj': 'pyart_aeqd', '_include_lon_0_lat_0': True}
        # initialize attributes backed by lazy load dictionaries
        self.init_rays_per_sweep()
        self.init_gate_x_y_z()
        self.init_gate_longitude_latitude()
        self.init_gate_altitude()
def __getstate__(self):
    """Return a picklable copy of the state, dropping lazy attributes."""
    state = self.__dict__.copy()  # work on a copy of the object's state
    # These entries are lazily (re)computed, so they are not pickled;
    # __setstate__ rebuilds them on load.
    for lazy_attr in ('rays_per_sweep', 'gate_x', 'gate_y', 'gate_z',
                      'gate_longitude', 'gate_latitude', 'gate_altitude'):
        del state[lazy_attr]
    return state
def __setstate__(self, state):
    """Restore pickled state and rebuild the lazy-loaded attributes."""
    self.__dict__.update(state)
    # Recreate every attribute stripped out by __getstate__.
    for rebuild in (self.init_rays_per_sweep,
                    self.init_gate_x_y_z,
                    self.init_gate_longitude_latitude,
                    self.init_gate_altitude):
        rebuild()
# Attribute init/reset method
def init_rays_per_sweep(self):
    """(Re)create the lazily evaluated rays_per_sweep attribute."""
    rays_dic = LazyLoadDict(get_metadata('rays_per_sweep'))
    # Defer the actual ray counting until the 'data' key is accessed.
    rays_dic.set_lazy('data', _rays_per_sweep_data_factory(self))
    self.rays_per_sweep = rays_dic
def init_gate_x_y_z(self):
    """(Re)create the lazily evaluated gate_x, gate_y and gate_z attributes."""
    # Axis index 0/1/2 selects the x/y/z component in the factory closure.
    for axis, attr in enumerate(('gate_x', 'gate_y', 'gate_z')):
        lazy_dic = LazyLoadDict(get_metadata(attr))
        lazy_dic.set_lazy('data', _gate_data_factory(self, axis))
        setattr(self, attr, lazy_dic)
def init_gate_longitude_latitude(self):
    """(Re)create the lazy gate_longitude and gate_latitude attributes."""
    # Coordinate index 0 selects longitude, 1 selects latitude in the factory.
    for coord, attr in ((0, 'gate_longitude'), (1, 'gate_latitude')):
        lazy_dic = LazyLoadDict(get_metadata(attr))
        lazy_dic.set_lazy('data', _gate_lon_lat_data_factory(self, coord))
        setattr(self, attr, lazy_dic)
def init_gate_altitude(self):
    """(Re)create the lazily evaluated gate_altitude attribute."""
    alt_dic = LazyLoadDict(get_metadata('gate_altitude'))
    # Altitude computation is deferred until 'data' is first accessed.
    alt_dic.set_lazy('data', _gate_altitude_data_factory(self))
    self.gate_altitude = alt_dic
# private functions for checking limits, etc.
def _check_sweep_in_range(self, sweep):
    """Raise IndexError when sweep is not a valid 0-based sweep index."""
    if not 0 <= sweep < self.nsweeps:
        raise IndexError('Sweep out of range: ', sweep)
    return
# public check functions
def check_field_exists(self, field_name):
    """Verify that a field exists in the fields dictionary.

    Parameters
    ----------
    field_name : str
        Name of field to check.

    Raises
    ------
    KeyError
        If the field is not present.
    """
    if field_name in self.fields:
        return
    raise KeyError('Field not available: ' + field_name)
# Iterators
def iter_start(self):
    """Return an iterator over the sweep start ray indices."""
    return iter(self.sweep_start_ray_index['data'])
def iter_end(self):
    """Return an iterator over the sweep end ray indices."""
    return iter(self.sweep_end_ray_index['data'])
def iter_start_end(self):
    """Return an iterator over (start, end) ray-index pairs, one per sweep."""
    return zip(self.iter_start(), self.iter_end())
def iter_slice(self):
    """Return an iterator of slice objects selecting each sweep's rays."""
    # end indices are inclusive, slices are exclusive, hence the + 1
    return (slice(start, end + 1) for start, end in self.iter_start_end())
def iter_field(self, field_name):
    """Return an iterator over the named field's data, one sweep at a time."""
    self.check_field_exists(field_name)  # fail fast on an unknown field
    field_data = self.fields[field_name]['data']
    return (field_data[sweep_slice] for sweep_slice in self.iter_slice())
def iter_azimuth(self):
    """Return an iterator over the azimuth data, one sweep at a time."""
    azimuth_data = self.azimuth['data']
    return (azimuth_data[sweep_slice] for sweep_slice in self.iter_slice())
def iter_elevation(self):
    """Return an iterator over the elevation data, one sweep at a time."""
    elevation_data = self.elevation['data']
    return (elevation_data[sweep_slice] for sweep_slice in self.iter_slice())
# get methods
def get_start(self, sweep):
    """Return the index of the first ray in the given sweep."""
    self._check_sweep_in_range(sweep)
    start_indices = self.sweep_start_ray_index['data']
    return start_indices[sweep]
def get_end(self, sweep):
    """Return the index of the last ray in the given sweep."""
    self._check_sweep_in_range(sweep)
    end_indices = self.sweep_end_ray_index['data']
    return end_indices[sweep]
def get_start_end(self, sweep):
    """Return (first, last) ray indices for the given sweep."""
    first_ray = self.get_start(sweep)
    last_ray = self.get_end(sweep)
    return first_ray, last_ray
def get_slice(self, sweep):
    """Return a slice selecting the rays of the given sweep."""
    first_ray, last_ray = self.get_start_end(sweep)
    # the end index is inclusive while slices exclude the stop value
    return slice(first_ray, last_ray + 1)
def get_field(self, sweep, field_name, copy=False):
    """Return the field data for a given sweep.

    When used with :py:func:`get_gate_x_y_z` this method can be used to
    obtain the data needed for plotting a radar field with the correct
    spatial context.

    Parameters
    ----------
    sweep : int
        Sweep number to retrieve data for, 0 based.
    field_name : str
        Name of the field from which data should be retrieved.
    copy : bool, optional
        True to return a copy of the data. False, the default, returns
        a view of the data (when possible), changing this data will
        change the data in the underlying Radar object.

    Returns
    -------
    data : array
        Array containing data for the requested sweep and field.
    """
    self.check_field_exists(field_name)
    sweep_slice = self.get_slice(sweep)
    sweep_data = self.fields[field_name]['data'][sweep_slice]
    return sweep_data.copy() if copy else sweep_data
def get_azimuth(self, sweep, copy=False):
    """Return an array of azimuth angles for a given sweep.

    Parameters
    ----------
    sweep : int
        Sweep number to retrieve data for, 0 based.
    copy : bool, optional
        True to return a copy of the azimuths. False, the default, returns
        a view of the azimuths (when possible), changing this data will
        change the data in the underlying Radar object.

    Returns
    -------
    azimuths : array
        Array containing the azimuth angles for a given sweep.
    """
    sweep_slice = self.get_slice(sweep)
    azimuths = self.azimuth['data'][sweep_slice]
    return azimuths.copy() if copy else azimuths
def get_elevation(self, sweep, copy=False):
    """Return an array of elevation angles for a given sweep.

    Parameters
    ----------
    sweep : int
        Sweep number to retrieve data for, 0 based.
    copy : bool, optional
        True to return a copy of the elevations. False, the default,
        returns a view of the elevations (when possible), changing this
        data will change the data in the underlying Radar object.

    Returns
    -------
    elevation : array
        Array containing the elevation angles for a given sweep.
    """
    sweep_slice = self.get_slice(sweep)
    elevations = self.elevation['data'][sweep_slice]
    return elevations.copy() if copy else elevations
def get_gate_x_y_z(self, sweep, edges=False, filter_transitions=False):
    """Return the x, y and z gate locations in meters for a given sweep.

    With the default parameters this method returns the same data as the
    gate_x, gate_y and gate_z attributes, but computes the locations only
    for the requested sweep, which is cheaper than evaluating the full
    lazy attributes. Combine with :py:func:`get_field` to plot a field
    with the correct spatial context.

    Parameters
    ----------
    sweep : int
        Sweep number to retrieve gate locations from, 0 based.
    edges : bool, optional
        True to return the locations of the gate edges calculated by
        interpolating between the range, azimuths and elevations.
        False (the default) will return the locations of the gate centers
        with no interpolation.
    filter_transitions : bool, optional
        True to remove rays where the antenna was in transition between
        sweeps. False will include these rays. No rays will be removed
        if the antenna_transition attribute is not available (set to None).

    Returns
    -------
    x, y, z : 2D array
        Array containing the x, y and z, distances from the radar in
        meters for the center (or edges) for all gates in the sweep.
    """
    sweep_azimuths = self.get_azimuth(sweep)
    sweep_elevations = self.get_elevation(sweep)
    # Optionally drop rays recorded while the antenna moved between sweeps.
    if filter_transitions and self.antenna_transition is not None:
        in_transition = self.antenna_transition['data'][self.get_slice(sweep)]
        keep = in_transition == 0
        sweep_azimuths = sweep_azimuths[keep]
        sweep_elevations = sweep_elevations[keep]
    return antenna_vectors_to_cartesian(
        self.range['data'], sweep_azimuths, sweep_elevations, edges=edges)
def get_gate_lat_lon_alt(self, sweep, reset_gate_coords=False,
                         filter_transitions=False):
    """
    Return the longitude, latitude and altitude gate locations.
    Longitude and latitude are in degrees and altitude in meters.

    With the default parameters this method returns the same data as
    contained in the gate_latitude, gate_longitude and gate_altitude
    attributes but this method performs the gate location calculations
    only for the specified sweep and therefore is more efficient than
    accessing this data through these attributes. If the coordinates
    have changed at all, please use the reset_gate_coords parameter.

    Parameters
    ----------
    sweep : int
        Sweep number to retrieve gate locations from, 0 based.
    reset_gate_coords : bool, optional
        Optional to reset the gate latitude, gate longitude and gate
        altitude attributes before using them in this function. This
        is useful when the geographic coordinates have changed and gate
        latitude, gate longitude and gate altitude need to be reset.
    filter_transitions : bool, optional
        True to remove rays where the antenna was in transition between
        sweeps. False will include these rays. No rays will be removed
        if the antenna_transition attribute is not available (set to None).

    Returns
    -------
    lat, lon, alt : 2D array
        Array containing the latitude, longitude and altitude,
        for all gates in the sweep.
    """
    s = self.get_slice(sweep)
    if reset_gate_coords:
        # Rebuild the lazy coordinate dictionaries so any cached values
        # computed from stale radar coordinates are discarded.
        gate_latitude = LazyLoadDict(get_metadata('gate_latitude'))
        gate_latitude.set_lazy('data', _gate_lon_lat_data_factory(self, 1))
        self.gate_latitude = gate_latitude
        gate_longitude = LazyLoadDict(get_metadata('gate_longitude'))
        gate_longitude.set_lazy('data', _gate_lon_lat_data_factory(self, 0))
        self.gate_longitude = gate_longitude
        gate_altitude = LazyLoadDict(get_metadata('gate_altitude'))
        gate_altitude.set_lazy('data', _gate_altitude_data_factory(self))
        self.gate_altitude = gate_altitude
    lat = self.gate_latitude['data'][s]
    lon = self.gate_longitude['data'][s]
    alt = self.gate_altitude['data'][s]
    # Drop rays recorded while the antenna was moving between sweeps.
    if filter_transitions and self.antenna_transition is not None:
        valid = self.antenna_transition['data'][s] == 0
        lat = lat[valid]
        lon = lon[valid]
        alt = alt[valid]
    return lat, lon, alt
def get_nyquist_vel(self, sweep, check_uniform=True):
    """Return the Nyquist velocity in meters per second for a given sweep.

    Parameters
    ----------
    sweep : int
        Sweep number to retrieve data for, 0 based.
    check_uniform : bool
        True to check that the Nyquist velocities are uniform in the
        sweep, False will skip this check and return the velocity of
        the first ray in the sweep.

    Returns
    -------
    nyquist_velocity : float
        Nyquist velocity in m/s for the given sweep.

    Raises
    ------
    LookupError
        If the Nyquist velocity is not available.
    Exception
        If check_uniform is True and the velocities are not uniform in
        the sweep.
    """
    s = self.get_slice(sweep)
    try:
        nyq_vel = self.instrument_parameters['nyquist_velocity']['data'][s]
    except (KeyError, TypeError) as err:
        # KeyError: 'nyquist_velocity' missing; TypeError:
        # instrument_parameters is None. The previous bare `except:` also
        # hid unrelated bugs (and even KeyboardInterrupt).
        raise LookupError('Nyquist velocity unavailable') from err
    if check_uniform:
        if np.any(nyq_vel != nyq_vel[0]):
            raise Exception('Nyquist velocities are not uniform in sweep')
    return float(nyq_vel[0])
# Methods
def info(self, level='standard', out=sys.stdout):
    """
    Print information on radar.

    Parameters
    ----------
    level : {'compact', 'standard', 'full', 'c', 's', 'f'}, optional
        Level of information on radar object to print, compact is
        minimal information, standard more and full everything.
    out : file-like, optional
        Stream to direct output to, default is to print information
        to standard out (the screen).
    """
    # expand the single-letter shorthands to their full names
    if level == 'c':
        level = 'compact'
    elif level == 's':
        level = 'standard'
    elif level == 'f':
        level = 'full'
    if level not in ['standard', 'compact', 'full']:
        raise ValueError('invalid level parameter')
    # attributes are printed in (roughly) alphabetical order
    self._dic_info('altitude', level, out)
    self._dic_info('altitude_agl', level, out)
    self._dic_info('antenna_transition', level, out)
    self._dic_info('azimuth', level, out)
    self._dic_info('elevation', level, out)
    print('fields:', file=out)
    for field_name, field_dic in self.fields.items():
        self._dic_info(field_name, level, out, field_dic, 1)
    self._dic_info('fixed_angle', level, out)
    if self.instrument_parameters is None:
        print('instrument_parameters: None', file=out)
    else:
        print('instrument_parameters:', file=out)
        for name, dic in self.instrument_parameters.items():
            self._dic_info(name, level, out, dic, 1)
    self._dic_info('latitude', level, out)
    self._dic_info('longitude', level, out)
    print('nsweeps:', self.nsweeps, file=out)
    print('ngates:', self.ngates, file=out)
    print('nrays:', self.nrays, file=out)
    if self.radar_calibration is None:
        print('radar_calibration: None', file=out)
    else:
        print('radar_calibration:', file=out)
        for name, dic in self.radar_calibration.items():
            self._dic_info(name, level, out, dic, 1)
    self._dic_info('range', level, out)
    self._dic_info('scan_rate', level, out)
    print('scan_type:', self.scan_type, file=out)
    self._dic_info('sweep_end_ray_index', level, out)
    self._dic_info('sweep_mode', level, out)
    self._dic_info('sweep_number', level, out)
    self._dic_info('sweep_start_ray_index', level, out)
    self._dic_info('target_scan_rate', level, out)
    self._dic_info('time', level, out)
    # Airborne radar parameters, only printed when present
    if self.rotation is not None:
        self._dic_info('rotation', level, out)
    if self.tilt is not None:
        self._dic_info('tilt', level, out)
    if self.roll is not None:
        self._dic_info('roll', level, out)
    if self.drift is not None:
        self._dic_info('drift', level, out)
    if self.heading is not None:
        self._dic_info('heading', level, out)
    if self.pitch is not None:
        self._dic_info('pitch', level, out)
    if self.georefs_applied is not None:
        self._dic_info('georefs_applied', level, out)
    # always print out all metadata last
    self._dic_info('metadata', 'full', out)
def _dic_info(self, attr, level, out, dic=None, ident_level=0):
    """Print information on a dictionary attribute.

    Parameters
    ----------
    attr : str
        Attribute name; used as the label and, when dic is None, looked
        up on self.
    level : {'compact', 'standard', 'full'}
        Verbosity level (see :py:func:`info`).
    out : file-like
        Stream to write to.
    dic : dict, optional
        Dictionary to describe; defaults to getattr(self, attr).
    ident_level : int, optional
        Number of tab stops to indent the output.
    """
    if dic is None:
        dic = getattr(self, attr)
    ilvl0 = '\t' * ident_level
    ilvl1 = '\t' * (ident_level + 1)
    if dic is None:
        print(str(attr) + ': None', file=out)
        return
    # make a string summary of the data key if it exists.
    if 'data' not in dic:
        d_str = 'Missing'
    elif not isinstance(dic['data'], np.ndarray):
        d_str = '<not a ndarray>'
    else:
        data = dic['data']
        t = (data.dtype, data.shape)
        d_str = '<ndarray of type: %s and shape: %s>' % t
    # compact, only data summary
    if level == 'compact':
        print(ilvl0 + str(attr) + ':', d_str, file=out)
    # standard, all keys, only summary for data
    elif level == 'standard':
        print(ilvl0 + str(attr) + ':', file=out)
        print(ilvl1 + 'data:', d_str, file=out)
        for key, val in dic.items():
            if key == 'data':
                continue
            print(ilvl1 + key + ':', val, file=out)
    # full, all keys, full data
    elif level == 'full':
        print(str(attr) + ':', file=out)
        if 'data' in dic:
            print(ilvl1 + 'data:', dic['data'], file=out)
        for key, val in dic.items():
            if key == 'data':
                continue
            print(ilvl1 + key + ':', val, file=out)
    return
def add_field(self, field_name, dic, replace_existing=False):
    """Add a field to the object.

    Parameters
    ----------
    field_name : str
        Name of the field to add to the dictionary of fields.
    dic : dict
        Dictionary containing field data (under the 'data' key) and
        metadata.
    replace_existing : bool, optional
        True to replace the existing field with key field_name if it
        exists, loosing any existing data. False will raise a ValueError
        when the field already exists.

    Raises
    ------
    ValueError
        If the field already exists (and replace_existing is False) or
        the data array has the wrong shape.
    KeyError
        If dic has no 'data' key.
    """
    # validate the candidate field dictionary before storing it
    if field_name in self.fields and replace_existing is False:
        raise ValueError('A field with name: %s already exists' % (field_name))
    if 'data' not in dic:
        raise KeyError("dic must contain a 'data' key")
    expected_shape = (self.nrays, self.ngates)
    if dic['data'].shape != expected_shape:
        raise ValueError(
            "'data' has invalid shape, should be (%i, %i)" % expected_shape)
    # add the field
    self.fields[field_name] = dic
    return
def add_field_like(self, existing_field_name, field_name, data,
                   replace_existing=False):
    """Add a field to the object with metadata from an existing field.

    Note that the data parameter is not copied by this method. If data
    refers to a 'data' array from an existing field dictionary, a copy
    should be made within or prior to using this method, otherwise both
    field dictionaries will share the same NumPy array and modifying one
    will change the other. Use data.copy() to avoid this.

    Parameters
    ----------
    existing_field_name : str
        Name of an existing field to take metadata from when adding
        the new field to the object.
    field_name : str
        Name of the field to add to the dictionary of fields.
    data : array
        Field data. A copy of this data is not made, see the note above.
    replace_existing : bool, optional
        True to replace the existing field with key field_name if it
        exists, loosing any existing data. False will raise a ValueError
        when the field already exists.

    Examples
    --------
    >>> radar.add_field_like('reflectivity', 'reflectivity_copy',
    ...                      radar.fields['reflectivity']['data'].copy())
    """
    if existing_field_name not in self.fields:
        raise ValueError('field %s does not exist in object' % (existing_field_name))
    # copy every metadata entry, then attach the new data array
    dic = {key: val
           for key, val in self.fields[existing_field_name].items()
           if key != 'data'}
    dic['data'] = data
    return self.add_field(field_name, dic,
                          replace_existing=replace_existing)
def extract_sweeps(self, sweeps):
    """
    Create a new radar contains only the data from select sweeps.

    Parameters
    ----------
    sweeps : array_like
        Sweeps (0-based) to include in new Radar object.

    Returns
    -------
    radar : Radar
        Radar object which contains a copy of data from the selected
        sweeps.
    """
    # parse and verify parameters
    sweeps = np.array(sweeps, dtype='int32')
    if np.any(sweeps > (self.nsweeps - 1)):
        raise ValueError('invalid sweeps indices in sweeps parameter')
    if np.any(sweeps < 0):
        raise ValueError('only positive sweeps can be extracted')

    def mkdic(dic, select):
        """ Make a dictionary, selecting out select from data key """
        if dic is None:
            return None
        d = dic.copy()
        if 'data' in d and select is not None:
            d['data'] = d['data'][select].copy()
        return d

    # create array of rays which select the sweeps selected and
    # the number of rays per sweep.
    ray_count = (self.sweep_end_ray_index['data'] -
                 self.sweep_start_ray_index['data'] + 1)[sweeps]
    ssri = self.sweep_start_ray_index['data'][sweeps]
    rays = np.concatenate(
        [range(s, s+e) for s, e in zip(ssri, ray_count)]).astype('int32')

    # radar location attribute dictionary selector: a single-entry
    # altitude means a fixed platform, so location is not per-sweep
    if len(self.altitude['data']) == 1:
        loc_select = None
    else:
        loc_select = sweeps

    # create new dictionaries
    time = mkdic(self.time, rays)
    _range = mkdic(self.range, None)

    fields = {}
    for field_name, dic in self.fields.items():
        fields[field_name] = mkdic(dic, rays)
    metadata = mkdic(self.metadata, None)
    scan_type = str(self.scan_type)

    latitude = mkdic(self.latitude, loc_select)
    longitude = mkdic(self.longitude, loc_select)
    altitude = mkdic(self.altitude, loc_select)
    altitude_agl = mkdic(self.altitude_agl, loc_select)

    sweep_number = mkdic(self.sweep_number, sweeps)
    sweep_mode = mkdic(self.sweep_mode, sweeps)
    fixed_angle = mkdic(self.fixed_angle, sweeps)
    # recompute the start/end indices so they are contiguous in the new,
    # smaller volume rather than pointing into the original ray ordering
    sweep_start_ray_index = mkdic(self.sweep_start_ray_index, None)
    sweep_start_ray_index['data'] = np.cumsum(
        np.append([0], ray_count[:-1]), dtype='int32')
    sweep_end_ray_index = mkdic(self.sweep_end_ray_index, None)
    sweep_end_ray_index['data'] = np.cumsum(ray_count, dtype='int32') - 1
    target_scan_rate = mkdic(self.target_scan_rate, sweeps)
    # NOTE(review): rays_are_indexed and ray_angle_res are not propagated
    # to the new object — confirm this is intended.

    azimuth = mkdic(self.azimuth, rays)
    elevation = mkdic(self.elevation, rays)
    scan_rate = mkdic(self.scan_rate, rays)
    antenna_transition = mkdic(self.antenna_transition, rays)

    # instrument_parameters
    # Filter the instrument_parameter dictionary based size of leading
    # dimension, this might not always be correct.
    if self.instrument_parameters is None:
        instrument_parameters = None
    else:
        instrument_parameters = {}
        for key, dic in self.instrument_parameters.items():
            if dic['data'].ndim != 0:
                dim0_size = dic['data'].shape[0]
            else:
                dim0_size = -1
            if dim0_size == self.nsweeps:
                fdic = mkdic(dic, sweeps)
            elif dim0_size == self.nrays:
                fdic = mkdic(dic, rays)
            else:  # keep everything
                fdic = mkdic(dic, None)
            instrument_parameters[key] = fdic

    # radar_calibration
    # copy all field in radar_calibration as is except for
    # r_calib_index which we filter based upon time. This might
    # leave some indices in the "r_calib" dimension not referenced in
    # the r_calib_index array.
    if self.radar_calibration is None:
        radar_calibration = None
    else:
        radar_calibration = {}
        for key, dic in self.radar_calibration.items():
            if key == 'r_calib_index':
                radar_calibration[key] = mkdic(dic, rays)
            else:
                radar_calibration[key] = mkdic(dic, None)

    return Radar(time, _range, fields, metadata, scan_type,
                 latitude, longitude, altitude,
                 sweep_number, sweep_mode, fixed_angle,
                 sweep_start_ray_index, sweep_end_ray_index,
                 azimuth, elevation,
                 altitude_agl=altitude_agl,
                 target_scan_rate=target_scan_rate,
                 scan_rate=scan_rate,
                 antenna_transition=antenna_transition,
                 instrument_parameters=instrument_parameters,
                 radar_calibration=radar_calibration)
def _rays_per_sweep_data_factory(radar):
    """Return a closure computing the number of rays in each sweep."""
    def _rays_per_sweep_data():
        """Compute the per-sweep ray counts from the sweep ray indices."""
        start = radar.sweep_start_ray_index['data']
        end = radar.sweep_end_ray_index['data']
        # end indices are inclusive, hence the + 1
        return end - start + 1
    return _rays_per_sweep_data
def _gate_data_factory(radar, coordinate):
    """ Return a function which returns the Cartesian locations of gates.

    Parameters
    ----------
    radar : Radar
        Radar whose geometry is used in the computation.
    coordinate : int
        Axis returned by the closure: 0 for x, 1 for y, 2 for z.
    """
    def _gate_data():
        """ The function which returns the Cartesian locations of gates. """
        ranges = radar.range['data']
        azimuths = radar.azimuth['data']
        elevations = radar.elevation['data']
        cartesian_coords = antenna_vectors_to_cartesian(
            ranges, azimuths, elevations, edges=False)
        # load x, y, and z data except for the coordinate in question:
        # all three axes are computed together, so prime the other two
        # lazy dictionaries to avoid repeating the work.
        if coordinate != 0:
            radar.gate_x['data'] = cartesian_coords[0]
        if coordinate != 1:
            radar.gate_y['data'] = cartesian_coords[1]
        if coordinate != 2:
            radar.gate_z['data'] = cartesian_coords[2]
        return cartesian_coords[coordinate]
    return _gate_data
def _gate_lon_lat_data_factory(radar, coordinate):
    """ Return a function which returns the geographic locations of gates.

    Parameters
    ----------
    radar : Radar
        Radar whose Cartesian gate locations and projection are used.
    coordinate : int
        Value returned by the closure: 0 for longitude, 1 for latitude.
    """
    def _gate_lon_lat_data():
        """ The function which returns the geographic locations gates. """
        x = radar.gate_x['data']
        y = radar.gate_y['data']
        projparams = radar.projection.copy()
        # The sentinel key asks for the radar location to be used as the
        # projection origin; it must not be passed on to the projection.
        if projparams.pop('_include_lon_0_lat_0', False):
            projparams['lon_0'] = radar.longitude['data'][0]
            projparams['lat_0'] = radar.latitude['data'][0]
        geographic_coords = cartesian_to_geographic(x, y, projparams)
        # set the other geographic coordinate, since both are computed
        # together; this primes the sibling lazy dictionary.
        if coordinate == 0:
            radar.gate_latitude['data'] = geographic_coords[1]
        else:
            radar.gate_longitude['data'] = geographic_coords[0]
        return geographic_coords[coordinate]
    return _gate_lon_lat_data
def _gate_altitude_data_factory(radar):
    """ Return a function which returns the gate altitudes. """
    def _gate_altitude_data():
        """ The function which returns the gate altitudes. """
        try:
            return radar.altitude['data'] + radar.gate_z['data']
        except ValueError:
            # Broadcasting failed — presumably altitude has one entry per
            # ray (moving platform) and cannot be added to the
            # (nrays, ngates) gate_z array directly, so fall back to the
            # mean platform altitude. TODO confirm against callers.
            return np.mean(radar.altitude['data']) + radar.gate_z['data']
    return _gate_altitude_data
| [
"numpy.mean",
"numpy.any",
"numpy.append",
"numpy.array",
"numpy.cumsum"
] | [((32840, 32871), 'numpy.array', 'np.array', (['sweeps'], {'dtype': '"""int32"""'}), "(sweeps, dtype='int32')\n", (32848, 32871), True, 'import numpy as np\n'), ((32883, 32916), 'numpy.any', 'np.any', (['(sweeps > self.nsweeps - 1)'], {}), '(sweeps > self.nsweeps - 1)\n', (32889, 32916), True, 'import numpy as np\n'), ((33006, 33024), 'numpy.any', 'np.any', (['(sweeps < 0)'], {}), '(sweeps < 0)\n', (33012, 33024), True, 'import numpy as np\n'), ((24185, 24214), 'numpy.any', 'np.any', (['(nyq_vel != nyq_vel[0])'], {}), '(nyq_vel != nyq_vel[0])\n', (24191, 24214), True, 'import numpy as np\n'), ((34828, 34858), 'numpy.append', 'np.append', (['[0]', 'ray_count[:-1]'], {}), '([0], ray_count[:-1])\n', (34837, 34858), True, 'import numpy as np\n'), ((34981, 35016), 'numpy.cumsum', 'np.cumsum', (['ray_count'], {'dtype': '"""int32"""'}), "(ray_count, dtype='int32')\n", (34990, 35016), True, 'import numpy as np\n'), ((39807, 39838), 'numpy.mean', 'np.mean', (["radar.altitude['data']"], {}), "(radar.altitude['data'])\n", (39814, 39838), True, 'import numpy as np\n')] |
import unittest
from numpy.random import RandomState
class TestRandomState(unittest.TestCase):
    """Check that a seeded legacy RandomState reproduces a known sequence."""

    def test_random_state(self):
        rng = RandomState(42)
        expected = [-4, 9, 4, 0, -3, -4, 8, 0, 0, -7]
        generated = [rng.randint(-10, 10) for _ in range(10)]
        print(generated)
        self.assertListEqual(generated, expected)
| [
"numpy.random.RandomState"
] | [((152, 167), 'numpy.random.RandomState', 'RandomState', (['(42)'], {}), '(42)\n', (163, 167), False, 'from numpy.random import RandomState\n')] |
# -*- coding: utf-8 -*-
"""Generating a cosmological summary of the H0 or D_dt H0_samples
Example
-------
To run this script, pass in the version ID and the sampling method as the argument::
$ python summarize.py 21 simple_mc_default
The summary will be saved to the same directory level as the sample directory.
"""
import os
import numpy as np
import pandas as pd
import argparse
import scipy.stats
from baobab.configs import BaobabConfig
from h0rton.configs import TestConfig
import h0rton.h0_inference.h0_utils as h0_utils
import h0rton.tdlmc_utils as tdlmc_utils
def parse_args(argv=None):
    """Parse command-line arguments.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse. Defaults to None, in which case argparse
        reads sys.argv — preserving the original no-argument behavior
        while allowing tests to inject arguments.

    Returns
    -------
    argparse.Namespace
        Parsed arguments with attributes version_id (int),
        sampling_method (str) and rung_idx (int or None).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('version_id', help='version ID', type=int)
    parser.add_argument('sampling_method', help='the sampling method (one of simple_mc_default, mcmc_default, hybrid', type=str)
    parser.add_argument('--rung_idx', help='the TDLMC rung index, if H0rton was run on TDLMC data', type=int, default=None)
    args = parser.parse_args(argv)
    return args
def main():
    """Entry point: locate the sample directory and dispatch to a summarizer."""
    args = parse_args()
    # Folder where all the H0 samples live
    samples_dir = '/home/jwp/stage/sl/h0rton/experiments/v{:d}/{:s}'.format(args.version_id, args.sampling_method)
    # Read in test cfg for this version and sampling method
    cfg_path = os.path.join(samples_dir, '..', '{:s}.json'.format(args.sampling_method))
    cfg = TestConfig.from_file(cfg_path)
    # NOTE: substring match, so variants containing 'mcmc_default' also
    # dispatch to the MCMC summarizer.
    if 'mcmc_default' in args.sampling_method:
        summarize_mcmc(samples_dir, cfg, 'mcmc_default', args.rung_idx)
    elif args.sampling_method == 'hybrid':
        summarize_mcmc(samples_dir, cfg, 'hybrid')
    elif args.sampling_method == 'simple_mc_default':
        summarize_simple_mc_default(samples_dir, cfg)
    else:
        raise ValueError("This sampling method is not supported. Choose one of [simple_mc_default, mcmc_default, hybrid].")
def summarize_simple_mc_default(samples_dir, test_cfg):
    """Summarize the output of simple_mc_default, i.e. the uniform H0 samples
    with corresponding weights.

    Reads every ``h0_dict_<id>.npy`` in samples_dir, computes weighted H0 and
    D_dt statistics per lens, writes ``summary.csv`` one directory above the
    samples directory and lists problem lenses in
    ``mcmc_default_candidates.txt``.

    Parameters
    ----------
    samples_dir : str
        Directory containing the per-lens ``h0_dict_<id>.npy`` files.
    test_cfg : TestConfig
        Test configuration providing the test baobab config path.
    """
    H0_dicts = [f for f in os.listdir(samples_dir) if f.startswith('h0_dict')]
    H0_dicts.sort()
    # Read in the redshift columns of metadata
    baobab_cfg = BaobabConfig.from_file(test_cfg.data.test_baobab_cfg_path)
    metadata_path = os.path.join(baobab_cfg.out_dir, 'metadata.csv')
    meta = pd.read_csv(metadata_path, index_col=None, usecols=['z_lens', 'z_src', 'n_img'])
    # Accumulate one dict per lens; DataFrame.append is deprecated and was
    # removed in pandas 2.0, so build the frame once at the end instead.
    summary_rows = []
    for f_name in H0_dicts:
        lens_i = int(os.path.splitext(f_name)[0].split('h0_dict_')[1])
        # Slice meta for this lensing system
        meta_i = meta.iloc[lens_i]
        z_lens = meta_i['z_lens']
        z_src = meta_i['z_src']
        n_img = meta_i['n_img']
        # Read in H0 samples using lens identifier
        H0_dict = np.load(os.path.join(samples_dir, f_name), allow_pickle=True).item()
        H0_samples = H0_dict['h0_samples']
        weights = H0_dict['h0_weights']
        H0_normal_stats = h0_utils.get_normal_stats_naive(H0_samples, weights)
        # Kish effective sample size of the weighted samples
        n_eff = np.sum(weights)**2.0/(np.sum(weights**2.0))
        # Convert H0 H0_samples to D_dt
        cosmo_converter = h0_utils.CosmoConverter(z_lens, z_src)
        D_dt_samples = cosmo_converter.get_D_dt(H0_samples)
        D_dt_stats = h0_utils.get_lognormal_stats_naive(D_dt_samples, weights)
        D_dt_normal_stats = h0_utils.get_normal_stats_naive(D_dt_samples, weights)
        summary_rows.append(dict(
            id=lens_i,
            measured_td_wrt0=list(H0_dict['measured_td_wrt0']),
            H0_mean=H0_normal_stats['mean'],
            H0_std=H0_normal_stats['std'],
            D_dt_mu=D_dt_stats['mu'],
            D_dt_sigma=D_dt_stats['sigma'],
            D_dt_mean=D_dt_normal_stats['mean'],
            D_dt_std=D_dt_normal_stats['std'],
            n_eff=n_eff,
            z_lens=z_lens,
            z_src=z_src,
            n_img=n_img,
            inference_time=H0_dict['inference_time'],
        ))
    summary_df = pd.DataFrame(summary_rows)
    summary_df.to_csv(os.path.join(samples_dir, '..', 'summary.csv'))
    # Output list of problem lens IDs: too few effective samples or a
    # suspiciously narrow H0 posterior
    problem_id = summary_df.loc[(summary_df['n_eff'] < 3) | (summary_df['H0_std'] < 1.0)]['id'].astype(int)
    with open(os.path.join(samples_dir, '..', "mcmc_default_candidates.txt"), "w") as f:
        for pid in problem_id:
            f.write(str(pid) + "\n")
def summarize_mcmc(samples_dir, test_cfg, sampling_method, rung_idx=None):
    """Summarize the output of mcmc_default/hybrid, i.e. MCMC samples from the
    D_dt posterior for each lens.

    Parameters
    ----------
    samples_dir : str
        Directory containing the per-lens ``D_dt_dict_<id>.npy`` files.
    test_cfg : TestConfig
        Test configuration (provides the kappa_ext prior and, for the
        mcmc_default case, the test baobab config path).
    sampling_method : str
        Either 'mcmc_default' or 'hybrid'; controls where the per-lens
        metadata comes from and which candidate file is written.
    rung_idx : int, optional
        TDLMC rung index. Defaults to None so the 'hybrid' call in main(),
        which passes only three arguments, works (previously a TypeError).
    """
    true_H0 = 70.0
    true_Om0 = 0.3
    if 'mcmc_default' in sampling_method:
        if rung_idx is None:
            # Read in the relevant columns of metadata
            baobab_cfg = BaobabConfig.from_file(test_cfg.data.test_baobab_cfg_path)
            metadata_path = os.path.join(baobab_cfg.out_dir, 'metadata.csv')
            summary_df = pd.read_csv(metadata_path, index_col=None, usecols=['z_lens', 'z_src', 'n_img'], nrows=500)  # FIXME: capped test set size at 500, as the stored dataset may be much larger
        else:
            summary_df = tdlmc_utils.convert_to_dataframe(rung=rung_idx, save_csv_path=None)
            summary_df.sort_values('seed', axis=0, inplace=True)
            true_H0 = summary_df.iloc[0]['H0']
            true_Om0 = 0.27
        summary_df['id'] = summary_df.index
        summary_df['D_dt_mu'] = np.nan
        summary_df['D_dt_sigma'] = np.nan
        summary_df['H0_mean'] = np.nan
        summary_df['H0_std'] = np.nan
        summary_df['inference_time'] = 0.0
    else:
        summary_df = pd.read_csv(os.path.join(samples_dir, '..', 'summary.csv'), index_col=None)
    D_dt_dicts = [f for f in os.listdir(samples_dir) if f.startswith('D_dt_dict')]
    D_dt_dicts.sort()
    oversampling = 20
    threshold = 1000  # minimum number of usable H0 samples per lens
    # Initialize list for catastrophic lenses not solved by MCMC
    lenses_to_rerun = []
    lenses_run = []
    for f_name in D_dt_dicts:
        lens_i = int(os.path.splitext(f_name)[0].split('D_dt_dict_')[1])
        lenses_run.append(lens_i)
        meta = summary_df.loc[summary_df['id'] == lens_i, ['z_lens', 'z_src']].squeeze()
        # Read in D_dt samples using lens identifier
        D_dt_dict = np.load(os.path.join(samples_dir, f_name), allow_pickle=True).item()
        # Rescale D_dt samples to correct for k_ext
        uncorrected_D_dt_samples = D_dt_dict['D_dt_samples']  # [old_n_samples,]
        uncorrected_D_dt_samples = h0_utils.remove_outliers_from_lognormal(uncorrected_D_dt_samples, 3).reshape(-1, 1)  # [n_samples, 1]
        k_ext_rv = getattr(scipy.stats, test_cfg.kappa_ext_prior.dist)(**test_cfg.kappa_ext_prior.kwargs)
        k_ext = k_ext_rv.rvs(size=[len(uncorrected_D_dt_samples), oversampling])  # [n_samples, oversampling]
        if test_cfg.kappa_ext_prior.transformed:
            D_dt_samples = (uncorrected_D_dt_samples*k_ext).flatten()
        else:
            D_dt_samples = (uncorrected_D_dt_samples/(1.0 - k_ext)).flatten()  # [n_samples,]
        # Compute lognormal params for D_dt and update summary
        try:
            D_dt_stats = h0_utils.get_lognormal_stats(D_dt_samples)
            D_dt_normal_stats = h0_utils.get_normal_stats(D_dt_samples)
        except Exception:  # narrowed from a bare except, which also caught KeyboardInterrupt
            print("lens", lens_i)
            print("==========")
            lenses_to_rerun.append(lens_i)
            # Skip this lens: D_dt_stats is undefined on failure, so falling
            # through (as the previously commented-out `continue` allowed)
            # raised a NameError on the next line.
            continue
        summary_df.loc[summary_df['id'] == lens_i, 'D_dt_mu'] = D_dt_stats['mu']
        summary_df.loc[summary_df['id'] == lens_i, 'D_dt_sigma'] = D_dt_stats['sigma']
        summary_df.loc[summary_df['id'] == lens_i, 'D_dt_mean'] = D_dt_normal_stats['mean']
        summary_df.loc[summary_df['id'] == lens_i, 'D_dt_std'] = D_dt_normal_stats['std']
        # Convert D_dt samples to H0
        D_dt_samples = scipy.stats.lognorm.rvs(scale=np.exp(D_dt_stats['mu']), s=D_dt_stats['sigma'], size=oversampling*threshold)
        D_dt_samples = D_dt_samples[np.isfinite(D_dt_samples)]
        cosmo_converter = h0_utils.CosmoConverter(meta['z_lens'], meta['z_src'], H0=true_H0, Om0=true_Om0)
        H0_samples = cosmo_converter.get_H0(D_dt_samples)
        # Reject H0 samples outside H0 prior
        H0_samples = H0_samples[np.isfinite(H0_samples)]
        if len(H0_samples) > 0:
            H0_samples = H0_samples[np.logical_and(H0_samples > 50.0, H0_samples < 90.0)]
        if len(H0_samples) < threshold:
            lenses_to_rerun.append(lens_i)
        summary_df.loc[summary_df['id'] == lens_i, 'H0_mean'] = np.mean(H0_samples)
        summary_df.loc[summary_df['id'] == lens_i, 'H0_std'] = np.std(H0_samples)
        summary_df.loc[summary_df['id'] == lens_i, 'inference_time'] += D_dt_dict['inference_time']
    # Replace existing summary
    summary_df.to_csv(os.path.join(samples_dir, '..', 'summary.csv'))
    # Output list of catastrophic/no-good lens IDs
    if sampling_method == 'mcmc_default':
        # List of lenses that skipped MCMC
        total_lenses = np.arange(test_cfg.data.n_test)
        lenses_not_run = set(list(total_lenses)) - set(list(lenses_run))
        lenses_for_hybrid = list(lenses_not_run.union(set(lenses_to_rerun)))
        with open(os.path.join(samples_dir, '..', "hybrid_candidates.txt"), "w") as f:
            for lens_i in lenses_for_hybrid:
                f.write(str(lens_i) + "\n")
    else:  # hybrid case
        with open(os.path.join(samples_dir, '..', "no_good_candidates.txt"), "w") as f:
            for lens_i in lenses_to_rerun:
                f.write(str(lens_i) + "\n")
if __name__ == '__main__':
main() | [
"pandas.read_csv",
"h0rton.h0_inference.h0_utils.get_normal_stats_naive",
"numpy.isfinite",
"numpy.arange",
"numpy.mean",
"os.listdir",
"h0rton.h0_inference.h0_utils.get_normal_stats",
"argparse.ArgumentParser",
"h0rton.tdlmc_utils.convert_to_dataframe",
"numpy.exp",
"h0rton.h0_inference.h0_util... | [((656, 681), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (679, 681), False, 'import argparse\n'), ((1413, 1448), 'h0rton.configs.TestConfig.from_file', 'TestConfig.from_file', (['test_cfg_path'], {}), '(test_cfg_path)\n', (1433, 1448), False, 'from h0rton.configs import TestConfig\n'), ((2253, 2311), 'baobab.configs.BaobabConfig.from_file', 'BaobabConfig.from_file', (['test_cfg.data.test_baobab_cfg_path'], {}), '(test_cfg.data.test_baobab_cfg_path)\n', (2275, 2311), False, 'from baobab.configs import BaobabConfig\n'), ((2332, 2380), 'os.path.join', 'os.path.join', (['baobab_cfg.out_dir', '"""metadata.csv"""'], {}), "(baobab_cfg.out_dir, 'metadata.csv')\n", (2344, 2380), False, 'import os\n'), ((2392, 2477), 'pandas.read_csv', 'pd.read_csv', (['metadata_path'], {'index_col': 'None', 'usecols': "['z_lens', 'z_src', 'n_img']"}), "(metadata_path, index_col=None, usecols=['z_lens', 'z_src', 'n_img']\n )\n", (2403, 2477), True, 'import pandas as pd\n'), ((2491, 2505), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2503, 2505), True, 'import pandas as pd\n'), ((3094, 3146), 'h0rton.h0_inference.h0_utils.get_normal_stats_naive', 'h0_utils.get_normal_stats_naive', (['H0_samples', 'weights'], {}), '(H0_samples, weights)\n', (3125, 3146), True, 'import h0rton.h0_inference.h0_utils as h0_utils\n'), ((3273, 3311), 'h0rton.h0_inference.h0_utils.CosmoConverter', 'h0_utils.CosmoConverter', (['z_lens', 'z_src'], {}), '(z_lens, z_src)\n', (3296, 3311), True, 'import h0rton.h0_inference.h0_utils as h0_utils\n'), ((3393, 3450), 'h0rton.h0_inference.h0_utils.get_lognormal_stats_naive', 'h0_utils.get_lognormal_stats_naive', (['D_dt_samples', 'weights'], {}), '(D_dt_samples, weights)\n', (3427, 3450), True, 'import h0rton.h0_inference.h0_utils as h0_utils\n'), ((3479, 3533), 'h0rton.h0_inference.h0_utils.get_normal_stats_naive', 'h0_utils.get_normal_stats_naive', (['D_dt_samples', 'weights'], {}), '(D_dt_samples, 
weights)\n', (3510, 3533), True, 'import h0rton.h0_inference.h0_utils as h0_utils\n'), ((4356, 4402), 'os.path.join', 'os.path.join', (['samples_dir', '""".."""', '"""summary.csv"""'], {}), "(samples_dir, '..', 'summary.csv')\n", (4368, 4402), False, 'import os\n'), ((8321, 8406), 'h0rton.h0_inference.h0_utils.CosmoConverter', 'h0_utils.CosmoConverter', (["meta['z_lens']", "meta['z_src']"], {'H0': 'true_H0', 'Om0': 'true_Om0'}), "(meta['z_lens'], meta['z_src'], H0=true_H0, Om0=true_Om0\n )\n", (8344, 8406), True, 'import h0rton.h0_inference.h0_utils as h0_utils\n'), ((8829, 8848), 'numpy.mean', 'np.mean', (['H0_samples'], {}), '(H0_samples)\n', (8836, 8848), True, 'import numpy as np\n'), ((8910, 8928), 'numpy.std', 'np.std', (['H0_samples'], {}), '(H0_samples)\n', (8916, 8928), True, 'import numpy as np\n'), ((9080, 9126), 'os.path.join', 'os.path.join', (['samples_dir', '""".."""', '"""summary.csv"""'], {}), "(samples_dir, '..', 'summary.csv')\n", (9092, 9126), False, 'import os\n'), ((9287, 9318), 'numpy.arange', 'np.arange', (['test_cfg.data.n_test'], {}), '(test_cfg.data.n_test)\n', (9296, 9318), True, 'import numpy as np\n'), ((2117, 2140), 'os.listdir', 'os.listdir', (['samples_dir'], {}), '(samples_dir)\n', (2127, 2140), False, 'import os\n'), ((3185, 3207), 'numpy.sum', 'np.sum', (['(weights ** 2.0)'], {}), '(weights ** 2.0)\n', (3191, 3207), True, 'import numpy as np\n'), ((4564, 4626), 'os.path.join', 'os.path.join', (['samples_dir', '""".."""', '"""mcmc_default_candidates.txt"""'], {}), "(samples_dir, '..', 'mcmc_default_candidates.txt')\n", (4576, 4626), False, 'import os\n'), ((5078, 5136), 'baobab.configs.BaobabConfig.from_file', 'BaobabConfig.from_file', (['test_cfg.data.test_baobab_cfg_path'], {}), '(test_cfg.data.test_baobab_cfg_path)\n', (5100, 5136), False, 'from baobab.configs import BaobabConfig\n'), ((5165, 5213), 'os.path.join', 'os.path.join', (['baobab_cfg.out_dir', '"""metadata.csv"""'], {}), "(baobab_cfg.out_dir, 'metadata.csv')\n", 
(5177, 5213), False, 'import os\n'), ((5239, 5334), 'pandas.read_csv', 'pd.read_csv', (['metadata_path'], {'index_col': 'None', 'usecols': "['z_lens', 'z_src', 'n_img']", 'nrows': '(500)'}), "(metadata_path, index_col=None, usecols=['z_lens', 'z_src',\n 'n_img'], nrows=500)\n", (5250, 5334), True, 'import pandas as pd\n'), ((5449, 5516), 'h0rton.tdlmc_utils.convert_to_dataframe', 'tdlmc_utils.convert_to_dataframe', ([], {'rung': 'rung_idx', 'save_csv_path': 'None'}), '(rung=rung_idx, save_csv_path=None)\n', (5481, 5516), True, 'import h0rton.tdlmc_utils as tdlmc_utils\n'), ((5945, 5991), 'os.path.join', 'os.path.join', (['samples_dir', '""".."""', '"""summary.csv"""'], {}), "(samples_dir, '..', 'summary.csv')\n", (5957, 5991), False, 'import os\n'), ((6040, 6063), 'os.listdir', 'os.listdir', (['samples_dir'], {}), '(samples_dir)\n', (6050, 6063), False, 'import os\n'), ((7460, 7502), 'h0rton.h0_inference.h0_utils.get_lognormal_stats', 'h0_utils.get_lognormal_stats', (['D_dt_samples'], {}), '(D_dt_samples)\n', (7488, 7502), True, 'import h0rton.h0_inference.h0_utils as h0_utils\n'), ((7535, 7574), 'h0rton.h0_inference.h0_utils.get_normal_stats', 'h0_utils.get_normal_stats', (['D_dt_samples'], {}), '(D_dt_samples)\n', (7560, 7574), True, 'import h0rton.h0_inference.h0_utils as h0_utils\n'), ((8268, 8293), 'numpy.isfinite', 'np.isfinite', (['D_dt_samples'], {}), '(D_dt_samples)\n', (8279, 8293), True, 'import numpy as np\n'), ((8537, 8560), 'numpy.isfinite', 'np.isfinite', (['H0_samples'], {}), '(H0_samples)\n', (8548, 8560), True, 'import numpy as np\n'), ((3163, 3178), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (3169, 3178), True, 'import numpy as np\n'), ((6816, 6884), 'h0rton.h0_inference.h0_utils.remove_outliers_from_lognormal', 'h0_utils.remove_outliers_from_lognormal', (['uncorrected_D_dt_samples', '(3)'], {}), '(uncorrected_D_dt_samples, 3)\n', (6855, 6884), True, 'import h0rton.h0_inference.h0_utils as h0_utils\n'), ((8154, 8178), 'numpy.exp', 
'np.exp', (["D_dt_stats['mu']"], {}), "(D_dt_stats['mu'])\n", (8160, 8178), True, 'import numpy as np\n'), ((8630, 8682), 'numpy.logical_and', 'np.logical_and', (['(H0_samples > 50.0)', '(H0_samples < 90.0)'], {}), '(H0_samples > 50.0, H0_samples < 90.0)\n', (8644, 8682), True, 'import numpy as np\n'), ((9487, 9543), 'os.path.join', 'os.path.join', (['samples_dir', '""".."""', '"""hybrid_candidates.txt"""'], {}), "(samples_dir, '..', 'hybrid_candidates.txt')\n", (9499, 9543), False, 'import os\n'), ((9686, 9743), 'os.path.join', 'os.path.join', (['samples_dir', '""".."""', '"""no_good_candidates.txt"""'], {}), "(samples_dir, '..', 'no_good_candidates.txt')\n", (9698, 9743), False, 'import os\n'), ((2924, 2957), 'os.path.join', 'os.path.join', (['samples_dir', 'f_name'], {}), '(samples_dir, f_name)\n', (2936, 2957), False, 'import os\n'), ((6588, 6621), 'os.path.join', 'os.path.join', (['samples_dir', 'f_name'], {}), '(samples_dir, f_name)\n', (6600, 6621), False, 'import os\n'), ((2619, 2643), 'os.path.splitext', 'os.path.splitext', (['f_name'], {}), '(f_name)\n', (2635, 2643), False, 'import os\n'), ((6334, 6358), 'os.path.splitext', 'os.path.splitext', (['f_name'], {}), '(f_name)\n', (6350, 6358), False, 'import os\n')] |
import unittest
import numpy as np
from qubo_nn.problems import KnapsackIntegerWeights
class TestKnapsackIntegerWeights(unittest.TestCase):
    """Unit tests for the KnapsackIntegerWeights QUBO problem generator."""

    def test_gen_qubo_matrix(self):
        """Test whether a correct QUBO is generated.
        Test case from: My brain.
        """
        # Item weights, item values, and knapsack capacity.
        w = np.array([2, 5, 3])
        c = np.array([5, 2, 4])
        W = 7
        problem = KnapsackIntegerWeights(
            {"problems": {"KIW": {}}},
            w, c, W
        )
        matrix = problem.gen_qubo_matrix()
        # Hand-derived expected 10x10 QUBO (3 item variables + 7 slack
        # variables encoding the integer capacity constraint).
        want = [
            [35.0, 100.0, 60.0, -20.0, -40.0, -60.0, -80.0, -100.0, -120.0, -140.0],
            [100.0, 248.0, 150.0, -50.0, -100.0, -150.0, -200.0, -250.0, -300.0, -350.0],
            [55.0, 148.0, 82.0, -30.0, -60.0, -90.0, -120.0, -150.0, -180.0, -210.0],
            [-20.0, -50.0, -30.0, 0.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0],
            [-40.0, -100.0, -60.0, 30.0, 30.0, 70.0, 90.0, 110.0, 130.0, 150.0],
            [-60.0, -150.0, -90.0, 40.0, 70.0, 80.0, 130.0, 160.0, 190.0, 220.0],
            [-80.0, -200.0, -120.0, 50.0, 90.0, 130.0, 150.0, 210.0, 250.0, 290.0],
            [-100.0, -250.0, -150.0, 60.0, 110.0, 160.0, 210.0, 240.0, 310.0, 360.0],
            [-120.0, -300.0, -180.0, 70.0, 130.0, 190.0, 250.0, 310.0, 350.0, 430.0],
            [-140.0, -350.0, -210.0, 80.0, 150.0, 220.0, 290.0, 360.0, 430.0, 480.0]
        ]
        self.assertCountEqual(matrix.tolist(), want)

    def test_gen_problems(self):
        """Random problem generation is reproducible under a fixed seed."""
        # Save and later restore the global NumPy RNG state so this test
        # does not perturb other tests.
        st0 = np.random.get_state()
        np.random.seed(1)
        data = KnapsackIntegerWeights.gen_problems(
            {"problems": {"KIW": {}}},
            1,
            size=(5, 5)
        )
        np.random.set_state(st0)
        # Expected weights/values drawn with seed 1.
        w_want = [37, 43, 12, 8, 9]
        c_want = [11, 5, 15, 0, 16]
        self.assertCountEqual(data[0]["w"].tolist(), w_want)
        self.assertCountEqual(data[0]["c"].tolist(), c_want)
| [
"numpy.random.get_state",
"numpy.random.set_state",
"qubo_nn.problems.KnapsackIntegerWeights",
"numpy.array",
"qubo_nn.problems.KnapsackIntegerWeights.gen_problems",
"numpy.random.seed"
] | [((290, 309), 'numpy.array', 'np.array', (['[2, 5, 3]'], {}), '([2, 5, 3])\n', (298, 309), True, 'import numpy as np\n'), ((322, 341), 'numpy.array', 'np.array', (['[5, 2, 4]'], {}), '([5, 2, 4])\n', (330, 341), True, 'import numpy as np\n'), ((375, 433), 'qubo_nn.problems.KnapsackIntegerWeights', 'KnapsackIntegerWeights', (["{'problems': {'KIW': {}}}", 'w', 'c', 'W'], {}), "({'problems': {'KIW': {}}}, w, c, W)\n", (397, 433), False, 'from qubo_nn.problems import KnapsackIntegerWeights\n'), ((1480, 1501), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (1499, 1501), True, 'import numpy as np\n'), ((1510, 1527), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1524, 1527), True, 'import numpy as np\n'), ((1543, 1621), 'qubo_nn.problems.KnapsackIntegerWeights.gen_problems', 'KnapsackIntegerWeights.gen_problems', (["{'problems': {'KIW': {}}}", '(1)'], {'size': '(5, 5)'}), "({'problems': {'KIW': {}}}, 1, size=(5, 5))\n", (1578, 1621), False, 'from qubo_nn.problems import KnapsackIntegerWeights\n'), ((1676, 1700), 'numpy.random.set_state', 'np.random.set_state', (['st0'], {}), '(st0)\n', (1695, 1700), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import random
import numpy as np
import torch
from torch import nn
from typing import Dict
def mem2str(num_bytes):
    """Format a non-negative byte count as a human-readable string.

    Uses binary units (GB/MB/KB = 2**30/2**20/2**10) with three decimal
    places; counts below 1 KiB are printed as plain bytes.
    """
    assert num_bytes >= 0
    for threshold, suffix in ((2 ** 30, "GB"), (2 ** 20, "MB"), (2 ** 10, "KB")):
        if num_bytes >= threshold:
            return "%.3f %s" % (num_bytes / threshold, suffix)
    return "%d bytes" % num_bytes
def sec2str(seconds):
    """Format a duration in seconds as 'H M S' text.

    Hours are not wrapped at 24, so e.g. 25 hours prints as "25H ...".
    """
    total = int(seconds)
    hours = total // 3600
    minutes = (total % 3600) // 60
    secs = total % 60
    return "%dH %02dM %02dS" % (hours, minutes, secs)
def get_mem_usage():
    """Return a one-line summary of system RAM (available/used/free) via psutil."""
    import psutil

    mem = psutil.virtual_memory()
    fields = [
        "available: %s, " % mem2str(mem.available),
        "used: %s, " % mem2str(mem.used),
        "free: %s" % mem2str(mem.free),
    ]
    return "".join(fields)
def flatten_first2dim(batch):
    """Collapse the first two dimensions of a tensor into one.

    Accepts either a torch.Tensor or a (possibly nested) dict of tensors,
    applying the flatten recursively to every dict value.
    """
    if isinstance(batch, dict):
        return {k: flatten_first2dim(v) for k, v in batch.items()}
    if isinstance(batch, torch.Tensor):
        trailing = batch.size()[2:]
        return batch.view(-1, *trailing)
    assert False, "unsupported type: %s" % type(batch)
def _tensor_slice(t, dim, b, e):
if dim == 0:
return t[b:e]
elif dim == 1:
return t[:, b:e]
elif dim == 2:
return t[:, :, b:e]
else:
raise ValueError("unsupported %d in tensor_slice" % dim)
def tensor_slice(t, dim, b, e):
    """Recursively slice a tensor or dict-of-tensors along `dim`,
    returning contiguous tensors."""
    if isinstance(t, torch.Tensor):
        return _tensor_slice(t, dim, b, e).contiguous()
    if isinstance(t, dict):
        return {k: tensor_slice(v, dim, b, e) for k, v in t.items()}
    assert False, "Error: unsupported type: %s" % (type(t))
def tensor_index(t, dim, i):
    """Select index `i` along dimension `dim` (dropping that dimension),
    recursively for dicts; result tensors are contiguous."""
    if isinstance(t, torch.Tensor):
        return _tensor_slice(t, dim, i, i + 1).squeeze(dim).contiguous()
    if isinstance(t, dict):
        return {k: tensor_index(v, dim, i) for k, v in t.items()}
    assert False, "Error: unsupported type: %s" % (type(t))
def one_hot(x, n):
    """Convert a [B, 1] column of class indices into a [B, n] one-hot float
    tensor on the same device as `x`."""
    assert x.dim() == 2 and x.size(1) == 1
    out = torch.zeros(x.size(0), n, device=x.device)
    # scatter_ writes 1 at each row's class index and returns `out` itself.
    return out.scatter_(1, x, 1)
def set_all_seeds(rand_seed):
    """Seed Python's random, NumPy, torch CPU and torch CUDA RNGs.

    Each generator gets a distinct offset seed (rand_seed, +1, +2, +3),
    matching the original behavior.
    """
    seeders = (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed)
    for offset, seeder in enumerate(seeders):
        seeder(rand_seed + offset)
def weights_init(m):
    """Custom weight initialization: orthogonal init for Linear/Conv2d layers;
    any other module type is reported (and left untouched)."""
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        nn.init.orthogonal_(m.weight.data)
    else:
        print("%s is not custom-initialized." % m.__class__)
def init_net(net, net_file):
    """Initialize `net`: load weights from `net_file` when given, otherwise
    apply the module-level custom initializer to every submodule."""
    if not net_file:
        net.apply(weights_init)
    else:
        net.load_state_dict(torch.load(net_file))
def count_output_size(input_shape, model):
    """Count the number of output elements of `model` for a given input shape
    by running one dummy forward pass (values are uninitialized)."""
    dummy = torch.FloatTensor(*input_shape)
    return model.forward(dummy).view(-1).size()[0]
def write_frame_to_image(frame, path, size=(300, 300)):
    """Render the first 4 channels of a single-frame batch as a 2x2 grid image.

    Parameters
    ----------
    frame : torch.Tensor
        Batch of size 1, shaped [1, C, H, W] with C >= 4; channel values are
        plotted with vmin=0, vmax=1 (assumed roughly in [0, 1] — TODO confirm).
    path : str
        Output image file path passed to plt.savefig.
    size : tuple
        Unused; kept for interface compatibility.
    """
    # BUG FIX: `plt` was used without matplotlib ever being imported in this
    # module, so calling this function raised NameError. Import locally so the
    # rest of the (torch-only) module does not require matplotlib.
    import matplotlib.pyplot as plt

    batchsize = frame.size(0)
    assert batchsize == 1
    frame = frame[0].cpu().numpy()
    rows = 2
    cols = 2
    fig, ax = plt.subplots(rows, cols, figsize=(cols * 10, rows * 10))
    for i in range(rows * cols):
        r = i // cols
        c = i % cols
        data = frame[i]
        ax[r, c].axis("off")
        if data.shape[0] == 3:
            # channel-first RGB -> HWC layout expected by imshow
            data = data.swapaxes(0, 1).swapaxes(1, 2)
            ax[r, c].imshow(data, vmin=0, vmax=1)
            continue
        # single-channel 2D slice rendered in grayscale
        ax[r, c].imshow(data, vmin=0, vmax=1, cmap="gray")
    plt.tight_layout()
    plt.savefig(path)
    plt.close()
def write_frame_to_image2(frame, path, size=(300, 300)):
    """Render channel 3 of each of the first 16 items in a batch as a 4x4 grid.

    Parameters
    ----------
    frame : torch.Tensor
        Batch shaped [N, C, H, W] with N >= 16 and C >= 4.
    path : str
        Output image file path passed to plt.savefig.
    size : tuple
        Unused; kept for interface compatibility.
    """
    # BUG FIX: `plt` was used without matplotlib ever being imported in this
    # module (NameError at call time). Import locally, mirroring
    # write_frame_to_image.
    import matplotlib.pyplot as plt

    frame = frame.cpu().numpy()
    rows = 4
    cols = 4
    fig, ax = plt.subplots(rows, cols, figsize=(cols * 10, rows * 10))
    for i in range(rows * cols):
        r = i // cols
        c = i % cols
        # Always channel index 3 of the i-th batch element.
        data = frame[i][3]
        ax[r, c].axis("off")
        if data.shape[0] == 3:
            data = data.swapaxes(0, 1).swapaxes(1, 2)
            ax[r, c].imshow(data, vmin=0, vmax=1)
            continue
        ax[r, c].imshow(data, vmin=0, vmax=1, cmap="gray")
    plt.tight_layout()
    plt.savefig(path)
    plt.close()
def num2str(n):
    """Format a number with K/M suffixes, trimming trailing decimal zeros.

    Examples: 100 -> "100", 1500 -> "1.5K", 2500000 -> "2.5M".
    """
    if n < 1e3:
        s = str(n)
        unit = ""
    elif n < 1e6:
        n /= 1e3
        s = "%.3f" % n
        unit = "K"
    else:
        n /= 1e6
        s = "%.3f" % n
        unit = "M"
    # BUG FIX: the original unconditionally did s.rstrip("0").rstrip("."),
    # which mangled plain integers ("100" -> "1"). Only strip trailing zeros
    # when the string actually has a decimal part.
    if "." in s:
        s = s.rstrip("0").rstrip(".")
    return s + unit
| [
"torch.manual_seed",
"torch.load",
"psutil.virtual_memory",
"random.seed",
"torch.nn.init.orthogonal_",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.FloatTensor"
] | [((909, 932), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (930, 932), False, 'import psutil\n'), ((2846, 2868), 'random.seed', 'random.seed', (['rand_seed'], {}), '(rand_seed)\n', (2857, 2868), False, 'import random\n'), ((2873, 2902), 'numpy.random.seed', 'np.random.seed', (['(rand_seed + 1)'], {}), '(rand_seed + 1)\n', (2887, 2902), True, 'import numpy as np\n'), ((2907, 2939), 'torch.manual_seed', 'torch.manual_seed', (['(rand_seed + 2)'], {}), '(rand_seed + 2)\n', (2924, 2939), False, 'import torch\n'), ((2944, 2981), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(rand_seed + 3)'], {}), '(rand_seed + 3)\n', (2966, 2981), False, 'import torch\n'), ((3470, 3501), 'torch.FloatTensor', 'torch.FloatTensor', (['*input_shape'], {}), '(*input_shape)\n', (3487, 3501), False, 'import torch\n'), ((3162, 3196), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['m.weight.data'], {}), '(m.weight.data)\n', (3181, 3196), False, 'from torch import nn\n'), ((3344, 3364), 'torch.load', 'torch.load', (['net_file'], {}), '(net_file)\n', (3354, 3364), False, 'import torch\n')] |
import numpy as np
import torch.nn as nn
from torch.nn.modules.loss import _Loss
from torch.optim.optimizer import Optimizer
import torch.optim as optim
class ProgressBar(object):
    """Text progress bar.

    verbose == 1 renders an ASCII bar like "====>****"; any other verbose
    value renders a block-style bar followed by a right-aligned percentage.
    """

    def __init__(self,
                 max_iter: int = 1,
                 verbose: int = 1,
                 bar_nums: int = 20,
                 untrained_sign: str = '*',
                 trained_sign: str = '='):
        self.max_iter = max_iter
        self.verbose = verbose
        # One column is reserved for the '>' head marker.
        self._nums = bar_nums - 1
        self._untrained = untrained_sign
        self._trained = trained_sign
        self.iter = 0

    def update(self, n_iter: int = 1):
        """Advance the internal counter by `n_iter` steps."""
        self.iter = self.iter + n_iter

    def get_bar(self) -> str:
        """Render the bar string for the current progress ratio."""
        ratio = self.iter / self.max_iter
        done = round(ratio * self._nums)
        todo = self._nums - done
        if self.verbose == 1:
            return "{}>{}".format(self._trained * done, self._untrained * todo)
        # NOTE(review): due to %/* having equal precedence, the expression
        # below repeats the whole escaped string `done` times rather than
        # padding inside the escape codes — reproduced as-is to preserve
        # behavior; confirm intent before changing.
        percent = str(round(ratio * 100))
        black = "\033[40m%s\033[0m" % ' ' * done
        return '{black} {percent:>{white}}%'.format(black=black,
                                                    percent=percent, white=todo)
class AverageMeter(object):
    """Track a running average of a metric.

    verbose == 0: keep scalar `val` / `sums` / `avg`.
    verbose != 0: keep full histories as lists (values, cumulative sums,
                  running averages).
    """

    def __init__(self, name=None, verbose=0):
        self.name = name
        self.val = None
        self.avg = None
        self.sums = None
        self.steps = 0
        self.verbose = verbose
        self.reset()

    def reset(self):
        """Clear accumulated state (scalar or list form depending on verbosity).

        Note: `steps` is intentionally left untouched, matching the original
        behavior.
        """
        if self.verbose == 0:
            self.val = 0.
            self.avg = 0.
            self.sums = 0.
        else:
            self.val = []
            self.avg = []
            self.sums = []

    def update(self, val, step=1):
        """Record `val` weighted by `step` samples; `val=None` marks 'no value'."""
        if val is None:
            self.val = None
            return
        self.steps += step
        if self.verbose == 0:
            self.val = val
            self.sums += val * step
            self.avg = self.sums / self.steps
        else:
            self.val.append(val)
            # BUG FIX: the original read self.sums[-1] unconditionally, which
            # raised IndexError on the very first update() after reset().
            prev = self.sums[-1] if self.sums else 0.
            self.sums.append(prev + val * step)
            self.avg.append(self.sums[-1] / self.steps)
def split_data(arrays, start=0, end=None):
    """Return `np.array(arrays)` sliced along the first axis from `start` to `end`.

    `end=None` slices to the end. Note: the original had an
    `isinstance(arrays, list)` branch, but it was dead code — `arrays` is
    converted by `np.array(...)` before the check, so the condition could
    never be true. The unreachable branch has been removed; behavior is
    unchanged.
    """
    arrays = np.array(arrays)
    # arrays[start:None] is identical to arrays[start:], so one slice covers
    # both the end-given and end-omitted cases.
    return arrays[start:end]
def get_optimizer(optimizer, model):
    """Resolve an optimizer spec into a torch optimizer instance.

    A string ('sgd' or 'adam', case-insensitive) builds a new optimizer over
    `model.parameters()`; an existing Optimizer instance is returned as-is.
    Unknown strings raise ValueError; any other type falls through to None
    (matching the original behavior).
    """
    if isinstance(optimizer, Optimizer):
        return optimizer
    if isinstance(optimizer, str):
        name = optimizer.lower()
        if name == 'sgd':
            return optim.SGD(model.parameters(), lr=1e-2)
        if name == 'adam':
            return optim.Adam(model.parameters())
        raise ValueError('Unknwon optimizer type!')
def get_objective(objective):
    """Resolve a loss spec into a torch loss instance.

    A string alias (case-insensitive, e.g. 'mse', 'crossentropy') builds a
    new loss instance; an existing _Loss instance is returned unchanged.
    Unknown strings/types raise ValueError.
    """
    if isinstance(objective, str):
        key = objective.lower()
        # Alias groups -> nn attribute names. The class is looked up lazily
        # via getattr only for the matched group, so a missing/deprecated
        # class (e.g. NLLLoss2d) fails only when actually requested, exactly
        # as the original if/elif chain did.
        alias_table = (
            (('l1', 'l1loss'), 'L1Loss'),
            (('nll', 'nllloss'), 'NLLLoss'),
            (('nll2d', 'nllloss2d'), 'NLLLoss2d'),
            (('poissonnll', 'poissonnllloss'), 'PoissonNLLLoss'),
            (('kldiv', 'kldivloss'), 'KLDivLoss'),
            (('mse', 'mseloss'), 'MSELoss'),
            (('bce', 'bceloss'), 'BCELoss'),
            (('smoothl1', 'smoothl1loss'), 'SmoothL1Loss'),
            (('crossentropy', 'cross_entropy'), 'CrossEntropyLoss'),
            (('ctc', 'ctcloss'), 'CTCLoss'),
        )
        for aliases, cls_name in alias_table:
            if key in aliases:
                return getattr(nn, cls_name)()
        raise ValueError('unknown argument!')
    if isinstance(objective, _Loss):
        return objective
    raise ValueError('unknown argument {}'.format(objective))
def console(prog_bar: ProgressBar = None,
            verbose: int = 0,
            trained_samples: int = None,
            total_samples: int = None,
            trained_batch: int = 1,
            total_batch: int = 1,
            trained_time: float = 0.,
            batch_loss: float = 0.,
            batch_acc: float = 0.,
            validation_loss: float = None,
            validation_acc: float = None):
    """Print a training progress line to the console.

    verbose == 0: print nothing.
    verbose == 1: single in-place line (carriage return, no newline) showing
                  sample counts, the progress bar, elapsed/per-batch time and
                  the latest batch loss/accuracy; validation metrics are
                  appended only when both are provided.
    verbose == 2: one full line per call with an ETA for the remaining batches.
    Any other verbose value raises ValueError.
    """
    if verbose == 0:
        return
    elif verbose == 1:
        formated_trained_time = format_time(trained_time)
        formated_per_batch_time = format_time(trained_time / trained_batch)
        bar = prog_bar.get_bar()
        # Validation metrics appended only when both loss and accuracy exist.
        if validation_loss is None and validation_acc is None:
            print('\r {:d}/{:d} [{}] - {} - {}/batch -batch_loss: {:.4f} -batch_acc: {:.4f}'.format(trained_samples,
                                                                                             total_samples, bar,
                                                                                             formated_trained_time,
                                                                                             formated_per_batch_time,
                                                                                             batch_loss,
                                                                                             batch_acc),
                  flush=True, end='')
        else:
            print('\r {:d}/{:d} [{}] - {} - {}/batch'
                  ' -batch_loss: {:.4f} -batch_acc: {:.4f} -validation_loss: {:.4f} -validation_acc: {:.4f}'.format(
                trained_samples, total_samples, bar, formated_trained_time, formated_per_batch_time, batch_loss,
                batch_acc, validation_loss, validation_acc), flush=True, end='')
    elif verbose == 2:
        # ETA = average time per completed batch * batches remaining.
        batch_time = trained_time / trained_batch
        eta = (total_batch - trained_batch) * batch_time
        formated_eta = format_time(eta)
        bar = prog_bar.get_bar()
        if validation_loss is None and validation_acc is None:
            print('{} -ETA {} -batch_loss: {:.4f} -batch_acc: {:.4f}'.format(bar, formated_eta, batch_loss, batch_acc))
        else:
            print(
                '{} -ETA {} -batch_loss: {:.4f} -batch_acc: {:.4f} -validation_loss: {:.4f} -validation_acc: {:.4f}'.format(
                    bar, formated_eta, batch_loss, batch_acc, validation_loss, validation_acc))
    else:
        raise ValueError('Verbose only supports for 0, 1 and 2 ~')
def format_time(second_time: float) -> str:
    """Format a duration in seconds as a short us/ms/s/m/h string.

    Sub-second durations become '<n>ms' or '<n>us'; otherwise the value is
    rounded to whole seconds and formatted as s / m-s / h-m-s.
    """
    if second_time < 1:
        ms = second_time * 1000
        if ms < 1:
            # BUG FIX: the original computed `second_time * 1000` here, which
            # is milliseconds again — microseconds need a factor of 1e6.
            us = second_time * 1000000
            return '%dus' % us
        else:
            return '%dms' % ms
    second_time = round(second_time)
    if second_time > 3600:
        # hours
        h = second_time // 3600
        second_time = second_time % 3600
        # minutes
        m = second_time // 60
        second_time = second_time % 60
        return '%dh%dm%ds' % (h, m, second_time)
    elif second_time > 60:
        m = second_time // 60
        second_time = second_time % 60
        return '%dm%ds' % (m, second_time)
    else:
        return '%ds' % second_time
| [
"torch.nn.CrossEntropyLoss",
"torch.nn.L1Loss",
"torch.nn.KLDivLoss",
"torch.nn.PoissonNLLLoss",
"numpy.array",
"torch.nn.NLLLoss2d",
"torch.nn.MSELoss",
"torch.nn.NLLLoss",
"torch.nn.BCELoss",
"torch.nn.CTCLoss",
"torch.nn.SmoothL1Loss"
] | [((2249, 2265), 'numpy.array', 'np.array', (['arrays'], {}), '(arrays)\n', (2257, 2265), True, 'import numpy as np\n'), ((3148, 3159), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {}), '()\n', (3157, 3159), True, 'import torch.nn as nn\n'), ((3225, 3237), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (3235, 3237), True, 'import torch.nn as nn\n'), ((3307, 3321), 'torch.nn.NLLLoss2d', 'nn.NLLLoss2d', ([], {}), '()\n', (3319, 3321), True, 'import torch.nn as nn\n'), ((3401, 3420), 'torch.nn.PoissonNLLLoss', 'nn.PoissonNLLLoss', ([], {}), '()\n', (3418, 3420), True, 'import torch.nn as nn\n'), ((3490, 3504), 'torch.nn.KLDivLoss', 'nn.KLDivLoss', ([], {}), '()\n', (3502, 3504), True, 'import torch.nn as nn\n'), ((3570, 3582), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3580, 3582), True, 'import torch.nn as nn\n'), ((3648, 3660), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (3658, 3660), True, 'import torch.nn as nn\n'), ((3736, 3753), 'torch.nn.SmoothL1Loss', 'nn.SmoothL1Loss', ([], {}), '()\n', (3751, 3753), True, 'import torch.nn as nn\n'), ((3834, 3855), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3853, 3855), True, 'import torch.nn as nn\n'), ((3921, 3933), 'torch.nn.CTCLoss', 'nn.CTCLoss', ([], {}), '()\n', (3931, 3933), True, 'import torch.nn as nn\n')] |
from event_model import DocumentRouter
import matplotlib.pyplot as plt
import numpy
class Grid(DocumentRouter):
    """
    Draw a matplotlib AxesImage Artist and update it for each Event.
    The purpose of this callback is to create (on initialization) a
    matplotlib grid image and then update it with new data for every `event`.
    NOTE: Some important parameters are fed in through **kwargs like `extent`
    which defines the axes min and max and `origin` which defines if the grid
    co-ordinates start in the bottom left or top left of the plot. For more
    info see https://matplotlib.org/tutorials/intermediate/imshow_extent.html
    or https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.imshow.html#matplotlib.axes.Axes.imshow
    Parameters
    ----------
    func : callable
        This must accept a BulkEvent and return three lists of floats (x
        grid co-ordinates, y grid co-ordinates and grid position intensity
        values). The three lists must contain an equal number of items, but
        that number is arbitrary. That is, a given document may add one new
        point, no new points or multiple new points to the plot.
    shape : tuple
        The (row, col) shape of the grid.
    ax : matplotlib Axes, optional.
        if ``None``, a new Figure and Axes are created.
    **kwargs
        Passed through to :meth:`Axes.imshow` to style the AxesImage object.
    """

    def __init__(self, func, shape, *, ax=None, **kwargs):
        self.func = func
        self.shape = shape
        if ax is None:
            _, ax = plt.subplots()
        self.ax = ax
        self.grid_data = numpy.full(self.shape, numpy.nan)
        # BUG FIX: Axes.imshow returns a single AxesImage (unlike Axes.plot,
        # which returns a list of Line2D), so the original tuple unpacking
        # `self.image, = ax.imshow(...)` raised TypeError at construction.
        self.image = ax.imshow(self.grid_data, **kwargs)

    def event_page(self, doc):
        '''
        Takes in a bulk_events document and updates grid_data with the values
        returned from self.func(doc).
        Parameters
        ----------
        doc : dict
            The bulk event dictionary that contains the 'data' and 'timestamps'
            associated with the bulk event.
        '''
        x_coords, y_coords, I_vals = self.func(doc)
        self._update(x_coords, y_coords, I_vals)

    def _update(self, x_coords, y_coords, I_vals):
        '''
        Updates self.grid_data with the values from the lists x_coords,
        y_coords, I_vals, and refreshes the AxesImage.
        Parameters
        ----------
        x_coords, y_coords, I_vals : Lists
            These are lists of x co-ordinate, y co-ordinate and intensity
            values arising from the event. The length of all three lists must
            be the same.
        '''
        if not len(x_coords) == len(y_coords) == len(I_vals):
            raise ValueError("User function is expected to provide the same "
                             "number of x, y and I points. Got {0} x points, "
                             "{1} y points and {2} I values."
                             "".format(len(x_coords), len(y_coords),
                                       len(I_vals)))
        if not x_coords:
            # No new data, Short-circuit.
            return
        # Update grid_data and the plot.
        self.grid_data[x_coords, y_coords] = I_vals
        self.image.set_array(self.grid_data)
| [
"numpy.full",
"matplotlib.pyplot.subplots"
] | [((1631, 1664), 'numpy.full', 'numpy.full', (['self.shape', 'numpy.nan'], {}), '(self.shape, numpy.nan)\n', (1641, 1664), False, 'import numpy\n'), ((1570, 1584), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1582, 1584), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 15:26:19 2018
@authors: <NAME> (verseve) & <NAME> (haag)
"""
# required modules
import rasterio
from rasterio.mask import mask
from shapely.geometry import box
from shapely.geometry import Point
from shapely.geometry import Polygon
from shapely.geometry import MultiPolygon
import geopandas as gpd
import os
import shutil
import sys
import configparser as cp
import numpy as np
import xarray as xr
import gdal
from scipy import ndimage as nd
import pandas as pd
import glob
import json
from scipy.optimize import curve_fit
# waterbodies
import pcraster as pcr
from functools import partial
from shapely.ops import transform
import pyproj
import waterbodies as setup_waterbody_maps
import wflow_lake_intbl as setup_lake_intbl
import wflow_reservoir_intbl as setup_reservoir_intbl
# riverwidths
import derive_river_widths as setup_river_widths
# AnnualDischarge parameter
import catchment_FLO1K as flo1k
# upscaled slope
from upscaled_slope import get_slope
# logging
import setup_logging
from datetime import datetime
# modelbuilder
import geojson
import subprocess
# MERIT / pyflwdir
import pyflwdir
from merit.merit_model_data import get_merit_basin_bbox, upscale_merit_basin, network_merit_basin, resample_merit_basin
from merit.wflow_topomaps import wflow_topomaps
def fill(data, invalid=None):
    """
    Replace invalid cells of `data` with the value of the nearest valid cell.
    Input:
        data: numpy array of any dimension
        invalid: boolean array matching `data`'s shape, True where a value
                 must be replaced; defaults to np.isnan(data)
    Output: filled array
    """
    mask = np.isnan(data) if invalid is None else invalid
    # Indices of the nearest valid (False) cell for every position.
    nearest = nd.distance_transform_edt(mask, return_distances=False,
                                        return_indices=True)
    return data[tuple(nearest)]
def transform_coordinates(coords):
    """
    Transform coordinates from geodetic (lat, lon) to cartesian [km].
    Input:
        coords: a (lat, lon) tuple, or an array of such tuples
    Output: an [n, 3] array of (x, y, z) cartesian coordinates
    """
    # WGS 84 reference coordinate system parameters
    A = 6378.137  # major axis [km]
    E2 = 6.69437999014e-3  # eccentricity squared
    # BUG FIX: np.float was removed in NumPy 1.24 (AttributeError); plain
    # `float` (== np.float64) is the drop-in replacement.
    coords = np.asarray(coords).astype(float)
    # is coords a tuple? Convert it to a one-element array of tuples
    if coords.ndim == 1:
        coords = np.array([coords])
    # convert to radians
    lat_rad = np.radians(coords[:, 0])
    lon_rad = np.radians(coords[:, 1])
    # convert to cartesian coordinates (prime-vertical radius of curvature)
    r_n = A / (np.sqrt(1 - E2 * (np.sin(lat_rad) ** 2)))
    x = r_n * np.cos(lat_rad) * np.cos(lon_rad)
    y = r_n * np.cos(lat_rad) * np.sin(lon_rad)
    z = r_n * (1 - E2) * np.sin(lat_rad)
    return np.column_stack((x, y, z))
def getFeatures(gdf):
    """Parse the first feature's geometry from a GeoDataFrame into the
    GeoJSON-like dict format rasterio expects, wrapped in a list."""
    parsed = json.loads(gdf.to_json())
    return [parsed['features'][0]['geometry']]
def reproject_raster(src, src_profile, dst_profile, resampling, threads=1):
    """Reproject `src` onto the grid described by `dst_profile` using
    rasterio.warp.reproject and return the float32 destination array."""
    dst_shape = (dst_profile['height'], dst_profile['width'])
    destination = np.empty(dst_shape, dtype=np.float32)
    rasterio.warp.reproject(
        source=src,
        destination=destination,
        src_crs=src_profile['crs'],
        src_nodata=src_profile['nodata'],
        src_transform=src_profile['transform'],
        dst_transform=dst_profile['transform'],
        dst_crs=dst_profile['crs'],
        resampling=resampling,
        num_threads=threads)
    return destination
def rasterio_mask_shapes(clone, dst_res, src_profile, clone_profile):
    """
    Build a geometry covering the clone map (padded by two destination cells)
    and return it, expressed in the source CRS, in rasterio mask format.
    """
    xmin, ymin, xmax, ymax = clone.bounds
    pad = 2 * dst_res
    left = xmin - pad
    bottom = ymin - pad
    right = xmax + pad
    top = ymax + pad
    if src_profile['crs'] == clone_profile['crs']:
        # same CRS: a plain rectangle is sufficient
        footprint = box(left, bottom, right, top)
        geo = gpd.GeoDataFrame({'geometry': footprint}, index=[0])
    else:
        # different CRS: densify the rectangle edges so the shape survives reprojection
        d = 10
        xs = np.concatenate((np.repeat(left, d), np.linspace(left, right, d),
                             np.repeat(right, d), np.linspace(right, left, d)))
        ys = np.concatenate((np.linspace(bottom, top, d), np.repeat(top, d),
                             np.linspace(top, bottom, d), np.repeat(bottom, d)))
        footprint = Polygon(zip(xs, ys))
        geo = gpd.GeoDataFrame({'geometry': footprint}, index=[0])
    geo.crs = clone_profile['crs']
    geo = geo.to_crs(src_profile['crs'])
    return getFeatures(geo)
def create_M(ksat, theta, zi, method, M_minmax, directory_out, logger):
    """
    Creates M and related map(s).
    Parameters:
        ksat : [dict] Ksat maps (open rasterio datasets keyed 'KsatVer_<z>cm')
        theta : [dict] theta maps (open rasterio datasets 'thetaS'/'thetaR')
        zi : [array] soilgrid depths [cm]
        method : [int] method to create M, 0 = numpy linalg, 1 = scipy optimize, 2 = both (default)
        M_minmax : [int] value used to constrain M
        directory_out : [string] output path
        logger : [logging object] instance of Python logging module
    Output: M and related map(s)
    """
    # helper functions
    def func(x, b):
        # exponential decay model fitted to the Ksat-over-depth ratio
        return np.exp(-b * x)
    def contrain_M(M_, popt_0, M_minmax):
        # clip unphysical / extreme M values to the configured bound
        M_[(M_ > 0) & (popt_0 == 0)] = M_minmax
        M_[ M_ > 100000] = M_minmax
        M_[M_ < 0] = M_minmax
        return M_
    def do_linalg(z_i_, ks, row, col):
        # least-squares fit of log(Ksat ratio) vs depth (y = b*x, no intercept);
        # depths where the ratio is 0 (log = -inf) are excluded from the fit
        idx = ~np.isinf(np.log(ks[:,row,col]))
        return np.linalg.lstsq(z_i_[idx,np.newaxis], np.log(ks[idx,row,col]), rcond=None)
    def do_curve_fit(z_i_, ks, row, col, p0):
        # non-linear fit of exp(-b*z) to the Ksat ratio (scipy.optimize.curve_fit)
        idx = ~np.isinf(np.log(ks[:,row,col]))
        return curve_fit(func, z_i_[idx], ks[idx,row,col] ,p0=p0)
    def write_file(data, path, ext):
        # write a single-band float32 GeoTIFF using the destination profile
        with rasterio.open(path + ext + '.tif', 'w', **dst_profile) as dst:
            dst.write(np.float32(data),1)
    def do_translate(path, ext, options='-of PCRaster'):
        # convert the GeoTIFF to a PCRaster .map file
        gdal.Translate(path + ext + '.map', path + ext + '.tif', options=options)
    def write_maps(M_, file_ext, popt_0):
        # write M to file before contrains are applied
        write_file(M_, path_M, '_original' + file_ext)
        do_translate(path_M, '_original' + file_ext)
        # contrain M to specified value(s)
        M_ = contrain_M(M_, popt_0, M_minmax)
        ks_0_new = ks_[0]
        # write M and Ks to files after contrains are applied on M
        write_file(M_, path_M, file_ext)
        write_file(ks_0_new, path_Ks, file_ext)
        do_translate(path_M, file_ext)
        do_translate(path_Ks, file_ext)
    # check method
    if method not in [0,1,2]:
        logger.warning('Unrecognized input for method of M parameter calculation! Using default value instead.')
        method = 2
    # start M parameter calculation
    ks_ = []
    for z in zi:
        ks_.append(ksat['KsatVer_' + str(z) + 'cm'].read(1))
    ts = theta['thetaS'].read(1)
    tr = theta['thetaR'].read(1)
    dst_profile = theta['thetaR'].profile
    rows = ks_[0].shape[0]
    cols = ks_[0].shape[1]
    # Ksat at each depth normalized by the surface (0 cm) Ksat
    ks = (np.array( ks_ )/ks_[0])
    # depths scaled by 10 — presumably cm -> mm; TODO confirm intended unit
    z_i_ = zi * 10.0
    popt_0 = np.zeros(ks_[0].shape)
    path_M = os.path.join(directory_out, 'M')
    path_Ks = os.path.join(directory_out, 'KsatVer')
    if (method == 0 or method ==2):
        logger.info('fit zi - log(Ksat) with numpy linalg regression (y = b*x) -> M_')
        for row in range(0, rows):
            print(str(round((float(row)/float(rows))*100,2)) + "% completed of curve fit")
            for col in range(0, cols):
                d = do_linalg(z_i_, ks, row, col)
                popt_0[row,col] = d[0]
        # M = (thetaS - thetaR) / f with f the (negated) fitted decay coefficient
        M_ = (ts - tr)/(-popt_0)
        write_maps(M_, '_', popt_0)
    if (method == 1 or method ==2):
        logger.info('fit zi - Ksat with curve_fit (scipy.optimize) -> M')
        for row in range(0, rows):
            print(str(round((float(row)/float(rows))*100,2)) + "% completed of curve fit")
            for col in range(0, cols):
                # try curve fitting with certain p0
                try:
                    popt, pcov = do_curve_fit(z_i_, ks, row, col, (1e-3 ))
                except RuntimeError:
                    # try curve fitting with lower p0
                    try:
                        popt, pcov = do_curve_fit(z_i_, ks, row, col, (1e-4 ))
                    except RuntimeError:
                        # do linalg regression instead (method 0);
                        # sign flipped so popt matches curve_fit's convention
                        popt = np.array(do_linalg(z_i_, ks, row, col))
                        popt[0] = popt[0] * -1.0
                popt_0[row,col] = popt[0]
        M_ = (ts - tr)/(popt_0)
        write_maps(M_, '', popt_0)
def make_clone(dst_res, settings, interp_soilthick, M_method, M_minmax, directory_in, directory_out, clonefile, logger):
    """
    Creates maps for wflow model (staticmaps).
    Parameters:
        dst_res : [float] resolution of output maps
        settings : [string] path to settings file (CSV with one row per input dataset)
        interp_soilthick : [boolean] control for interpolation/filling of zeros in soil thickness map
        M_method : [int] method to create M, 0 = numpy linalg, 1 = scipy optimize, 2 = both (default)
        M_minmax : [int] value used to constrain M
        directory_in : [string] input path
        directory_out : [string] output path
        clonefile : [string] filename of (PCRaster) clone map file (by default wflow_dem.map)
        logger : [logging object] instance of Python logging module
    Output: map files
    """
    settings = pd.read_csv(settings)
    clone_path = os.path.join(directory_out, clonefile)
    clone = rasterio.open(clone_path)
    clone_profile = clone.profile
    # soilgrids layer depths [cm]
    soilgrids_depths = np.array([0,5,15,30,60,100,200])
    # process every input dataset listed in the settings table
    for index, row in settings.iterrows():
        files = []
        files.extend((glob.glob(os.path.join(directory_in, row.folder_in, row.files))))
        for index, filepath in enumerate(files):
            logger.info('Reading ' + filepath)
            src = rasterio.open(filepath)
            src_profile = src.profile
            if clone.crs == None:
                logger.warning('*** clone file ' + clone_path + ' without CRS, CRS set to EPSG:4326 ***')
                clone_profile['crs'] = rasterio.crs.CRS.from_epsg(4326)
            # crop the source raster to the (padded) clone extent
            shapes = rasterio_mask_shapes(clone, dst_res, src_profile, clone_profile)
            logger.info('trim file ' + str(filepath))
            out_grid, out_transform = mask(src, shapes, crop=True, all_touched=True)
            nx, ny = out_grid[0].shape[1], out_grid[0].shape[0]
            grid = out_grid[0] * float(row.mult_factor)
            # LAI nodata becomes 0 rather than being interpolated below
            if row.parameter == 'LAI':
                grid[np.where(grid == src_profile['nodata'] * float(row.mult_factor))] = 0.0
            if src_profile['nodata'] != None:
                grid[np.where(grid == src_profile['nodata'] * float(row.mult_factor))] = np.nan
                logger.info('fill nodata for parameter ' + row.parameter)
                grid = fill(grid)
            scr_file = os.path.basename(files[index])
            dst_tiff_file = scr_file
            # derive the output .map filename from the input filename convention
            if scr_file.startswith('KsatVer'):
                dst_map_file = '_'.join(scr_file.split('_')[0:2]) + '.map'
            elif scr_file.startswith('lambda'):
                dst_map_file = ('_'.join(scr_file.split('_')[0:2])).replace('lambda', 'c') + '.map'
            elif scr_file.startswith('LAI'):
                # LAI maps use a month-number extension (e.g. LAI00000.001)
                dst_map_file = scr_file.split('_')[0].ljust(8, '0') + '.' + scr_file.replace('.tif','').split('_')[-1].zfill(3)
            elif scr_file.startswith('GLOBCOVER'):
                dst_map_file = 'wflow_landuse.map'
            elif scr_file.startswith('RootingDepth'):
                dst_map_file = scr_file.replace('tif','') + 'map'
            else:
                dst_map_file = scr_file.split('_')[0] + '.map'
            # update the relevant parts of the profiles
            dst_profile = src.meta.copy()
            dst_profile.update({
                'transform': clone_profile['transform'],
                'crs': clone_profile['crs'],
                'dtype' : np.float32,
                'width': clone_profile['width'],
                'height': clone_profile['height']
            })
            src_profile.update({
                'transform' : out_transform,
                'width': nx,
                'height': ny
            })
            if row.scale_method == 'average':
                resample_method = rasterio.warp.Resampling.average
            if row.scale_method == 'mode':
                resample_method = rasterio.warp.Resampling.mode
            # resample in log space when requested (back-transformed below)
            if row.conversion == 'log':
                grid = np.log(grid)
            logger.info('Resample '+ row.parameter + ' to resolution ' + str(dst_res))
            out = reproject_raster(grid, src_profile, dst_profile, resample_method, threads=4)
            if row.conversion == 'log':
                out = np.exp(out)
            if (row.parameter == 'soilthickness') and (interp_soilthick):
                logger.info('Interpolating/filling zeros for parameter ' + row.parameter)
                out = fill(out, out==0)
            # KsatHorFrac: divide by the resampled KsatVer to obtain a fraction
            if row.parameter == 'KsatHorFrac':
                KsatVer = out_grid[0]
                if src_profile['nodata'] != None:
                    KsatVer[np.where(KsatVer == src_profile['nodata'])] = np.nan
                KsatVer = fill(KsatVer)
                if row.conversion == 'log':
                    out = out/np.exp(reproject_raster(np.log(KsatVer), src_profile, dst_profile, resample_method, threads=4))
                else:
                    out = out/reproject_raster(KsatVer, src_profile, dst_profile, resample_method, threads=4)
                dst_tiff_file = 'KsatHorFrac.tif'
                dst_map_file = 'KsatHorFrac.map'
            if row.parameter == 'lambda':
                logger.info('Convert '+ row.parameter + ' to parameter c')
                # Brooks-Corey style conversion: c = 3 + 2/lambda — TODO confirm
                out = (3. + (2./out))
            path_tif = os.path.join(directory_out, dst_tiff_file)
            logger.info('write resampled '+ row.parameter + ' to file ' + dst_tiff_file)
            with rasterio.open(path_tif, 'w', **dst_profile) as dst:
                dst.write(out,1)
            path_map = os.path.join(directory_out, dst_map_file)
            logger.info('convert ' + dst_tiff_file + ' to PCRaster file ' + dst_map_file)
            gdal.Translate(path_map, path_tif, options = '-of PCRaster')
    # derive the M parameter from the KsatVer/theta GeoTIFFs written above
    logger.info('calculating parameter M...')
    files = []
    files.extend((glob.glob(os.path.join(directory_out,"KsatVer*.tif"))))
    files.extend((glob.glob(os.path.join(directory_out,"theta*.tif"))))
    input_ksat = {}
    input_theta = {}
    for index, filepath in enumerate(files):
        inputfile = os.path.basename(filepath)
        logger.info('read file ' + inputfile )
        if inputfile.startswith('KsatVer'):
            input_ksat['_'.join(inputfile.split('_')[0:2])] = rasterio.open(filepath)
        elif inputfile.startswith('theta'):
            input_theta[inputfile.split('_')[0]] = rasterio.open(filepath)
    create_M(input_ksat,input_theta,soilgrids_depths,M_method,M_minmax,directory_out,logger)
    del input_ksat, input_theta, clone, src
    # clean up intermediate GeoTIFF and aux files
    for f in glob.glob(os.path.join(directory_out, "*.tif")):
        try:
            os.remove(f)
        except:
            logger.error('Could not remove ' + f)
    for f in glob.glob(os.path.join(directory_out,"*.aux.xml")):
        try:
            os.remove(f)
        except:
            logger.error('Could not remove ' + f)
    # duplicate the c parameter maps under index-based names (c_0 .. c_3)
    c_parameter_files = ['c_5cm.map','c_15cm.map','c_60cm.map','c_200cm.map']
    for i,f in enumerate(c_parameter_files):
        try:
            shutil.copy(os.path.join(directory_out,f),os.path.join(directory_out,'c_'+ str(i) + '.map'))
        except:
            logger.error('Could not copy ' + f)
    # move LAI maps into the clim subfolder expected by wflow
    clim_dir = os.path.join(directory_out, "clim")
    if not os.path.exists(clim_dir):
        os.mkdir(clim_dir)
    LAI_files = glob.glob(os.path.join(directory_out, 'LAI*'))
    for f in LAI_files:
        try:
            shutil.move(f, os.path.join(clim_dir,os.path.basename(f)))
        except:
            logger.error('Could not move ' + f)
    logger.info('Creating SoilMinThickness.map by copying and renaming SoilThickness.map')
    try:
        shutil.copy(os.path.join(directory_out,'SoilThickness.map'), os.path.join(directory_out,'SoilMinThickness.map'))
    except:
        logger.error('Could not copy and rename SoilThickness.map')
    logger.info('Creating RootingDepth.map by copying and renaming RootingDepth_d75_300x300m.map')
    try:
        shutil.copy(os.path.join(directory_out,'RootingDepth_d75_300x300m.map'), os.path.join(directory_out,'RootingDepth.map'))
    except:
        logger.error('Could not copy and rename RootingDepth.map')
def check_key(dictionary, key, default_value):
    """
    Returns the value assigned to the 'key' in the ini file.
    Parameters:
        dictionary : [dict]
        key : [string|int]
        default_value : [string] value returned when 'key' is absent
    Output: Value assigned to the 'key' into the 'dictionary' (or default value if not found)
    """
    # dict.get performs a single lookup instead of a membership test plus a lookup
    return dictionary.get(key, default_value)
def replaceLinesIniFile(keys, new, path, logger):
    """
    Replaces a specific line in existing ini file with known contents.
    Uses regular Python text parser instead of ConfigParser to circumvent issues with headers, comments and duplicate sections.
    Parameters:
        keys: [list|string] key(s) / line(s) where to replace value(s); may also be a
              marker string such as 'reservoirs_add'/'reservoirs_rem'/'lakes_add'/'lakes_rem'
        new : [list] value(s) to replace (ignored for the reservoir/lake markers)
        path : [string] path to ini file
        logger : [logging object] instance of Python logging module
    Output: ini file adjusted with certain lines added or removed
    """
    logger.info('Rewriting ini file at ' + path)
    # open .ini file
    with open(path, 'r') as f:
        lines = f.readlines()
    # contruct replacement content based on keys
    # NOTE: 'in' is a substring test here, so both '<type>s_add' and '<type>s_rem'
    # markers enter the corresponding branch
    if 'reservoirs' in keys:
        # text specifically for reservoirs
        txt_for_res = "# Reservoirs\n" \
            "ReserVoirSimpleLocs=staticmaps/wflow_reservoirlocs.map,staticmap,0.0,0\n" \
            "ResTargetFullFrac=intbl/ResTargetFullFrac.tbl,tbl,0.8,0,staticmaps/wflow_reservoirlocs.map\n" \
            "ResTargetMinFrac=intbl/ResTargetMinFrac.tbl,tbl,0.4,0,staticmaps/wflow_reservoirlocs.map\n" \
            "ResMaxVolume=intbl/ResMaxVolume.tbl,tbl,0.0,0,staticmaps/wflow_reservoirlocs.map\n" \
            "ResMaxRelease=intbl/ResMaxRelease.tbl,tbl,1.0,0,staticmaps/wflow_reservoirlocs.map\n" \
            "ResDemand=intbl/ResDemand.tbl,tbl,1.0,0,staticmaps/wflow_reservoirlocs.map\n" \
            "ReservoirSimpleAreas=staticmaps/wflow_reservoirareas.map,staticmap,0.0,0\n" \
            "ResSimpleArea = intbl/ResSimpleArea.tbl,tbl,0,0,staticmaps/wflow_reservoirlocs.map\n"
        # check if present in ini file
        try:
            for txt_to_add_line in txt_for_res.split('\n')[0:-1]:
                index = lines.index(txt_to_add_line+'\n')
            res_in_ini = True
        except:
            res_in_ini = False
        if keys == 'reservoirs_add':
            if not res_in_ini:
                # insert the reservoir block right below the LAI line
                line_below_which_to_add = "LAI=staticmaps/clim/LAI,monthlyclim,1.0,1\n"
                index = lines.index(line_below_which_to_add)
                lines[index] = line_below_which_to_add + '\n' + txt_for_res
            else:
                logger.info('Reservoir parameters already included in model ini file. Skipping rewrite.')
        elif keys == 'reservoirs_rem':
            if res_in_ini:
                for txt_to_rem_line in txt_for_res.split('\n')[0:-1]:
                    index = lines.index(txt_to_rem_line+'\n')
                    lines[index] = ''
                lines[index+1] = '' # to prevent double blank lines between segments
            else:
                logger.info('Reservoir parameters not found in model ini file. Cannot remove.')
    elif 'lakes' in keys:
        # text specifically for lakes
        txt_for_lakes_1 = "# Lakes\n" \
            "LakeLocs=staticmaps/wflow_lakelocs.map,staticmap,0.0,0\n" \
            "LakeAreasMap=staticmaps/wflow_lakeareas.map,staticmap,0.0,0\n" \
            "LinkedLakeLocs=intbl/LinkedLakeLocs.tbl,tbl,0,0,staticmaps/wflow_lakelocs.map\n" \
            "LakeStorFunc=intbl/LakeStorFunc.tbl,tbl,1,0,staticmaps/wflow_lakelocs.map\n" \
            "LakeOutflowFunc=intbl/LakeOutflowFunc.tbl,tbl,3,0,staticmaps/wflow_lakelocs.map\n" \
            "LakeArea=intbl/LakeArea.tbl,tbl,1,0,staticmaps/wflow_lakelocs.map\n" \
            "LakeAvgLevel=intbl/LakeAvgLevel.tbl,tbl,1,0,staticmaps/wflow_lakelocs.map\n" \
            "LakeAvgOut=intbl/LakeAvgOut.tbl,tbl,1,0,staticmaps/wflow_lakelocs.map\n" \
            "LakeThreshold=intbl/LakeThreshold.tbl,tbl,0,0,staticmaps/wflow_lakelocs.map\n" \
            "Lake_b=intbl/Lake_b.tbl,tbl,50,0,staticmaps/wflow_lakelocs.map\n" \
            "Lake_e=intbl/Lake_e.tbl,tbl,2.0,0,staticmaps/wflow_lakelocs.map\n"
        txt_for_lakes_2 = "estimatelakethresh=0\n"
        # check if present in ini file
        try:
            for txt_to_add_line in txt_for_lakes_1.split('\n')[0:-1]:
                index = lines.index(txt_to_add_line+'\n')
            for txt_to_add_line in txt_for_lakes_2.split('\n')[0:-1]:
                index = lines.index(txt_to_add_line+'\n')
            lakes_in_ini = True
        except:
            lakes_in_ini = False
        if keys == 'lakes_add':
            if not lakes_in_ini:
                # insert the lake block below the LAI line and the extra setting
                # below the UStoreLayerThickness line
                line_below_which_to_add = "LAI=staticmaps/clim/LAI,monthlyclim,1.0,1\n"
                index = lines.index(line_below_which_to_add)
                lines[index] = line_below_which_to_add + '\n' + txt_for_lakes_1
                line_below_which_to_add_2 = "UStoreLayerThickness = 100,300,800\n"
                index_2 = lines.index(line_below_which_to_add_2)
                lines[index_2] = line_below_which_to_add_2 + txt_for_lakes_2
            else:
                logger.info('Lake parameters already included in model ini file. Skipping rewrite.')
        elif keys == 'lakes_rem':
            if lakes_in_ini:
                for txt_to_rem_line in txt_for_lakes_1.split('\n')[0:-1]:
                    index = lines.index(txt_to_rem_line+'\n')
                    lines[index] = ''
                lines[index+1] = '' # to prevent double blank lines between segments
                for txt_to_rem_line in txt_for_lakes_2.split('\n')[0:-1]:
                    index = lines.index(txt_to_rem_line+'\n')
                    lines[index] = ''
                lines[index+1] = '' # to prevent double blank lines between segments
            else:
                logger.info('Lake parameters not found in model ini file. Cannot remove.')
    else:
        # keys / default lines in .ini file
        key_lines = {
            'AnnualDischarge': 'AnnualDischarge\t= 2290\n',
            'Alpha': 'Alpha\t\t= 120\n'
        }
        # helper function: rewrite key/line
        def rewriteKey(key, value, logger=None):
            logger.info('Rewriting ' + key + ' in ini file...')
            to_replace = key_lines[key].split(' ')[-1]
            try:
                index = lines.index(key_lines[key])
                lines[index] = key_lines[key].replace(to_replace, str(value) + '\n')
            except ValueError:
                try:
                    # the line may already carry the new value (idempotent rerun)
                    index = lines.index(key_lines[key].split(to_replace)[0] + str(value) + '\n')
                    logger.info('Specified replacement did already take place, skipping this rewrite.')
                except ValueError as e:
                    logger.error(str(e))
        # replace specified keys/lines
        for i in range(len(keys)):
            if keys[i] in key_lines.keys():
                rewriteKey(keys[i], new[i], logger)
            else:
                logger.error("Specified key not recognized: '" + key[i] + "'. Cannot replace value for this key in ini file!")
    # rewrite .ini file
    with open(path, 'w') as new_f:
        for line in lines:
            new_f.write(line)
def copyFiles(dst, src_files, script_root, src_scripts, logger):
    """
    Copies files from various locations to the specified directory.
    Parameters:
        dst : [string] destination path
        src_files : [list] list of source paths (files)
        script_root : [string] root directory the script paths are relative to
        src_scripts : [list] list of source paths (scripts), relative to script_root
        logger : [logging object] instance of Python logging module
    Output: files copied into 'dst' (best effort; failures are logged as warnings)
    """
    for src_file in src_files:
        try:
            shutil.copy(src_file, os.path.join(dst, os.path.basename(src_file)))
        except Exception:
            # best-effort copy: log and continue, but no longer swallow
            # KeyboardInterrupt/SystemExit like the previous bare 'except:' did
            logger.warning('Could not copy ' + src_file)
    for src_script in src_scripts:
        try:
            shutil.copy(os.path.join(script_root, src_script), os.path.join(dst, src_script))
        except Exception:
            logger.warning('Could not copy ' + os.path.join(script_root, src_script))
def createReadMe(path_readme, setup_folder='setup'):
    """
    Write a ReadMe.txt into the given catchment folder explaining the automated setup.
    Parameters:
        path_readme : [string] full path of the directory in which ReadMe.txt is created
        setup_folder : [string] name of setup folder within the same directory
    Output: ReadMe.txt file created in specified path
    """
    # assemble the explanatory text, referencing the setup folder by name
    content = [
        "This model was automatically set up using a combination of different Python scripts and data sources. \n\n",
        "Several folders and files have been updated, including 'intbl', 'staticmaps' and the model .ini file. \n",
        "This means that other folders and files, specifically those in the 'data\parameters' folder, might contain outdated information. \n\n",
        "Setup and debug information is stored in the '" + setup_folder + "' folder. This contains, amongst others:\n",
        "- log file, with the following levels:\n",
        " - INFO: information deemed relevant during and/or after execution of setup\n",
        " - WARNING: potential issue was encountered for which a workaround was in place, should probably be checked\n",
        " - ERROR: potential issue was encountered for which no workaround was in place, should definitely be checked\n",
        " - CRITICAL: could not execute code that is vital for success, no setup/processing could be carried out\n",
        "- ini file\n",
        "- settings file\n",
        "- used scripts\n",
        "- figures related to the set up of lakes and/or reservoir intbl's and the Annual Discharge parameter\n",
    ]
    readme_path = os.path.join(path_readme, 'ReadMe.txt')
    with open(readme_path, 'w') as handle:
        handle.writelines(content)
def changeGeoJSONgeomType(path, file_old='catchments_original.geojson', file_new='catchments_v2.geojson', geomType_old='Polygon', geomType_new='MultiLineString', remove_old=True, logger=None):
    """
    Changes the geometry type in a (Geo)JSON file.
    Parameters:
        path : [string] path to directory where old/temporary file is located
        file_old : [string] filename of old/temporary file
        file_new : [string] filename of new/to-be-created file
        geomType_old : [string] geometry type in old/temporary file
        geomType_new : [string] geometry type for new/to-be-created file
        remove_old : [boolean] delete/remove old/temporary file
        logger : [logging object] instance of Python logging module
            (optional; a module logger is used when omitted)
    Output: new (Geo)JSON file with updated geometry types in specified path
    """
    if logger is None:
        # bug fix: the default logger=None previously crashed on the first
        # logger.info call; fall back to a standard module logger instead
        import logging
        logger = logging.getLogger(__name__)
    logger.info('Changing geometry type in catchments file from ' + geomType_old + ' to ' + geomType_new)
    with open(os.path.join(path, file_old), 'r') as f:
        data = json.load(f)
    for feature in data['features']:
        if feature['geometry']['type'] == geomType_old:
            feature['geometry']['type'] = geomType_new
        else:
            logger.warning('Feature in GeoJSON does not have geometry type ' + geomType_old + '!')
    with open(os.path.join(path, file_new), 'w') as f:
        json.dump(data, f)
    if remove_old:
        os.remove(os.path.join(path, file_old))
def runModelbuilder(folder, catchment_id, resolution, modelbuilder_path, catchments_path, rivers_path, logger):
    """
    Runs the modelbuilder to obtain new topographic base data.
    Parameters:
        folder : [string] path to current catchment/case
        catchment_id : [string] id/name of current catchment/case
        resolution : [float] resolution for model
        modelbuilder_path : [string] path to modelbuilder scripts
        catchments_path : [string] path to catchment geometry geojson (modelbuilder input)
        rivers_path : [string] path to existing rivers geojson (potential modelbuilder input)
        logger : [logging object] instance of Python logging module
    Output: various files created with modelbuilder, including logs in newly created 'modelbuilder' folder within catchment/case
    """
    # update modelbuilder settings file
    modelbuilder_settings_path = os.path.join(modelbuilder_path, 'settings.json')
    logger.info('Updating modelbuilder settings file at ' + modelbuilder_settings_path)
    logger.info('Using catchment geometry from ' + catchments_path)
    with open(modelbuilder_settings_path) as f:
        modelbuilder_settings = geojson.load(f)
    with open(catchments_path) as c:
        catchment_json = geojson.load(c)
    geom_type = catchment_json['features'][0]['geometry']['type']
    # NOTE(review): the whole 'properties' entry is replaced with the bare
    # resolution value — looks intentional for the modelbuilder input, but confirm
    modelbuilder_settings['features'][0]['properties'] = resolution
    modelbuilder_settings['features'][0]['geometry']['type'] = geom_type
    modelbuilder_settings['features'][0]['geometry']['coordinates'] = catchment_json['features'][0]['geometry']['coordinates']
    with open(modelbuilder_settings_path, 'w') as f:
        geojson.dump(modelbuilder_settings, f)
    logger.info('Modelbuilder settings file updated.')
    # clearing modelbuilder log files
    modelbuilder_log_1 = 'wtools_create_grid.log'
    modelbuilder_log_2 = 'wtools_static_maps.log'
    modelbuilder_log_1_path = os.path.join(modelbuilder_path, modelbuilder_log_1)
    modelbuilder_log_2_path = os.path.join(modelbuilder_path, modelbuilder_log_2)
    logger.info('Clearing modelbuilder log files...')
    if os.path.exists(modelbuilder_log_1_path):
        os.remove(modelbuilder_log_1_path)
    if os.path.exists(modelbuilder_log_2_path):
        os.remove(modelbuilder_log_2_path)
    # run modelbuilder from command line
    logger.info('Running modelbuilder...')
    curr_work_dir = os.getcwd()
    try:
        os.chdir(modelbuilder_path)
        logger.info('Changed working directory to ' + modelbuilder_path)
    except OSError:
        logger.fatal('Could not change working directory to ' + modelbuilder_path)
        sys.exit()
    # build the command line: river path and region filter are optional
    if rivers_path != None:
        if geom_type != 'Point':
            modelbuilder_command = "python modelbuilder.py --geojson-path settings.json --name " + catchment_id + " --cellsize " + str(resolution) + " --river-path " + rivers_path + " --region-filter region"
        else:
            modelbuilder_command = "python modelbuilder.py --geojson-path settings.json --name " + catchment_id + " --cellsize " + str(resolution) + " --river-path " + rivers_path
    else:
        if geom_type != 'Point':
            modelbuilder_command = "python modelbuilder.py --geojson-path settings.json --name " + catchment_id + " --cellsize " + str(resolution) + " --region-filter region"
        else:
            modelbuilder_command = "python modelbuilder.py --geojson-path settings.json --name " + catchment_id + " --cellsize " + str(resolution)
    logger.info('Running the following in command line: ' + modelbuilder_command)
    try:
        # NOTE(review): command passed as one string without shell=True — this
        # relies on Windows CreateProcess semantics; on POSIX it would fail.
        # Confirm the target platform before changing.
        subprocess.run(modelbuilder_command)
    except:
        logger.fatal('Modelbuilder command line operation failed!')
        sys.exit()
    # copy/overwrite all modelbuilder data to current folder
    logger.info('Replacing outdated directories of '+ folder + ' with modelbuilder output...')
    to_be_replaced_dirs = os.listdir(os.path.join(modelbuilder_path, catchment_id))
    for temp_dir in to_be_replaced_dirs:
        if os.path.isdir(os.path.join(modelbuilder_path, catchment_id, temp_dir)):
            if os.path.exists(os.path.join(folder, temp_dir)):
                logger.info("Deleting outdated '" + temp_dir + "' folder")
                shutil.rmtree(os.path.join(folder, temp_dir))
            logger.info("Copying modelbuilder '" + temp_dir + "' folder")
            shutil.copytree(os.path.join(modelbuilder_path, catchment_id, temp_dir), os.path.join(folder, temp_dir))
        else:
            logger.info("Overwriting outdated '" + temp_dir + "' file")
            shutil.copy(os.path.join(modelbuilder_path, catchment_id, temp_dir), os.path.join(folder, temp_dir))
    logger.info('Deleting modelbuilder output...')
    shutil.rmtree(os.path.join(modelbuilder_path, catchment_id))
    # copy modelbuilder files to new folder
    modelbuilder_folder = os.path.join(folder, 'modelbuilder')
    if not os.path.exists(modelbuilder_folder):
        os.makedirs(modelbuilder_folder, exist_ok=True)
    else:
        # clear folder if it already existed
        try:
            for temp_file in os.listdir(modelbuilder_folder):
                os.remove(os.path.join(modelbuilder_folder, temp_file))
        except OSError:
            pass
    logger.info('Copying modelbuilder settings and log files to ' + modelbuilder_folder)
    shutil.copy(modelbuilder_settings_path, os.path.join(modelbuilder_folder, 'settings.json'))
    shutil.copy(modelbuilder_log_1_path, os.path.join(modelbuilder_folder, modelbuilder_log_1))
    shutil.copy(modelbuilder_log_2_path, os.path.join(modelbuilder_folder, modelbuilder_log_2))
    # change working directory back to what is was before modelbuilder run
    try:
        os.chdir(curr_work_dir)
        logger.info('Changed working directory back to ' + curr_work_dir)
    except OSError:
        logger.fatal('Could not change working directory back to ' + curr_work_dir)
        sys.exit()
    logger.info('Modelbuilder process finished. Continuing with setup procedure...')
def getWaterbodiesCatchment(catchment, waterbodies_path):
    """
    Obtain waterbodies (i.e. lakes or reservoirs) intersecting the catchment.
    Parameters:
        catchment : [geopandas object] catchment bounds
        waterbodies_path : [string] path to dataset for lakes or reservoirs
    Output: GeoDataFrame of waterbodies within the catchment (empty when none found)
    """
    # catchment bounding box
    bounds = catchment.geometry.bounds
    # load data
    waterbodies = gpd.read_file(waterbodies_path)
    # coarse pass: query the spatial index with the catchment bounding box
    bbox = (bounds.minx[0], bounds.miny[0], bounds.maxx[0], bounds.maxy[0])
    candidate_ids = list(waterbodies.sindex.intersection(bbox))
    result = gpd.GeoDataFrame([])
    if candidate_ids:
        # fine pass: intersect the candidates with the actual catchment geometry
        candidates = waterbodies.iloc[candidate_ids]
        overlay = gpd.overlay(catchment, candidates, how='intersection')
        if overlay.geometry.size != 0:
            result = overlay
    return result
def checkWaterbodies(type, catchment, waterbodies_path, min_area=0, logger=None):
    """
    Checks if waterbodies (i.e. lakes or reservoirs) of sufficient size are present within catchment.
    Parameters:
        type: [string] 'lake' or 'reservoir'
        catchment : [geopandas object] catchment bounds
        waterbodies_path : [string] path to dataset for lakes or reservoirs
        min_area : [float) minimum area of waterbodies to include
        logger : [logging object] instance of Python logging module
    Output: dictionary with waterbodies within catchment and boolean whether to execute processing
    """
    #initialize output variables
    catch_intersect = None
    do_process = False
    #get catchment boundaries
    c = catchment.geometry.bounds
    #load data
    waterbodies = gpd.read_file(waterbodies_path)
    #intersect catchment bounding box with water bodies (coarse spatial-index pass)
    s_idx = waterbodies.sindex
    inds = list(s_idx.intersection((c.minx[0],c.miny[0],c.maxx[0],c.maxy[0])))
    if len(inds) > 0:
        #intersect actual catchment bounds with water bodies
        match_intersect = waterbodies.iloc[inds]
        catch_intersect = gpd.overlay(catchment, match_intersect, how='intersection')
        if catch_intersect.geometry.size > 0:
            if min_area > 0:
                #filter on area
                catch_intersect = catch_intersect[catch_intersect['Lake_area']>=min_area]
                if catch_intersect.geometry.size > 0:
                    do_process = True
                else:
                    logger.warning('No ' + type + 's of sufficient size found within catchment! Skipping ' + type + ' procedures!')
            else:
                do_process = True
            #update area value (because only parts of water bodies might fall within catchment)
            if do_process:
                # project WGS84 -> Web Mercator to measure the clipped areas in m^2
                # NOTE(review): Web Mercator distorts areas away from the equator — confirm acceptable
                proj = partial(pyproj.transform, pyproj.Proj(init='epsg:4326'), pyproj.Proj(init='epsg:3857'))
                def updateArea(polygon, proj=proj):
                    return transform(proj, polygon).area / 1e6 # km2
                catch_intersect['Lake_area'] = catch_intersect.apply(lambda row: updateArea(row['geometry']), axis=1)
        else:
            logger.warning('No ' + type + 's found within catchment! Skipping ' + type + ' procedures!')
    else:
        logger.warning('No ' + type + 's found within catchment! Skipping ' + type + ' procedures!')
    return {'data': catch_intersect, 'do_process':do_process}
def processWaterbodies(type, do_process, waterbodies, out_intbl_dir, out_clone_dir, model_ini_path, res_range_min, res_range_max, res_method, debug_path, logger):
    """
    Set up (or remove) intbl's, maps and ini-file entries for waterbodies (lakes or reservoirs).
    Parameters:
        type : [string] 'lake' or 'reservoir'
        do_process : [boolean] whether to process/add for specified type
        waterbodies : waterbody geometries within the catchment
        out_intbl_dir : [string] path to where newly created intbl files will be placed
        out_clone_dir : [string] path to where newly created map files will be placed
        model_ini_path : [string] path to model ini file
        res_range_min : [list|float] range (min,max) for reservoir minimum fractions
        res_range_max : [list|float] range (min,max) for reservoir full fractions
        res_method : [int] method of intbl calculation for reservoirs (0, 1 or 2)
        debug_path : [string] path to where debug/setup information is stored
        logger : [logging object] instance of Python logging module
    Output: new intbl and map files (when do_process=True) as well as an adjusted model ini file
    """
    if not do_process:
        # nothing to add: make sure the ini file does not reference this waterbody type
        logger.info('Removing ' + type + ' parameters from model ini file...')
        replaceLinesIniFile(type+'s_rem', None, model_ini_path, logger=logger)
        return
    # intbl setup
    logger.info('Setting up ' + type + ' intbls in ' + out_intbl_dir)
    if type == 'reservoir':
        setup_reservoir_intbl.make_tbls(waterbodies, out_intbl_dir, range_min=res_range_min, range_max=res_range_max, method=res_method, debug_path=debug_path, logger=logger)
    elif type == 'lake':
        setup_lake_intbl.make_tbls(waterbodies, out_intbl_dir, debug_path=debug_path, logger=logger)
    logger.info('Finished ' + type + ' intbls setup')
    # map setup
    logger.info('Setting up ' + type + ' maps in ' + out_clone_dir)
    if type == 'reservoir':
        setup_waterbody_maps.make_maps(waterbodies, False, out_clone_dir, "wflow_reservoirareas", "wflow_reservoirlocs", logger=logger)
    elif type == 'lake':
        setup_waterbody_maps.make_maps(waterbodies, True, out_clone_dir, "wflow_lakeareas", "wflow_lakelocs", logger=logger)
    logger.info('Finished ' + type + ' maps setup')
    # register the new parameters in the model ini file
    logger.info('Adding ' + type + ' parameters to model ini file...')
    replaceLinesIniFile(type+'s_add', None, model_ini_path, logger=logger)
def createMapFile(data, outdir, mapfile, map_meta, logger):
    """
    Helper function to create PCRaster .map file by first creating a temporary GeoTIFF file and then converting that to desired output.
    Parameters:
        data : [array] single-band raster data to write
        outdir : [string] output directory
        mapfile : [string] output filename without extension
        map_meta : [dict] rasterio profile/metadata used for the temporary GeoTIFF
        logger : [logging object] instance of Python logging module
    Output: <mapfile>.map in 'outdir'; temporary .tif and .map.aux.xml are removed
    """
    # hoist the three output paths used throughout
    tif_path = os.path.join(outdir, mapfile + '.tif')
    map_path = os.path.join(outdir, mapfile + '.map')
    aux_path = os.path.join(outdir, mapfile + '.map.aux.xml')
    # create temporary GeoTIFF file
    logger.info('Starting process to write ' + map_path)  # fixed 'Staring' typo in log message
    logger.info('Writing temporary GeoTIFF file...')
    map_meta.update(compress='lzw')  # add compression
    map_meta['driver'] = 'GTiff'  # make sure driver is set to GeoTIFF
    # the with-statement closes the dataset; the previous explicit close() after
    # the block was redundant and has been dropped
    with rasterio.open(tif_path, 'w', **map_meta) as out:
        out.write(data, 1)
    # convert temporary GeoTIFF file to PCRaster .map file
    logger.info('Converting temporary GeoTIFF file to PCRaster map file...')
    gdal.Translate(map_path, tif_path, options='-of PCRaster')
    # delete temporary GeoTIFF file (and .map.aux.xml file created during conversion)
    logger.info('Deleting temporary GeoTIFF file and .map.aux.xml file that is created during conversion..')
    if os.path.exists(tif_path):
        os.remove(tif_path)
    else:
        logger.warning('Could not find temporary GeoTIFF file! Skipping removal.')
    if os.path.exists(aux_path):
        os.remove(aux_path)
    else:
        logger.warning('Could not find .map.aux.xml file created during conversion! Skipping removal.')
def _rasterizeWaterbodies(path, waterbodies, ids, map_meta):
    """
    Helper to burn waterbody polygons onto the model grid.
    Parameters:
        path : [string] path to current catchment/case maps folder (must contain wflow_subcatch.map)
        waterbodies : [GeoDataFrame] waterbody polygons
        ids : [Series] ID value to burn per waterbody (aligned with waterbodies rows)
        map_meta : [dict] rasterio metadata of the target grid (transform/nodata)
    Returns: [2D array] grid with waterbody IDs on covered pixels and 0 elsewhere.
    """
    # use the subcatchment map as template for shape/extent of the output grid
    out_arr = pcr.pcr2numpy(pcr.readmap(os.path.join(path, 'wflow_subcatch.map')), map_meta['nodata'])
    out_arr = (out_arr/out_arr)-1 #make sure default array contains zeros only
    shapes = ((geom, value) for geom, value in zip(waterbodies.geometry, ids))
    return rasterio.features.rasterize(shapes=shapes, fill=0, out=out_arr, transform=map_meta['transform'], all_touched=True)

def updateRiverWidths(path, reservoirs, lakes, flag=-2, logger=None):
    """
    Removes large river widths from river width map at locations of lakes and reservoirs.
    Parameters:
        path : [string] path to current catchment/case maps folder (i.e. directory of river width map)
        reservoirs : [GeoDataFrame] reservoirs within catchment
        lakes : [GeoDataFrame] lakes within catchment
        flag : [int] value to assign to waterbody pixels in riverwidth map
        logger : [logging object] instance of Python logging module
    Output: updated wflow_riverwidth map (original preserved as wflow_riverwidth_original.map).
    """
    logger.info('Updating wflow_riverwidth.map by replacing lake/reservoir pixels with a value of ' + str(flag))
    #read in current riverwidths map and obtain map metadata
    riverwidth_map = rasterio.open(os.path.join(path, 'wflow_riverwidth.map'), dtype=np.uint)
    riverwidths = riverwidth_map.read(1)
    map_meta = riverwidth_map.meta.copy()
    if map_meta['crs'] == None:
        # assume WGS84 when the PCRaster map carries no CRS
        map_meta['crs'] = rasterio.crs.CRS.from_epsg(4326)
    #flag riverwidths at lakes
    if not lakes.empty:
        lakes_raster = _rasterizeWaterbodies(path, lakes, lakes.Hylak_id, map_meta)
        riverwidths = np.where(lakes_raster > 0, flag, riverwidths)
    #flag riverwidths at reservoirs
    if not reservoirs.empty:
        reservoirs_raster = _rasterizeWaterbodies(path, reservoirs, reservoirs.ID, map_meta)
        riverwidths = np.where(reservoirs_raster > 0, flag, riverwidths)
    #copy original map
    logger.info('Copying old map to wflow_riverwidth_original.map')
    shutil.copy(os.path.join(path, 'wflow_riverwidth.map'), os.path.join(path, 'wflow_riverwidth_original.map'))
    #save new map
    createMapFile(riverwidths, path, 'wflow_riverwidth_new', map_meta, logger)
    #overwrite original map with updated values
    logger.info('Saving new map as wflow_riverwidth.map')
    riverwidth_map.close()
    os.remove(os.path.join(path, 'wflow_riverwidth.map'))
    shutil.copy(os.path.join(path, 'wflow_riverwidth_new.map'), os.path.join(path, 'wflow_riverwidth.map'))
    os.remove(os.path.join(path, 'wflow_riverwidth_new.map'))
def checkStaticmaps(staticmaps_check, out_clone_dir, logger):
    """
    Checks if all staticmaps specified in setup ini file were created and removes unexpected files.
    Parameters:
        staticmaps_check : [list|None] files that should be present in staticmaps folder (without .map extension)
        out_clone_dir : [string] path to staticmaps folder
        logger : [logging object] instance of Python logging module
    Output: info in log; files in the staticmaps folder that are not listed (and not 'clim') are deleted.
    """
    # nothing to check/clean when no expected-file list was configured; also prevents
    # a TypeError from the 'not in staticmaps_check' test in the cleanup loop below
    if staticmaps_check is None:
        return
    for temp_file in staticmaps_check:
        if not os.path.exists(os.path.join(out_clone_dir, temp_file + '.map')):
            logger.warning(temp_file + ' map from setup ini file was not created!')
    # remove staticmaps that are no longer in use / not relevant
    logger.info('Cleaning up staticmaps folder...')
    for temp_file in os.listdir(out_clone_dir):
        # keep the 'clim' subfolder and any file whose base name is expected
        if temp_file != 'clim' and temp_file.split('.map')[0] not in staticmaps_check:
            temp_path = os.path.join(out_clone_dir, temp_file)
            logger.warning("Deleting unexpected/outdated file " + temp_path)
            os.remove(temp_path)
def checkIntbl(intbls_check, out_intbl_dir, logger):
    """
    Checks if all intbl's specified in setup ini file were created.
    Parameters:
        intbls_check : [list|None] files that should be present in intbl folder (without .tbl extension)
        out_intbl_dir : [string] path to intbl folder
        logger : [logging object] instance of Python logging module
    Output: info in log (one error per missing table)
    """
    if intbls_check is None:
        return
    # report every expected table that is absent from the intbl folder
    missing = (tbl for tbl in intbls_check
               if not os.path.exists(os.path.join(out_intbl_dir, tbl + '.tbl')))
    for tbl in missing:
        logger.error(tbl + ' intbl from setup ini file not found as template intbl! Could not copy!')
def relocateStation(lat, lon, area, uparea_dataset, nodata=-9999, point_buffer_val=0.04, point_buffer_style=3, area_margin=0.5, digits_new_coords=4):
    """
    Relocates gauging station based on upstream area.
    Parameters:
        lat : [float] latitude of gauging station location
        lon : [float] longitude of gauging station location
        area : [float] reported upstream area of the station (same units as uparea_dataset values)
        uparea_dataset : [rasterio dataset] 2D dataset of upstream area (e.g. file opened with rasterio)
        nodata : [int|float] no data / missing value in upstream area dataset
        point_buffer_style : [int] type of buffer around point (1=circle [default in function], 3=square [default here])
        point_buffer_val : [float] search radius (buffer around lat/lon point)
        area_margin : [float] margin for matching upstream area (0.5 = between 50% lower or higher)
        digits_new_coords : [int] number of digits to store for output, i.e. new lat/lon coordinate
    Output: relocated gauging station as shapely Point, or np.nan when no cell within the
        search window matches the reported upstream area within the margin.
    """
    # get data around current location
    data = rasterio.mask.mask(uparea_dataset, [Point(lon, lat).buffer(point_buffer_val, cap_style=point_buffer_style)], crop=True, all_touched=True)
    # get data mask
    mask_missings = data[0][0]==nodata#map_meta['nodata']
    mask_margin = (data[0][0]>=area*(1-area_margin)) & (data[0][0]<=area*(1+area_margin))
    mask_final = mask_missings | ~mask_margin
    # check if location can be relocated
    if not mask_final.all():
        # mask data
        data_masked = np.ma.masked_where(mask_final, data[0][0])
        # get distances from current location (window centre), in cell units
        x_dist = np.arange(data_masked.shape[1])-data_masked.shape[1]/2+np.mod(data_masked.shape[1],2)/2
        y_dist = data_masked.shape[0]/2-np.mod(data_masked.shape[0],2)/2-np.arange(data_masked.shape[0])
        x_dist,y_dist = np.meshgrid(x_dist,y_dist)
        # builtin int instead of np.int: the np.int alias was removed in NumPy >= 1.24
        dists = np.rint(np.sqrt(x_dist**2+y_dist**2)).astype(int)
        # find location that fullfills criteria of exceedence and has minimum distance to source
        ind_lat, ind_lon = np.unravel_index(np.ma.masked_where(mask_final, dists).argmin(), data[0][0].shape)
        # construct coordinates of new location from indices (cell centre via affine transform terms)
        new_lat = np.round(data[1][5] + (ind_lat + 0.5) * data[1][4], digits_new_coords)
        new_lon = np.round(data[1][2] + (ind_lon + 0.5) * data[1][0], digits_new_coords)
        return Point(new_lon, new_lat)
    else:
        # np.nan instead of np.NaN: the NaN alias was removed in NumPy 2.0
        return np.nan
def getabspath(path, root):
    """Return *path* as an absolute path; relative paths are resolved against *root* and normalized."""
    if os.path.isabs(path):
        return path
    return os.path.normpath(os.path.join(root, path))
def makeSingleIniClones(ini_file):
    """
    Does all setup processing for a single ini file. This can be for a single catchment, or for all catchments in a certain folder.
    Parameters:
        ini_file : [string] ini file with relevant configuration
    Output: A lot of files created, deleted and/or changed. Returns logging statistics to be used in top level log file.
    """
    # read the ini-file
    root = os.path.dirname(os.path.abspath(ini_file))
    script_root = os.path.dirname(os.path.realpath(__file__))
    config = cp.ConfigParser()
    try:
        config.read(ini_file)
    except:
        sys.exit("ERROR: Not possible to open 'ini'- file.")
    # [STRUCTURE]: input/output locations and names of helper files
    if config.has_section("STRUCTURE"):
        directory_topo = getabspath(check_key(config["STRUCTURE"],"input_topo", r"p:/wflow_global/static_data/base/hydro_merit"), root)
        directory_in = getabspath(check_key(config["STRUCTURE"],"input_other", r"p:/wflow_global"), root)
        directory_out = getabspath(check_key(config["STRUCTURE"],"output", "output"), root)
        paramsfolder = check_key(config["STRUCTURE"],"parameters", "static_data")
        settingsfile = check_key(config["STRUCTURE"],"file", "settings_scaling.csv")
        intblfolder = getabspath(check_key(config["STRUCTURE"],"intbl", r"p:/wflow_global/static_data/wflow_sbm_parameters/intbl_template"), root)
        reservoirs_path = check_key(config["STRUCTURE"],"reservoirs", r"p:/wflow_global/static_data/base/waterbodies/reservoir-db.gpkg")
        lakes_path = check_key(config["STRUCTURE"],"lakes", r"p:/wflow_global/static_data/base/waterbodies/lake-db.gpkg")
        catchmentsfolder = check_key(config["STRUCTURE"],"catchments", r"data/catchments/catchments.geojson")
        riversfolder = check_key(config["STRUCTURE"],"rivers", r"data/rivers/rivers.geojson")
        modelbuilder_path = check_key(config["STRUCTURE"],"modelbuilder", "modelbuilder")
        path = check_key(config["STRUCTURE"],"path", "staticmaps")
        clonefile = check_key(config["STRUCTURE"],"clone", "wflow_dem.map")
        clonefolder = check_key(config["STRUCTURE"],"clonefolder", "*/")
        discharges_path = getabspath(check_key(config["STRUCTURE"],"discharges", r"p:/wflow_global/static_data/mean_discharge_1k/FLO1K.ts.1960.2015.qav.nc"), root)
        path_grdc_stations = check_key(config["STRUCTURE"],"path_grdc_stations", r"p:/wflow_global/static_data/gauging_stations/grdc_stations.xlsx")
        model_ini = check_key(config["STRUCTURE"],"model_ini", "wflow_sbm")
        setup_folder = check_key(config["STRUCTURE"],"setup_info", "setup")
    # [CONTROLS]: boolean switches (stored as 0/1 in the ini) controlling which setup steps run
    if config.has_section("CONTROLS"):
        do_modelbuilder = bool(int(check_key(config["CONTROLS"],"do_modelbuilder", 0)))
        use_current_rivers = bool(int(check_key(config["CONTROLS"],"use_current_rivers", 0)))
        use_merit_derived = bool(int(check_key(config["CONTROLS"],"use_merit_derived", 1)))
        use_pyflwdir_point = bool(int(check_key(config["CONTROLS"],"use_pyflwdir_point", 0)))
        upstream_from_point = bool(int(check_key(config["CONTROLS"],"upstream_from_point", 0)))
        min_stream_order = int(int(check_key(config["CONTROLS"],"min_stream_order", 6)))
        get_custom_widths = bool(int(check_key(config["CONTROLS"],"get_custom_widths", 1)))
        do_lakes = bool(int(check_key(config["CONTROLS"],"do_lakes", 0)))
        do_reservoirs = bool(int(check_key(config["CONTROLS"],"do_reservoirs", 0)))
        debug_discharge = bool(int(check_key(config["CONTROLS"],"debug_discharge", 1)))
        template_ini = bool(int(check_key(config["CONTROLS"],"template_ini", 1)))
        save_pyflwdir = bool(int(check_key(config["CONTROLS"],"save_pyflwdir", 0)))
        get_pyflwdir_riv = bool(int(check_key(config["CONTROLS"],"get_pyflwdir_riv", 0)))
        interp_soilthick = bool(int(check_key(config["CONTROLS"],"interp_soilthick", 1)))
        get_grdc_gauges = bool(int(check_key(config["CONTROLS"],"grdc_gauges", 1)))
    # [PARS]: numeric parameters for the processing steps
    if config.has_section("PARS"):
        resolution = float(check_key(config["PARS"],"resolution", 0.008333333333333333))
        alpha = check_key(config["PARS"],"alpha", 60)
        M_method = int(check_key(config["PARS"],"M_method", 2))
        M_minmax = int(check_key(config["PARS"],"M_minmax", 100000))
        riv_upa = float(check_key(config["PARS"],"pyflwdir_riv_upa", 30.))
        smooth_len = float(check_key(config["PARS"],"pyflwdir_smooth_len", 1e4))
        ucat_ratio = int(check_key(config["PARS"],"pyflwdir_ucat_ratio", 10))
        res_min_area = float(check_key(config["PARS"],"res_min_area_km2", 0))
        lake_min_area = float(check_key(config["PARS"],"lake_min_area_km2", 3))
        res_intbl_method = int(check_key(config["PARS"],"res_intbl_method", 1))
        res_minfrac_min = float(check_key(config["PARS"],"res_minfrac_min", 0.0))
        res_minfrac_max = float(check_key(config["PARS"],"res_minfrac_max", 0.9))
        res_fullfrac_min = float(check_key(config["PARS"],"res_fullfrac_min", 0.1))
        res_fullfrac_max = float(check_key(config["PARS"],"res_fullfrac_max", 1.0))
    # [FILES]: newline-separated lists of expected output files
    if config.has_section("FILES"):
        intbls_check = check_key(config["FILES"],"tbls", None)
        # NOTE(review): check_key can return None (the default) here, in which case split() raises — confirm all setup ini files define 'tbls' and 'maps'
        intbls_check = intbls_check.split('\n')
        staticmaps_check = check_key(config["FILES"],"maps", None)
        staticmaps_check = staticmaps_check.split('\n')
    # [TESTING]: optional comma-separated list of steps to skip in test mode
    if config.has_section("TESTING"):
        tests = check_key(config["TESTING"],"do_test", None)
    else:
        tests = None
    # tests
    if tests != None:
        tests = tests.split(',')
    else:
        tests = [tests]
    # paths
    clones = glob.glob(os.path.join(directory_out, clonefolder))
    parameters_path = os.path.join(directory_in, paramsfolder)
    #settings_path = os.path.join(directory_out, settingsfile)
    settings_path = os.path.join(script_root, settingsfile)
    # variable for storing information on each individual catchment
    log_stats = []
    # quick checks before going into loop
    if use_merit_derived and do_modelbuilder:
        sys.exit("ERROR: Running modelbuilder while using MERIT derived data! This will cause MERIT derived data to be overwritten with modelbuilder results, which makes it impossible to use MERIT derived data! Please choose one of the two, and note that it is advised to use MERIT derived data when possible.")
    if get_custom_widths and not use_merit_derived:
        sys.exit("ERROR: Custom river widths can only be obtained when using MERIT derived data! Please adjust setup ini file!")
    if get_grdc_gauges and not use_merit_derived:
        sys.exit("ERROR: GRDC stations for gauges map can only be used when also using MERIT derived data! Please adjust setup ini file!")
    if use_merit_derived:
        if not os.path.exists(directory_topo):
            sys.exit("ERROR: Path to topographic base data does not exist! Check setup ini file and/or topographic input directory!")
    if not os.path.exists(parameters_path):
        sys.exit("ERROR: Path to parameter base data does not exist! Check setup ini file and/or input directory!")
    if len(clones) == 0:
        sys.exit("ERROR: Folder(s) where model should be set up do not exist! Check setup ini file and/or output directory!")
    # loop over each catchment
    for folder in clones:
        # catchment
        basin_id = -1
        catchment_id = os.path.basename(os.path.normpath(folder))
        catchments_path = os.path.join(folder, catchmentsfolder)
        if not os.path.exists(os.path.split(catchments_path)[0]):
            os.makedirs(os.path.split(catchments_path)[0], exist_ok=True)
        model_ini_path = os.path.join(folder, model_ini + '.ini')
        # create setup/debug folder
        setup_path = os.path.join(folder, setup_folder)
        if not os.path.exists(setup_path):
            os.makedirs(setup_path, exist_ok=True)
        else:
            # clear folder if it already existed
            try:
                for temp_file in os.listdir(setup_path):
                    os.remove(os.path.join(setup_path, temp_file))
            except OSError:
                pass
        # get logger
        logger = setup_logging.setupLogging(setup_path)
        logger.info('Starting setup procedure of catchment/case ' + catchment_id + ' (' + ini_file + ')')
        logger.info('Running modelbuilder: ' + str(do_modelbuilder))
        logger.info('Using MERIT derived data: ' + str(use_merit_derived))
        logger.info('Deriving custom river widths: ' + str(get_custom_widths))
        logger.info('Using GRDC stations for gauges: ' + str(get_grdc_gauges))
        logger.info('Adding reservoirs: ' + str(do_reservoirs))
        logger.info('Adding lakes: ' + str(do_lakes))
        if not None in tests:
            logger.warning(" <<< RUNNING SCRIPTS IN TEST MODE! >>> ")
            if len(tests) > 1:
                logger.info('Test parameters:')
                for test_subject in tests:
                    logger.info(' - ' + test_subject)
            else:
                logger.info('Test parameter: ' + tests[0])
        # modelbuilder
        if do_modelbuilder:
            # check potential input/paths
            logger.info('Starting modelbuilder process...')
            if not os.path.exists(modelbuilder_path):
                sys.exit('ERROR: Path to modelbuilder could not be found! Check path in ini file!')
            if use_current_rivers:
                rivers_path = os.path.join(folder, riversfolder)
                logger.info('Using existing rivers from ' + rivers_path)
                if not os.path.exists(rivers_path):
                    logger.fatal('Could not find existing rivers!')
                    sys.exit()
            else:
                rivers_path = None
            # run modelbuilder
            runModelbuilder(folder, modelbuilder_path, catchments_path, rivers_path, logger)
        # check on paths which are vital for setup
        do_proceed = True
        out_clone_dir = os.path.join(folder, path)
        if not os.path.exists(out_clone_dir):
            try:
                os.mkdir(out_clone_dir)
            except:
                logger.fatal('Folder with/for maps does not exist and could not be created! Cannot set up maps! (' + out_clone_dir + ')')
                do_proceed = False
        if not os.path.exists(intblfolder):
            logger.fatal('intbl template folder does not exists! Cannot copy files! (' + intblfolder + ')')
            do_proceed = False
        # get hydro MERIT basin ID and bounding box (if this is to be used)
        if use_merit_derived:
            if not use_pyflwdir_point:
                xy2 = None
                # get basin ID
                if basin_id == -1:
                    if catchment_id.isnumeric():
                        logger.info('Reading hydro MERIT catchment ID from model folder name: ' + catchment_id)
                        basin_id = int(catchment_id) # model folder is basin id
                    else:
                        try:
                            check_files = glob.glob(os.path.join(folder, 'data', 'catchments', '*')) + glob.glob(os.path.join(folder, '*.basinid'))
                            for temp_file in check_files:
                                basin_id_str = os.path.basename(temp_file).split('.')[0]
                                if basin_id_str.isnumeric():
                                    logger.info(f'Reading hydro MERIT catchment ID from file in catchments folder: {basin_id_str}')
                                    basin_id = int(basin_id_str) # file in catchments folder is basin id
                                    break
                        except:
                            pass
                # check if basin ID can be found in CSV files
                if basin_id != -1:
                    try:
                        # find relevant CSV file
                        pfaf_id = basin_id // 10**7
                        fn_outlets = os.path.join(directory_topo, f'pfaf{pfaf_id:02d}_outlets.csv')
                        merit_basin_lookup = pd.read_csv(fn_outlets, index_col='pfaf3')
                        # get basin bounding box listed in CSV
                        bbox = merit_basin_lookup.loc[basin_id, ['xmin', 'ymin', 'xmax', 'ymax']].values
                    except:
                        basin_id = -1
                if basin_id == -1:
                    logger.fatal('Could not find correct hydro MERIT catchment ID! Cannot execute required setup for this catchment/case!')
                    do_proceed = False
            else:
                # get xy (lon,lat) point
                try:
                    for temp_file in os.listdir(os.path.join(folder, 'data', 'catchments')) + glob.glob(os.path.join(folder, '*.xy')):
                        if temp_file != 'catchments.geojson':
                            # check if file contains xy coords tuple
                            fn_temp_file = getabspath(temp_file, os.path.join(folder, 'data', 'catchments'))
                            with open(fn_temp_file, 'r') as f:
                                xy = tuple(float(s) for s in f.readline().strip().split(','))
                            logger.info(f'Getting hydro MERIT catchment from point (x: {xy[0]}, y:{xy[1]})')
                            # get basin bounding box and ID from point
                            bbox, basin_id, xy2 = get_merit_basin_bbox(xy, directory_topo, upstream_from_point=upstream_from_point, min_sto=min_stream_order)
                            if not upstream_from_point:
                                logger.info(f'Hydro-MERIT catchment ID at point (x: {xy[0]}, y:{xy[1]}): {basin_id}')
                                xy2 = None # this is important to not add a pit at the point location later
                            else:
                                logger.info(f'Point snapped to (x: {xy2[0]:.5f}, y:{xy2[1]:5f}) based on a minimum required stream order of {min_stream_order}')
                            break
                except:
                    logger.fatal('Could not find correct hydro MERIT catchment from point location! Cannot execute required setup for this catchment/case!')
                    do_proceed = False
        # reservoir and lakes ini; moved forward incase do_proceed == False
        res_catch_intersect = None
        do_reservoirs_catchment = False
        lakes_catch_intersect = None
        do_lakes_catchment = False
        # start of actual processing
        if do_proceed:
            if use_merit_derived:
                # get topographic base data from hydro MERIT
                logger.info('Starting pyflwdir processing...')
                # get scale ratio from MERIT native resolution and desired model resolution
                res = 1/1200. # hydro MERIT data has 3 arcsec resolution
                scale_ratio = resolution / res
                if scale_ratio.is_integer():
                    scale_ratio = int(scale_ratio)
                    logger.info('Scale ratio (model/MERIT) is ' + str(scale_ratio))
                else:
                    logger.warning('Scale ratio (model/MERIT) is not an integer! (' + str(scale_ratio) + ')')
                    scale_ratio = round(scale_ratio)
                    logger.warning('Rounding scale ratio to ' + str(scale_ratio))
                    resolution = res * scale_ratio
                    logger.warning('Model resolution changed to ' + str(resolution))
                # upscale flow direction and get subgrid basins (and update model resolution if required)
                logger.info('Upscaling hydro MERIT data...')
                upscale_merit_basin(scale_ratio, bbox, basin_id, directory_topo, folder, xy=xy2, logger=logger)
                # use flow direction network to derive relevant topographic maps
                logger.info('Obtaining topographic maps from hydro MERIT data...')
                network_merit_basin(folder, smooth_len=smooth_len, ucat_ratio=ucat_ratio, riv_shape=get_pyflwdir_riv, basin_shape=True, logger=logger)
                # perform simple resampling to model resolution of slope and elevation maps
                logger.info('Resampling slope and elevation maps to model resolution...')
                resample_merit_basin(directory_topo, folder)
                # write wflow topographic static maps based on just created GTiff maps
                logger.info('Writing wflow topographic staticmaps...')
                wflow_topomaps(folder, riv_upa=riv_upa, logger=logger)
                # move catchments geojson to its expected locations
                logger.info('Copying catchments.geojson derived from hydro MERIT to ' + catchments_path)
                shutil.copy(os.path.join(folder, 'catchments.geojson'), catchments_path)
                os.remove(os.path.join(folder, 'catchments.geojson'))
                # move other data created with pyflwdir to dedicated folder (or remove if specified in setup ini)
                try:
                    shutil.rmtree(os.path.join(folder, 'pyflwdir'))
                    logger.info('Clearing any previously stored pyflwdir files...')
                except:
                    logger.warning('Could not clear previously stored pyflwdir files!')
                if save_pyflwdir:
                    logger.info('Moving data created with pyflwdir to ' + os.path.join(folder, 'pyflwdir'))
                    try:
                        os.mkdir(os.path.join(folder, 'pyflwdir'))
                    except:
                        pass
                else:
                    logger.info('Removing all other data now created with pyflwdir...')
                # sweep intermediate pyflwdir output out of the model folder (copy first when save_pyflwdir)
                for file_or_dir in os.listdir(folder):
                    if os.path.isfile(os.path.join(folder, file_or_dir)):
                        if not file_or_dir.split('.')[-1] in ['txt', 'ini']:
                            if save_pyflwdir:
                                try:
                                    shutil.copy(os.path.join(folder, file_or_dir), os.path.join(folder, 'pyflwdir', file_or_dir))
                                except:
                                    logger.warning('Could not copy ' + file_or_dir)
                            try:
                                os.remove(os.path.join(folder, file_or_dir))
                            except:
                                logger.warning('Could not delete ' + file_or_dir)
                    else:
                        if file_or_dir in ['flwdir', 'river', 'upadff_1perc']:
                            if save_pyflwdir:
                                try:
                                    os.mkdir(os.path.join(folder, 'pyflwdir', file_or_dir))
                                except:
                                    pass
                                for file_or_dir_2 in os.listdir(os.path.join(folder, file_or_dir)):
                                    if os.path.isfile(os.path.join(folder, file_or_dir, file_or_dir_2)):
                                        try:
                                            shutil.copy(os.path.join(folder, file_or_dir, file_or_dir_2), os.path.join(folder, 'pyflwdir', file_or_dir, file_or_dir_2))
                                        except:
                                            logger.warning('Could not copy ' + file_or_dir_2)
                            try:
                                shutil.rmtree(os.path.join(folder, file_or_dir))
                            except:
                                logger.warning('Could not delete ' + file_or_dir)
                logger.info('Finished pyflwdir processing.')
                if get_custom_widths:
                    logger.info('Overwriting MERIT Hydro riverwidths with custom riverwidths...')
                    setup_river_widths.createWidthsMap(out_clone_dir, path_grdc_stations, os.path.join(parameters_path, 'riverwidth'), debug_path=setup_path, logger=logger)
            # read catchment geojson file, check if valid, fix if not
            catchment = gpd.read_file(catchments_path)
            if catchment.geometry.is_valid.all():
                pass
            else:
                logger.warning('Catchment is not valid, probably due to self-intersection point(s)! Fixing...')
                logger.info('Copying current catchment file to ' + setup_path)
                shutil.copy(catchments_path, os.path.join(setup_path, 'catchments_original.geojson'))
                # changing geometry type, saving to new file
                changeGeoJSONgeomType(setup_path, logger=logger)
                # read in new catchments geojson file and check if valid
                catchment = gpd.read_file(os.path.join(setup_path, 'catchments_v2.geojson'))
                if catchment.geometry.is_valid.all():
                    logger.info('Catchment is now valid. Converting from MultiLineStrings back to MultiPolygon(s).')
                    for catch_i in range(len(catchment.geometry)):
                        temp_polys = []
                        for catch_j in range(len(catchment.geometry[catch_i])):
                            temp_coords = catchment.geometry[catch_i][catch_j].coords
                            # only closed rings can become polygons
                            if temp_coords[0] == temp_coords[-1]:
                                temp_polys.append(Polygon(temp_coords))
                            else:
                                logger.error('First and last coordinate are not the same for feature '+ str((catch_i+1)*(catch_j+1)) + '! Cannot create valid closed polygon!')
                        temp_geom = MultiPolygon(temp_polys)
                        catchment.geometry[catch_i] = temp_geom
                else:
                    logger.error('Catchment still not valid! Proceeding with setup but good results cannot be guaranteerd! Check lakes and reservoirs when finished!')
            # get reservoirs of sufficient size within catchment
            if do_reservoirs:
                reservoirs_check = checkWaterbodies('reservoir', catchment, reservoirs_path, min_area=res_min_area, logger=logger)
                res_catch_intersect = reservoirs_check['data']
                do_reservoirs_catchment = reservoirs_check['do_process']
            # get lakes of sufficient size within catchment
            if do_lakes:
                lakes_check = checkWaterbodies('lake', catchment, lakes_path, min_area=lake_min_area, logger=logger)
                lakes_catch_intersect = lakes_check['data']
                do_lakes_catchment = lakes_check['do_process']
            # gauging stations
            if get_grdc_gauges:
                logger.info('Using GRDC stations to construct wflow_gauges.map file...')
                if use_merit_derived:
                    uparea_src = rasterio.open(os.path.join(out_clone_dir, 'wflow_uparea.map'), 'r')
                    #uparea_data = uparea_src.read(1)
                    uparea_meta = uparea_src.meta
                    # read GRDC stations as DataFrame from Excel file
                    logger.info('Reading GRDC stations from ' + path_grdc_stations)
                    gauging_stations = pd.read_excel(path_grdc_stations, na_values='n.a.')
                    logger.info("Removing stations with no valid 'area' value...")
                    gauging_stations = gauging_stations.loc[~gauging_stations['area'].isnull()]
                    gauging_stations = gauging_stations.loc[gauging_stations['area']>0]
                    # convert to GeoDataFrame
                    gauging_stations = gpd.GeoDataFrame(gauging_stations, geometry=[Point(x, y) for x, y in zip(gauging_stations.long, gauging_stations.lat)])
                    # get only stations within catchment
                    gauging_stations = gauging_stations.loc[~gpd.tools.sjoin(gauging_stations, catchment, how='left')['index_right'].isnull()]
                    logger.info('Number of potentially valid GRDC stations found within catchment: ' + str(len(gauging_stations)))
                    # check if there are any stations to proceed
                    if not gauging_stations.empty:
                        # relocate stations
                        logger.info('Relocating GRDC stations so that they are located on model river cells...')
                        #relocateStation(lat, lon, area, uparea_dataset, nodata=-9999, point_buffer_val=0.04, point_buffer_style=3, area_margin=0.5, digits_new_coords=4)
                        gauging_stations['geometry'] = gauging_stations.apply(lambda row, uparea_src=uparea_src, uparea_meta=uparea_meta: relocateStation(row['lat'], row['long'], row['area'], uparea_src, uparea_meta['nodata']), axis=1)
                        # remove stations that could not be relocated (i.e. no upstream area within allowed margin found within window around base location)
                        gauging_stations = gauging_stations.loc[~gauging_stations['geometry'].isnull()]
                        logger.info('Number of GRDC stations left after relocating: ' + str(len(gauging_stations)))
                        # remove stations located in waterbodies
                        logger.warning('Step to remove GRDC stations located within lakes or reservoirs has not been implemented yet! Check if wflow_gauges.map is correct!')
                        # convert to map
                        out_arr = pcr.pcr2numpy(pcr.readmap(os.path.join(out_clone_dir, 'wflow_subcatch.map')), uparea_meta['nodata'])
                        out_arr = (out_arr/out_arr)-1 #make sure default array contains zeros only
                        shapes = ((geom,value) for geom, value in zip(gauging_stations.geometry, gauging_stations['grdc_no']))
                        gauges_arr = rasterio.features.rasterize(shapes=shapes, fill=0, out=out_arr, transform=uparea_meta['transform'], all_touched=False)
                        gauges_arr[gauges_arr==0] = uparea_meta['nodata'] # prevent zeros in output map
                        createMapFile(gauges_arr, out_clone_dir, 'wflow_gauges', uparea_meta, logger)
                        logger.info('New wflow_gauges.map created from GRDC stations.')
                    else:
                        logger.warning('No valid GRDC stations found within catchment, skipping this procedure!')
                else:
                    logger.warning('Creation of wflow_gauges.map from GRDC stations only implemented with hydroMERIT! Cannot be executed with current setup!')
            # catchment/model ini file (template)
            if template_ini:
                if model_ini.endswith('.ini'):
                    template_ini_path = getabspath(model_ini, root)
                else:
                    template_ini_path = os.path.join(script_root, model_ini+'.ini')
                logger.info('Overwriting model ini file with template from ' + template_ini_path)
                if os.path.exists(template_ini_path):
                    if os.path.exists(model_ini_path):
                        os.remove(model_ini_path)
                    shutil.copy(template_ini_path, model_ini_path)
                else:
                    logger.error('Could not find specified template ini file!')
            # staticmaps
            if not 'no_generic' in tests:
                logger.info('Setting up maps in ' + out_clone_dir)
                make_clone(resolution, settings_path, interp_soilthick, M_method, M_minmax, parameters_path, out_clone_dir, clonefile, logger)
            else:
                logger.info('TEST MODE: skipping creation of new generic map files.')
            if not use_merit_derived:
                if not 'no_slope' in tests:
                    get_slope(path_geojson=os.path.join(modelbuilder_path, 'settings.json'), path_model_abs=folder, path_DEM_rel=os.path.join('data', 'dem'), dst_res=0.005, region_filter="region", do_delete_temp=True, logger=logger)
                else:
                    logger.info('TEST MODE: skipping creation of new upscaled slope map.')
            # check if all staticmaps specified in setup ini file were created
            if not 'no_generic' in tests:
                checkStaticmaps(staticmaps_check, out_clone_dir, logger)
            logger.info('Finished maps setup')
            # update riverwidth map using all lakes and reservoirs within catchment (no restrictions on size)
            if use_merit_derived and not get_custom_widths and not 'no_rivwidth' in tests:
                reservoirs_all = getWaterbodiesCatchment(catchment, reservoirs_path)
                lakes_all = getWaterbodiesCatchment(catchment, lakes_path)
                updateRiverWidths(out_clone_dir, reservoirs_all, lakes_all, flag=-2, logger=logger)
            # intbl's
            out_intbl_dir = os.path.join(folder, 'intbl')
            if not os.path.exists(out_intbl_dir):
                os.makedirs(out_intbl_dir, exist_ok=True)
            if not 'no_intbl' in tests:
                logger.info("Clearing contents of intbl folder...")
                if os.path.exists(out_intbl_dir):
                    for temp_file in os.listdir(out_intbl_dir):
                        os.remove(os.path.join(out_intbl_dir, temp_file))
                logger.info("Copying template intbl's to intbl folder...")
                for temp_file in os.listdir(intblfolder):
                    if temp_file.split('.tbl')[0] not in intbls_check:
                        logger.warning('Template intbl copied to intbl folder was not listed in setup ini file: ' + temp_file.split('.tbl')[0])
                    shutil.copy(os.path.join(intblfolder, temp_file), os.path.join(out_intbl_dir, temp_file))
                # check if all intbl specified in setup ini file were created
                checkIntbl(intbls_check, out_intbl_dir, logger)
                logger.info('Finished intbl setup')
            else:
                logger.info("TEST MODE: skipping creation of new intbl's.")
            # reservoirs/lakes
            processWaterbodies('reservoir', do_reservoirs_catchment, res_catch_intersect, out_intbl_dir, out_clone_dir, model_ini_path, [res_minfrac_min,res_minfrac_max], [res_fullfrac_min,res_fullfrac_max], res_intbl_method, setup_path, logger)
            processWaterbodies('lake', do_lakes_catchment, lakes_catch_intersect, out_intbl_dir, out_clone_dir, model_ini_path, None, None, None, setup_path, logger)
            # catchment/model ini file
            if not 'no_discharge' in tests:
                logger.info('Calculating AnnualDischarge...')
                if os.path.exists(discharges_path):
                    annualDischarge = flo1k.getAnnualDischarge(discharges_path, catchment, debug_discharge, debug_path=setup_path, logger=logger)
                else:
                    logger.error('Path to discharge dataset is invalid! Could not calculate AnnualDischarge!')
            else:
                annualDischarge = 2290 # default value for testing, can be used to suppress slow calculation from data
                logger.info('TEST MODE: skipping calculation of AnnualDischarge, using pre-set default value instead.')
            # NOTE(review): annualDischarge is left unassigned when discharges_path is missing — the next line would raise NameError; confirm intended
            logger.info('AnnualDischarge is ' + str(annualDischarge) + ' m3/s')
            replaceLinesIniFile(['AnnualDischarge', 'Alpha'], [annualDischarge, alpha], model_ini_path, logger=logger)
            # clear outdated folders
            logger.info('Clearing outdated folders...')
            outdated_folders = ['outstate']
            for outdated_folder in outdated_folders:
                outdated_path = os.path.join(folder, outdated_folder)
                if os.path.exists(outdated_path) and os.path.isdir(outdated_path):
                    logger.info('Clearing ' + outdated_path)
                    try:
                        for outdated_file in os.listdir(outdated_path):
                            os.remove(os.path.join(outdated_path, outdated_file))
                    except:
                        logger.error('Could not clear file(s) from ' + outdated_path)
        # copy relevant files to setup/debug folder
        logger.info('Copying files (settings and scripts) used in setup procedure to ' + setup_path)
        list_scripts = [sys.argv[0], ini_file, 'catchment_FLO1K.py', 'setup_logging.py']
        if not use_merit_derived:
            list_scripts.extend(['upscaled_slope.py'])
        if (do_proceed) and (do_reservoirs_catchment or do_lakes_catchment):
            list_scripts.extend(['waterbodies.py', 'wflow_lake_intbl.py', 'wflow_reservoir_intbl.py', 'reservoir_intbl_utils.py'])
        copyFiles(setup_path, [settings_path], script_root, list_scripts, logger)
        # ReadMe file
        logger.info('Writing ReadMe file in ' + folder)
        createReadMe(folder, setup_folder)
        # check log stats
        logger_stats = setup_logging.getStats(logger)
        # add log stats with formatting
        log_stats.append("Catchment ID: " + catchment_id + "\n")
        log_stats.append("Warnings: " + str(logger_stats['warn']) + "\n")
        log_stats.append("Errors: " + str(logger_stats['err']) + "\n")
        log_stats.append("Criticals: " + str(logger_stats['crit']) + "\n\n")
        # end of setup for this catchment/case
        logger.info('Finished setup procedure of ' + catchment_id + ' (' + ini_file + ')')
        setup_logging.closeLogging()
        print('')
    return log_stats
def make_clones(ini_files):
    """Run the model setup procedure for each ini file and write a summary log.

    Parameters
    ----------
    ini_files : list of str
        Paths of setup ini files (typically taken from ``sys.argv[1:]``).
        Exits with an error message when empty.

    Side effects
    ------------
    Calls ``makeSingleIniClones`` once per ini file (full catchment setup
    procedure) and writes the collected top-level logging statistics to
    'LOG.txt' in the current working directory.
    """
    # Guard clause: nothing to do without at least one ini file.
    if not ini_files:
        sys.exit('ERROR: Please provide an ini file argument!')
    # Header of the top level log file, explaining the per-catchment stats.
    top_lvl_log = ["This file contains collected logging statistics of each individual catchment.\n",
                   "Detailed log files for each catchment can be found in their respective directories.\n\n",
                   "Warning = Potential issue encountered, but a workaround was used instead. Good to check.\n",
                   "Error = Potential issue encountered without any workarounds. Should be checked.\n",
                   "Critical = Critical issue(s) that prevented processing of this catchment.\n\n",
                   "Timestamp: "+ datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n\n"]
    # Run the setup for every ini file. makeSingleIniClones returns a list of
    # log lines which is appended as a nested entry and flattened on writing.
    # NOTE: a single loop handles both the one-file and many-file cases; the
    # previous special-cased single-file branch duplicated this body verbatim.
    print('')
    for ini_file in ini_files:
        top_lvl_log.append("Ini file: " + ini_file + "\n\n")
        top_lvl_log.append(makeSingleIniClones(ini_file))
    # Write the top level logging information to LOG.txt in the working dir.
    log_path = os.path.join(os.getcwd(), 'LOG.txt')
    print('Writing top level log file to ' + log_path)
    with open(log_path, 'w') as log:
        for entry in top_lvl_log:
            if isinstance(entry, str):
                log.write(entry)
            else:
                # nested list of log lines from makeSingleIniClones
                log.writelines(entry)
if __name__ == "__main__":
make_clones(sys.argv[1:]) | [
"configparser.ConfigParser",
"pandas.read_csv",
"numpy.log",
"shapely.geometry.box",
"numpy.column_stack",
"shapely.geometry.Polygon",
"geopandas.overlay",
"sys.exit",
"numpy.sin",
"merit.merit_model_data.upscale_merit_basin",
"pandas.read_excel",
"numpy.arange",
"setup_logging.getStats",
... | [((1818, 1897), 'scipy.ndimage.distance_transform_edt', 'nd.distance_transform_edt', (['invalid'], {'return_distances': '(False)', 'return_indices': '(True)'}), '(invalid, return_distances=False, return_indices=True)\n', (1843, 1897), True, 'from scipy import ndimage as nd\n'), ((2578, 2602), 'numpy.radians', 'np.radians', (['coords[:, 0]'], {}), '(coords[:, 0])\n', (2588, 2602), True, 'import numpy as np\n'), ((2616, 2640), 'numpy.radians', 'np.radians', (['coords[:, 1]'], {}), '(coords[:, 1])\n', (2626, 2640), True, 'import numpy as np\n'), ((2895, 2921), 'numpy.column_stack', 'np.column_stack', (['(x, y, z)'], {}), '((x, y, z))\n', (2910, 2921), True, 'import numpy as np\n'), ((3310, 3591), 'rasterio.warp.reproject', 'rasterio.warp.reproject', ([], {'source': 'src', 'destination': 'arr', 'src_crs': "src_profile['crs']", 'src_nodata': "src_profile['nodata']", 'src_transform': "src_profile['transform']", 'dst_transform': "dst_profile['transform']", 'dst_crs': "dst_profile['crs']", 'resampling': 'resampling', 'num_threads': 'threads'}), "(source=src, destination=arr, src_crs=src_profile[\n 'crs'], src_nodata=src_profile['nodata'], src_transform=src_profile[\n 'transform'], dst_transform=dst_profile['transform'], dst_crs=\n dst_profile['crs'], resampling=resampling, num_threads=threads)\n", (3333, 3591), False, 'import rasterio\n'), ((7146, 7168), 'numpy.zeros', 'np.zeros', (['ks_[0].shape'], {}), '(ks_[0].shape)\n', (7154, 7168), True, 'import numpy as np\n'), ((7187, 7219), 'os.path.join', 'os.path.join', (['directory_out', '"""M"""'], {}), "(directory_out, 'M')\n", (7199, 7219), False, 'import os\n'), ((7234, 7272), 'os.path.join', 'os.path.join', (['directory_out', '"""KsatVer"""'], {}), "(directory_out, 'KsatVer')\n", (7246, 7272), False, 'import os\n'), ((9581, 9602), 'pandas.read_csv', 'pd.read_csv', (['settings'], {}), '(settings)\n', (9592, 9602), True, 'import pandas as pd\n'), ((9625, 9663), 'os.path.join', 'os.path.join', (['directory_out', 
'clonefile'], {}), '(directory_out, clonefile)\n', (9637, 9663), False, 'import os\n'), ((9676, 9701), 'rasterio.open', 'rasterio.open', (['clone_path'], {}), '(clone_path)\n', (9689, 9701), False, 'import rasterio\n'), ((9769, 9807), 'numpy.array', 'np.array', (['[0, 5, 15, 30, 60, 100, 200]'], {}), '([0, 5, 15, 30, 60, 100, 200])\n', (9777, 9807), True, 'import numpy as np\n'), ((16294, 16329), 'os.path.join', 'os.path.join', (['directory_out', '"""clim"""'], {}), "(directory_out, 'clim')\n", (16306, 16329), False, 'import os\n'), ((29523, 29571), 'os.path.join', 'os.path.join', (['modelbuilder_path', '"""settings.json"""'], {}), "(modelbuilder_path, 'settings.json')\n", (29535, 29571), False, 'import os\n'), ((30569, 30620), 'os.path.join', 'os.path.join', (['modelbuilder_path', 'modelbuilder_log_1'], {}), '(modelbuilder_path, modelbuilder_log_1)\n', (30581, 30620), False, 'import os\n'), ((30651, 30702), 'os.path.join', 'os.path.join', (['modelbuilder_path', 'modelbuilder_log_2'], {}), '(modelbuilder_path, modelbuilder_log_2)\n', (30663, 30702), False, 'import os\n'), ((30764, 30803), 'os.path.exists', 'os.path.exists', (['modelbuilder_log_1_path'], {}), '(modelbuilder_log_1_path)\n', (30778, 30803), False, 'import os\n'), ((30855, 30894), 'os.path.exists', 'os.path.exists', (['modelbuilder_log_2_path'], {}), '(modelbuilder_log_2_path)\n', (30869, 30894), False, 'import os\n'), ((31043, 31054), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (31052, 31054), False, 'import os\n'), ((33512, 33548), 'os.path.join', 'os.path.join', (['folder', '"""modelbuilder"""'], {}), "(folder, 'modelbuilder')\n", (33524, 33548), False, 'import os\n'), ((35111, 35142), 'geopandas.read_file', 'gpd.read_file', (['waterbodies_path'], {}), '(waterbodies_path)\n', (35124, 35142), True, 'import geopandas as gpd\n'), ((36523, 36554), 'geopandas.read_file', 'gpd.read_file', (['waterbodies_path'], {}), '(waterbodies_path)\n', (36536, 36554), True, 'import geopandas as gpd\n'), ((50226, 
50243), 'configparser.ConfigParser', 'cp.ConfigParser', ([], {}), '()\n', (50241, 50243), True, 'import configparser as cp\n'), ((55595, 55635), 'os.path.join', 'os.path.join', (['directory_in', 'paramsfolder'], {}), '(directory_in, paramsfolder)\n', (55607, 55635), False, 'import os\n'), ((55719, 55758), 'os.path.join', 'os.path.join', (['script_root', 'settingsfile'], {}), '(script_root, settingsfile)\n', (55731, 55758), False, 'import os\n'), ((1793, 1807), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (1801, 1807), True, 'import numpy as np\n'), ((2514, 2532), 'numpy.array', 'np.array', (['[coords]'], {}), '([coords])\n', (2522, 2532), True, 'import numpy as np\n'), ((2774, 2789), 'numpy.cos', 'np.cos', (['lon_rad'], {}), '(lon_rad)\n', (2780, 2789), True, 'import numpy as np\n'), ((2822, 2837), 'numpy.sin', 'np.sin', (['lon_rad'], {}), '(lon_rad)\n', (2828, 2837), True, 'import numpy as np\n'), ((2863, 2878), 'numpy.sin', 'np.sin', (['lat_rad'], {}), '(lat_rad)\n', (2869, 2878), True, 'import numpy as np\n'), ((3959, 3986), 'shapely.geometry.box', 'box', (['bnd0', 'bnd1', 'bnd2', 'bnd3'], {}), '(bnd0, bnd1, bnd2, bnd3)\n', (3962, 3986), False, 'from shapely.geometry import box\n'), ((4001, 4048), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["{'geometry': bbox}"], {'index': '[0]'}), "({'geometry': bbox}, index=[0])\n", (4017, 4048), True, 'import geopandas as gpd\n'), ((4370, 4417), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (["{'geometry': bbox}"], {'index': '[0]'}), "({'geometry': bbox}, index=[0])\n", (4386, 4417), True, 'import geopandas as gpd\n'), ((5164, 5178), 'numpy.exp', 'np.exp', (['(-b * x)'], {}), '(-b * x)\n', (5170, 5178), True, 'import numpy as np\n'), ((5652, 5704), 'scipy.optimize.curve_fit', 'curve_fit', (['func', 'z_i_[idx]', 'ks[idx, row, col]'], {'p0': 'p0'}), '(func, z_i_[idx], ks[idx, row, col], p0=p0)\n', (5661, 5704), False, 'from scipy.optimize import curve_fit\n'), ((5933, 6006), 'gdal.Translate', 'gdal.Translate', 
(["(path + ext + '.map')", "(path + ext + '.tif')"], {'options': 'options'}), "(path + ext + '.map', path + ext + '.tif', options=options)\n", (5947, 6006), False, 'import gdal\n'), ((7078, 7091), 'numpy.array', 'np.array', (['ks_'], {}), '(ks_)\n', (7086, 7091), True, 'import numpy as np\n'), ((15144, 15170), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (15160, 15170), False, 'import os\n'), ((15642, 15678), 'os.path.join', 'os.path.join', (['directory_out', '"""*.tif"""'], {}), "(directory_out, '*.tif')\n", (15654, 15678), False, 'import os\n'), ((15813, 15853), 'os.path.join', 'os.path.join', (['directory_out', '"""*.aux.xml"""'], {}), "(directory_out, '*.aux.xml')\n", (15825, 15853), False, 'import os\n'), ((16344, 16368), 'os.path.exists', 'os.path.exists', (['clim_dir'], {}), '(clim_dir)\n', (16358, 16368), False, 'import os\n'), ((16378, 16396), 'os.mkdir', 'os.mkdir', (['clim_dir'], {}), '(clim_dir)\n', (16386, 16396), False, 'import os\n'), ((16428, 16463), 'os.path.join', 'os.path.join', (['directory_out', '"""LAI*"""'], {}), "(directory_out, 'LAI*')\n", (16440, 16463), False, 'import os\n'), ((28267, 28279), 'json.load', 'json.load', (['f'], {}), '(f)\n', (28276, 28279), False, 'import json\n'), ((28604, 28622), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (28613, 28622), False, 'import json\n'), ((29808, 29823), 'geojson.load', 'geojson.load', (['f'], {}), '(f)\n', (29820, 29823), False, 'import geojson\n'), ((29886, 29901), 'geojson.load', 'geojson.load', (['c'], {}), '(c)\n', (29898, 29901), False, 'import geojson\n'), ((30297, 30335), 'geojson.dump', 'geojson.dump', (['modelbuilder_settings', 'f'], {}), '(modelbuilder_settings, f)\n', (30309, 30335), False, 'import geojson\n'), ((30813, 30847), 'os.remove', 'os.remove', (['modelbuilder_log_1_path'], {}), '(modelbuilder_log_1_path)\n', (30822, 30847), False, 'import os\n'), ((30904, 30938), 'os.remove', 'os.remove', (['modelbuilder_log_2_path'], {}), 
'(modelbuilder_log_2_path)\n', (30913, 30938), False, 'import os\n'), ((31072, 31099), 'os.chdir', 'os.chdir', (['modelbuilder_path'], {}), '(modelbuilder_path)\n', (31080, 31099), False, 'import os\n'), ((32236, 32272), 'subprocess.run', 'subprocess.run', (['modelbuilder_command'], {}), '(modelbuilder_command)\n', (32250, 32272), False, 'import subprocess\n'), ((32565, 32610), 'os.path.join', 'os.path.join', (['modelbuilder_path', 'catchment_id'], {}), '(modelbuilder_path, catchment_id)\n', (32577, 32610), False, 'import os\n'), ((33395, 33440), 'os.path.join', 'os.path.join', (['modelbuilder_path', 'catchment_id'], {}), '(modelbuilder_path, catchment_id)\n', (33407, 33440), False, 'import os\n'), ((33560, 33595), 'os.path.exists', 'os.path.exists', (['modelbuilder_folder'], {}), '(modelbuilder_folder)\n', (33574, 33595), False, 'import os\n'), ((33605, 33652), 'os.makedirs', 'os.makedirs', (['modelbuilder_folder'], {'exist_ok': '(True)'}), '(modelbuilder_folder, exist_ok=True)\n', (33616, 33652), False, 'import os\n'), ((34029, 34079), 'os.path.join', 'os.path.join', (['modelbuilder_folder', '"""settings.json"""'], {}), "(modelbuilder_folder, 'settings.json')\n", (34041, 34079), False, 'import os\n'), ((34122, 34175), 'os.path.join', 'os.path.join', (['modelbuilder_folder', 'modelbuilder_log_1'], {}), '(modelbuilder_folder, modelbuilder_log_1)\n', (34134, 34175), False, 'import os\n'), ((34218, 34271), 'os.path.join', 'os.path.join', (['modelbuilder_folder', 'modelbuilder_log_2'], {}), '(modelbuilder_folder, modelbuilder_log_2)\n', (34230, 34271), False, 'import os\n'), ((34365, 34388), 'os.chdir', 'os.chdir', (['curr_work_dir'], {}), '(curr_work_dir)\n', (34373, 34388), False, 'import os\n'), ((35463, 35522), 'geopandas.overlay', 'gpd.overlay', (['catchment', 'match_intersect'], {'how': '"""intersection"""'}), "(catchment, match_intersect, how='intersection')\n", (35474, 35522), True, 'import geopandas as gpd\n'), ((35657, 35677), 'geopandas.GeoDataFrame', 
'gpd.GeoDataFrame', (['[]'], {}), '([])\n', (35673, 35677), True, 'import geopandas as gpd\n'), ((36879, 36938), 'geopandas.overlay', 'gpd.overlay', (['catchment', 'match_intersect'], {'how': '"""intersection"""'}), "(catchment, match_intersect, how='intersection')\n", (36890, 36938), True, 'import geopandas as gpd\n'), ((41378, 41416), 'os.path.join', 'os.path.join', (['outdir', "(mapfile + '.map')"], {}), "(outdir, mapfile + '.map')\n", (41390, 41416), False, 'import os\n'), ((41418, 41456), 'os.path.join', 'os.path.join', (['outdir', "(mapfile + '.tif')"], {}), "(outdir, mapfile + '.tif')\n", (41430, 41456), False, 'import os\n'), ((41700, 41738), 'os.path.join', 'os.path.join', (['outdir', "(mapfile + '.tif')"], {}), "(outdir, mapfile + '.tif')\n", (41712, 41738), False, 'import os\n'), ((41914, 41960), 'os.path.join', 'os.path.join', (['outdir', "(mapfile + '.map.aux.xml')"], {}), "(outdir, mapfile + '.map.aux.xml')\n", (41926, 41960), False, 'import os\n'), ((42962, 43004), 'os.path.join', 'os.path.join', (['path', '"""wflow_riverwidth.map"""'], {}), "(path, 'wflow_riverwidth.map')\n", (42974, 43004), False, 'import os\n'), ((43165, 43197), 'rasterio.crs.CRS.from_epsg', 'rasterio.crs.CRS.from_epsg', (['(4326)'], {}), '(4326)\n', (43191, 43197), False, 'import rasterio\n'), ((43595, 43718), 'rasterio.features.rasterize', 'rasterio.features.rasterize', ([], {'shapes': 'lake_shapes', 'fill': '(0)', 'out': 'out_arr', 'transform': "map_meta['transform']", 'all_touched': '(True)'}), "(shapes=lake_shapes, fill=0, out=out_arr,\n transform=map_meta['transform'], all_touched=True)\n", (43622, 43718), False, 'import rasterio\n'), ((43772, 43817), 'numpy.where', 'np.where', (['(lakes_raster > 0)', 'flag', 'riverwidths'], {}), '(lakes_raster > 0, flag, riverwidths)\n', (43780, 43817), True, 'import numpy as np\n'), ((44259, 44387), 'rasterio.features.rasterize', 'rasterio.features.rasterize', ([], {'shapes': 'reservoir_shapes', 'fill': '(0)', 'out': 'out_arr', 
'transform': "map_meta['transform']", 'all_touched': '(True)'}), "(shapes=reservoir_shapes, fill=0, out=out_arr,\n transform=map_meta['transform'], all_touched=True)\n", (44286, 44387), False, 'import rasterio\n'), ((44446, 44496), 'numpy.where', 'np.where', (['(reservoirs_raster > 0)', 'flag', 'riverwidths'], {}), '(reservoirs_raster > 0, flag, riverwidths)\n', (44454, 44496), True, 'import numpy as np\n'), ((44604, 44646), 'os.path.join', 'os.path.join', (['path', '"""wflow_riverwidth.map"""'], {}), "(path, 'wflow_riverwidth.map')\n", (44616, 44646), False, 'import os\n'), ((44648, 44699), 'os.path.join', 'os.path.join', (['path', '"""wflow_riverwidth_original.map"""'], {}), "(path, 'wflow_riverwidth_original.map')\n", (44660, 44699), False, 'import os\n'), ((44945, 44987), 'os.path.join', 'os.path.join', (['path', '"""wflow_riverwidth.map"""'], {}), "(path, 'wflow_riverwidth.map')\n", (44957, 44987), False, 'import os\n'), ((45005, 45051), 'os.path.join', 'os.path.join', (['path', '"""wflow_riverwidth_new.map"""'], {}), "(path, 'wflow_riverwidth_new.map')\n", (45017, 45051), False, 'import os\n'), ((45053, 45095), 'os.path.join', 'os.path.join', (['path', '"""wflow_riverwidth.map"""'], {}), "(path, 'wflow_riverwidth.map')\n", (45065, 45095), False, 'import os\n'), ((45111, 45157), 'os.path.join', 'os.path.join', (['path', '"""wflow_riverwidth_new.map"""'], {}), "(path, 'wflow_riverwidth_new.map')\n", (45123, 45157), False, 'import os\n'), ((46043, 46068), 'os.listdir', 'os.listdir', (['out_clone_dir'], {}), '(out_clone_dir)\n', (46053, 46068), False, 'import os\n'), ((48527, 48569), 'numpy.ma.masked_where', 'np.ma.masked_where', (['mask_final', 'data[0][0]'], {}), '(mask_final, data[0][0])\n', (48545, 48569), True, 'import numpy as np\n'), ((48864, 48891), 'numpy.meshgrid', 'np.meshgrid', (['x_dist', 'y_dist'], {}), '(x_dist, y_dist)\n', (48875, 48891), True, 'import numpy as np\n'), ((49254, 49324), 'numpy.round', 'np.round', (['(data[1][5] + (ind_lat + 0.5) * 
data[1][4])', 'digits_new_coords'], {}), '(data[1][5] + (ind_lat + 0.5) * data[1][4], digits_new_coords)\n', (49262, 49324), True, 'import numpy as np\n'), ((49343, 49413), 'numpy.round', 'np.round', (['(data[1][2] + (ind_lon + 0.5) * data[1][0])', 'digits_new_coords'], {}), '(data[1][2] + (ind_lon + 0.5) * data[1][0], digits_new_coords)\n', (49351, 49413), True, 'import numpy as np\n'), ((49429, 49452), 'shapely.geometry.Point', 'Point', (['new_lon', 'new_lat'], {}), '(new_lon, new_lat)\n', (49434, 49452), False, 'from shapely.geometry import Point\n'), ((49576, 49595), 'os.path.isabs', 'os.path.isabs', (['path'], {}), '(path)\n', (49589, 49595), False, 'import os\n'), ((50124, 50149), 'os.path.abspath', 'os.path.abspath', (['ini_file'], {}), '(ini_file)\n', (50139, 50149), False, 'import os\n'), ((50185, 50211), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (50201, 50211), False, 'import os\n'), ((55531, 55571), 'os.path.join', 'os.path.join', (['directory_out', 'clonefolder'], {}), '(directory_out, clonefolder)\n', (55543, 55571), False, 'import os\n'), ((55948, 56261), 'sys.exit', 'sys.exit', (['"""ERROR: Running modelbuilder while using MERIT derived data! This will cause MERIT derived data to be overwritten with modelbuilder results, which makes it impossible to use MERIT derived data! Please choose one of the two, and note that it is advised to use MERIT derived data when possible."""'], {}), "(\n 'ERROR: Running modelbuilder while using MERIT derived data! This will cause MERIT derived data to be overwritten with modelbuilder results, which makes it impossible to use MERIT derived data! Please choose one of the two, and note that it is advised to use MERIT derived data when possible.'\n )\n", (55956, 56261), False, 'import sys\n'), ((56312, 56442), 'sys.exit', 'sys.exit', (['"""ERROR: Custom river widths can only be obtained when using MERIT derived data! 
Please adjust setup ini file!"""'], {}), "(\n 'ERROR: Custom river widths can only be obtained when using MERIT derived data! Please adjust setup ini file!'\n )\n", (56320, 56442), False, 'import sys\n'), ((56491, 56631), 'sys.exit', 'sys.exit', (['"""ERROR: GRDC stations for gauges map can only be used when also using MERIT derived data! Please adjust setup ini file!"""'], {}), "(\n 'ERROR: GRDC stations for gauges map can only be used when also using MERIT derived data! Please adjust setup ini file!'\n )\n", (56499, 56631), False, 'import sys\n'), ((56840, 56871), 'os.path.exists', 'os.path.exists', (['parameters_path'], {}), '(parameters_path)\n', (56854, 56871), False, 'import os\n'), ((56881, 56998), 'sys.exit', 'sys.exit', (['"""ERROR: Path to parameter base data does not exist! Check setup ini file and/or input directory!"""'], {}), "(\n 'ERROR: Path to parameter base data does not exist! Check setup ini file and/or input directory!'\n )\n", (56889, 56998), False, 'import sys\n'), ((57022, 57149), 'sys.exit', 'sys.exit', (['"""ERROR: Folder(s) where model should be set up do not exist! Check setup ini file and/or output directory!"""'], {}), "(\n 'ERROR: Folder(s) where model should be set up do not exist! 
Check setup ini file and/or output directory!'\n )\n", (57030, 57149), False, 'import sys\n'), ((57348, 57386), 'os.path.join', 'os.path.join', (['folder', 'catchmentsfolder'], {}), '(folder, catchmentsfolder)\n', (57360, 57386), False, 'import os\n'), ((57553, 57593), 'os.path.join', 'os.path.join', (['folder', "(model_ini + '.ini')"], {}), "(folder, model_ini + '.ini')\n", (57565, 57593), False, 'import os\n'), ((57660, 57694), 'os.path.join', 'os.path.join', (['folder', 'setup_folder'], {}), '(folder, setup_folder)\n', (57672, 57694), False, 'import os\n'), ((58089, 58127), 'setup_logging.setupLogging', 'setup_logging.setupLogging', (['setup_path'], {}), '(setup_path)\n', (58115, 58127), False, 'import setup_logging\n'), ((59996, 60022), 'os.path.join', 'os.path.join', (['folder', 'path'], {}), '(folder, path)\n', (60008, 60022), False, 'import os\n'), ((83296, 83326), 'setup_logging.getStats', 'setup_logging.getStats', (['logger'], {}), '(logger)\n', (83318, 83326), False, 'import setup_logging\n'), ((83831, 83859), 'setup_logging.closeLogging', 'setup_logging.closeLogging', ([], {}), '()\n', (83857, 83859), False, 'import setup_logging\n'), ((85220, 85231), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (85229, 85231), False, 'import os\n'), ((2365, 2383), 'numpy.asarray', 'np.asarray', (['coords'], {}), '(coords)\n', (2375, 2383), True, 'import numpy as np\n'), ((2756, 2771), 'numpy.cos', 'np.cos', (['lat_rad'], {}), '(lat_rad)\n', (2762, 2771), True, 'import numpy as np\n'), ((2804, 2819), 'numpy.cos', 'np.cos', (['lat_rad'], {}), '(lat_rad)\n', (2810, 2819), True, 'import numpy as np\n'), ((3232, 3287), 'numpy.empty', 'np.empty', (["(dst_profile['height'], dst_profile['width'])"], {}), "((dst_profile['height'], dst_profile['width']))\n", (3240, 3287), True, 'import numpy as np\n'), ((5502, 5527), 'numpy.log', 'np.log', (['ks[idx, row, col]'], {}), '(ks[idx, row, col])\n', (5508, 5527), True, 'import numpy as np\n'), ((5758, 5812), 'rasterio.open', 
'rasterio.open', (["(path + ext + '.tif')", '"""w"""'], {}), "(path + ext + '.tif', 'w', **dst_profile)\n", (5771, 5812), False, 'import rasterio\n'), ((10089, 10112), 'rasterio.open', 'rasterio.open', (['filepath'], {}), '(filepath)\n', (10102, 10112), False, 'import rasterio\n'), ((10581, 10627), 'rasterio.mask.mask', 'mask', (['src', 'shapes'], {'crop': '(True)', 'all_touched': '(True)'}), '(src, shapes, crop=True, all_touched=True)\n', (10585, 10627), False, 'from rasterio.mask import mask\n'), ((11218, 11248), 'os.path.basename', 'os.path.basename', (['files[index]'], {}), '(files[index])\n', (11234, 11248), False, 'import os\n'), ((14338, 14380), 'os.path.join', 'os.path.join', (['directory_out', 'dst_tiff_file'], {}), '(directory_out, dst_tiff_file)\n', (14350, 14380), False, 'import os\n'), ((14608, 14649), 'os.path.join', 'os.path.join', (['directory_out', 'dst_map_file'], {}), '(directory_out, dst_map_file)\n', (14620, 14649), False, 'import os\n'), ((14755, 14813), 'gdal.Translate', 'gdal.Translate', (['path_map', 'path_tif'], {'options': '"""-of PCRaster"""'}), "(path_map, path_tif, options='-of PCRaster')\n", (14769, 14813), False, 'import gdal\n'), ((14915, 14958), 'os.path.join', 'os.path.join', (['directory_out', '"""KsatVer*.tif"""'], {}), "(directory_out, 'KsatVer*.tif')\n", (14927, 14958), False, 'import os\n'), ((14989, 15030), 'os.path.join', 'os.path.join', (['directory_out', '"""theta*.tif"""'], {}), "(directory_out, 'theta*.tif')\n", (15001, 15030), False, 'import os\n'), ((15324, 15347), 'rasterio.open', 'rasterio.open', (['filepath'], {}), '(filepath)\n', (15337, 15347), False, 'import rasterio\n'), ((15706, 15718), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (15715, 15718), False, 'import os\n'), ((15880, 15892), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (15889, 15892), False, 'import os\n'), ((16767, 16815), 'os.path.join', 'os.path.join', (['directory_out', '"""SoilThickness.map"""'], {}), "(directory_out, 
'SoilThickness.map')\n", (16779, 16815), False, 'import os\n'), ((16816, 16867), 'os.path.join', 'os.path.join', (['directory_out', '"""SoilMinThickness.map"""'], {}), "(directory_out, 'SoilMinThickness.map')\n", (16828, 16867), False, 'import os\n'), ((17081, 17141), 'os.path.join', 'os.path.join', (['directory_out', '"""RootingDepth_d75_300x300m.map"""'], {}), "(directory_out, 'RootingDepth_d75_300x300m.map')\n", (17093, 17141), False, 'import os\n'), ((17142, 17189), 'os.path.join', 'os.path.join', (['directory_out', '"""RootingDepth.map"""'], {}), "(directory_out, 'RootingDepth.map')\n", (17154, 17189), False, 'import os\n'), ((27159, 27198), 'os.path.join', 'os.path.join', (['path_readme', '"""ReadMe.txt"""'], {}), "(path_readme, 'ReadMe.txt')\n", (27171, 27198), False, 'import os\n'), ((28211, 28239), 'os.path.join', 'os.path.join', (['path', 'file_old'], {}), '(path, file_old)\n', (28223, 28239), False, 'import os\n'), ((28555, 28583), 'os.path.join', 'os.path.join', (['path', 'file_new'], {}), '(path, file_new)\n', (28567, 28583), False, 'import os\n'), ((28672, 28700), 'os.path.join', 'os.path.join', (['path', 'file_old'], {}), '(path, file_old)\n', (28684, 28700), False, 'import os\n'), ((31284, 31294), 'sys.exit', 'sys.exit', ([], {}), '()\n', (31292, 31294), False, 'import sys\n'), ((32361, 32371), 'sys.exit', 'sys.exit', ([], {}), '()\n', (32369, 32371), False, 'import sys\n'), ((32678, 32733), 'os.path.join', 'os.path.join', (['modelbuilder_path', 'catchment_id', 'temp_dir'], {}), '(modelbuilder_path, catchment_id, temp_dir)\n', (32690, 32733), False, 'import os\n'), ((33750, 33781), 'os.listdir', 'os.listdir', (['modelbuilder_folder'], {}), '(modelbuilder_folder)\n', (33760, 33781), False, 'import os\n'), ((34575, 34585), 'sys.exit', 'sys.exit', ([], {}), '()\n', (34583, 34585), False, 'import sys\n'), ((35600, 35620), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['[]'], {}), '([])\n', (35616, 35620), True, 'import geopandas as gpd\n'), ((39444, 
39620), 'wflow_reservoir_intbl.make_tbls', 'setup_reservoir_intbl.make_tbls', (['waterbodies', 'out_intbl_dir'], {'range_min': 'res_range_min', 'range_max': 'res_range_max', 'method': 'res_method', 'debug_path': 'debug_path', 'logger': 'logger'}), '(waterbodies, out_intbl_dir, range_min=\n res_range_min, range_max=res_range_max, method=res_method, debug_path=\n debug_path, logger=logger)\n', (39475, 39620), True, 'import wflow_reservoir_intbl as setup_reservoir_intbl\n'), ((39919, 40050), 'waterbodies.make_maps', 'setup_waterbody_maps.make_maps', (['waterbodies', '(False)', 'out_clone_dir', '"""wflow_reservoirareas"""', '"""wflow_reservoirlocs"""'], {'logger': 'logger'}), "(waterbodies, False, out_clone_dir,\n 'wflow_reservoirareas', 'wflow_reservoirlocs', logger=logger)\n", (39949, 40050), True, 'import waterbodies as setup_waterbody_maps\n'), ((40878, 40916), 'os.path.join', 'os.path.join', (['outdir', "(mapfile + '.map')"], {}), "(outdir, mapfile + '.map')\n", (40890, 40916), False, 'import os\n'), ((41116, 41154), 'os.path.join', 'os.path.join', (['outdir', "(mapfile + '.tif')"], {}), "(outdir, mapfile + '.tif')\n", (41128, 41154), False, 'import os\n'), ((41759, 41797), 'os.path.join', 'os.path.join', (['outdir', "(mapfile + '.tif')"], {}), "(outdir, mapfile + '.tif')\n", (41771, 41797), False, 'import os\n'), ((41981, 42027), 'os.path.join', 'os.path.join', (['outdir', "(mapfile + '.map.aux.xml')"], {}), "(outdir, mapfile + '.map.aux.xml')\n", (41993, 42027), False, 'import os\n'), ((48808, 48839), 'numpy.arange', 'np.arange', (['data_masked.shape[0]'], {}), '(data_masked.shape[0])\n', (48817, 48839), True, 'import numpy as np\n'), ((49629, 49653), 'os.path.join', 'os.path.join', (['root', 'path'], {}), '(root, path)\n', (49641, 49653), False, 'import os\n'), ((50303, 50355), 'sys.exit', 'sys.exit', (['"""ERROR: Not possible to open \'ini\'- file."""'], {}), '("ERROR: Not possible to open \'ini\'- file.")\n', (50311, 50355), False, 'import sys\n'), ((56663, 
56693), 'os.path.exists', 'os.path.exists', (['directory_topo'], {}), '(directory_topo)\n', (56677, 56693), False, 'import os\n'), ((56707, 56838), 'sys.exit', 'sys.exit', (['"""ERROR: Path to topographic base data does not exist! Check setup ini file and/or topographic input directory!"""'], {}), "(\n 'ERROR: Path to topographic base data does not exist! Check setup ini file and/or topographic input directory!'\n )\n", (56715, 56838), False, 'import sys\n'), ((57296, 57320), 'os.path.normpath', 'os.path.normpath', (['folder'], {}), '(folder)\n', (57312, 57320), False, 'import os\n'), ((57710, 57736), 'os.path.exists', 'os.path.exists', (['setup_path'], {}), '(setup_path)\n', (57724, 57736), False, 'import os\n'), ((57750, 57788), 'os.makedirs', 'os.makedirs', (['setup_path'], {'exist_ok': '(True)'}), '(setup_path, exist_ok=True)\n', (57761, 57788), False, 'import os\n'), ((60038, 60067), 'os.path.exists', 'os.path.exists', (['out_clone_dir'], {}), '(out_clone_dir)\n', (60052, 60067), False, 'import os\n'), ((60334, 60361), 'os.path.exists', 'os.path.exists', (['intblfolder'], {}), '(intblfolder)\n', (60348, 60361), False, 'import os\n'), ((70367, 70397), 'geopandas.read_file', 'gpd.read_file', (['catchments_path'], {}), '(catchments_path)\n', (70380, 70397), True, 'import geopandas as gpd\n'), ((79187, 79216), 'os.path.join', 'os.path.join', (['folder', '"""intbl"""'], {}), "(folder, 'intbl')\n", (79199, 79216), False, 'import os\n'), ((85086, 85141), 'sys.exit', 'sys.exit', (['"""ERROR: Please provide an ini file argument!"""'], {}), "('ERROR: Please provide an ini file argument!')\n", (85094, 85141), False, 'import sys\n'), ((4102, 4120), 'numpy.repeat', 'np.repeat', (['bnd0', 'd'], {}), '(bnd0, d)\n', (4111, 4120), True, 'import numpy as np\n'), ((4122, 4148), 'numpy.linspace', 'np.linspace', (['bnd0', 'bnd2', 'd'], {}), '(bnd0, bnd2, d)\n', (4133, 4148), True, 'import numpy as np\n'), ((4149, 4167), 'numpy.repeat', 'np.repeat', (['bnd2', 'd'], {}), '(bnd2, 
d)\n', (4158, 4167), True, 'import numpy as np\n'), ((4169, 4195), 'numpy.linspace', 'np.linspace', (['bnd2', 'bnd0', 'd'], {}), '(bnd2, bnd0, d)\n', (4180, 4195), True, 'import numpy as np\n'), ((4225, 4251), 'numpy.linspace', 'np.linspace', (['bnd1', 'bnd3', 'd'], {}), '(bnd1, bnd3, d)\n', (4236, 4251), True, 'import numpy as np\n'), ((4253, 4271), 'numpy.repeat', 'np.repeat', (['bnd3', 'd'], {}), '(bnd3, d)\n', (4262, 4271), True, 'import numpy as np\n'), ((4273, 4299), 'numpy.linspace', 'np.linspace', (['bnd3', 'bnd1', 'd'], {}), '(bnd3, bnd1, d)\n', (4284, 4299), True, 'import numpy as np\n'), ((4301, 4319), 'numpy.repeat', 'np.repeat', (['bnd1', 'd'], {}), '(bnd1, d)\n', (4310, 4319), True, 'import numpy as np\n'), ((5426, 5449), 'numpy.log', 'np.log', (['ks[:, row, col]'], {}), '(ks[:, row, col])\n', (5432, 5449), True, 'import numpy as np\n'), ((5614, 5637), 'numpy.log', 'np.log', (['ks[:, row, col]'], {}), '(ks[:, row, col])\n', (5620, 5637), True, 'import numpy as np\n'), ((5843, 5859), 'numpy.float32', 'np.float32', (['data'], {}), '(data)\n', (5853, 5859), True, 'import numpy as np\n'), ((9910, 9962), 'os.path.join', 'os.path.join', (['directory_in', 'row.folder_in', 'row.files'], {}), '(directory_in, row.folder_in, row.files)\n', (9922, 9962), False, 'import os\n'), ((10344, 10376), 'rasterio.crs.CRS.from_epsg', 'rasterio.crs.CRS.from_epsg', (['(4326)'], {}), '(4326)\n', (10370, 10376), False, 'import rasterio\n'), ((12917, 12929), 'numpy.log', 'np.log', (['grid'], {}), '(grid)\n', (12923, 12929), True, 'import numpy as np\n'), ((13200, 13211), 'numpy.exp', 'np.exp', (['out'], {}), '(out)\n', (13206, 13211), True, 'import numpy as np\n'), ((14487, 14530), 'rasterio.open', 'rasterio.open', (['path_tif', '"""w"""'], {}), "(path_tif, 'w', **dst_profile)\n", (14500, 14530), False, 'import rasterio\n'), ((15443, 15466), 'rasterio.open', 'rasterio.open', (['filepath'], {}), '(filepath)\n', (15456, 15466), False, 'import rasterio\n'), ((16129, 16159), 
'os.path.join', 'os.path.join', (['directory_out', 'f'], {}), '(directory_out, f)\n', (16141, 16159), False, 'import os\n'), ((25350, 25387), 'os.path.join', 'os.path.join', (['script_root', 'src_script'], {}), '(script_root, src_script)\n', (25362, 25387), False, 'import os\n'), ((25389, 25418), 'os.path.join', 'os.path.join', (['dst', 'src_script'], {}), '(dst, src_script)\n', (25401, 25418), False, 'import os\n'), ((32766, 32796), 'os.path.join', 'os.path.join', (['folder', 'temp_dir'], {}), '(folder, temp_dir)\n', (32778, 32796), False, 'import os\n'), ((33038, 33093), 'os.path.join', 'os.path.join', (['modelbuilder_path', 'catchment_id', 'temp_dir'], {}), '(modelbuilder_path, catchment_id, temp_dir)\n', (33050, 33093), False, 'import os\n'), ((33095, 33125), 'os.path.join', 'os.path.join', (['folder', 'temp_dir'], {}), '(folder, temp_dir)\n', (33107, 33125), False, 'import os\n'), ((33237, 33292), 'os.path.join', 'os.path.join', (['modelbuilder_path', 'catchment_id', 'temp_dir'], {}), '(modelbuilder_path, catchment_id, temp_dir)\n', (33249, 33292), False, 'import os\n'), ((33294, 33324), 'os.path.join', 'os.path.join', (['folder', 'temp_dir'], {}), '(folder, temp_dir)\n', (33306, 33324), False, 'import os\n'), ((39652, 39749), 'wflow_lake_intbl.make_tbls', 'setup_lake_intbl.make_tbls', (['waterbodies', 'out_intbl_dir'], {'debug_path': 'debug_path', 'logger': 'logger'}), '(waterbodies, out_intbl_dir, debug_path=\n debug_path, logger=logger)\n', (39678, 39749), True, 'import wflow_lake_intbl as setup_lake_intbl\n'), ((40088, 40208), 'waterbodies.make_maps', 'setup_waterbody_maps.make_maps', (['waterbodies', '(True)', 'out_clone_dir', '"""wflow_lakeareas"""', '"""wflow_lakelocs"""'], {'logger': 'logger'}), "(waterbodies, True, out_clone_dir,\n 'wflow_lakeareas', 'wflow_lakelocs', logger=logger)\n", (40118, 40208), True, 'import waterbodies as setup_waterbody_maps\n'), ((43334, 43374), 'os.path.join', 'os.path.join', (['path', '"""wflow_subcatch.map"""'], {}), 
"(path, 'wflow_subcatch.map')\n", (43346, 43374), False, 'import os\n'), ((43973, 44013), 'os.path.join', 'os.path.join', (['path', '"""wflow_subcatch.map"""'], {}), "(path, 'wflow_subcatch.map')\n", (43985, 44013), False, 'import os\n'), ((46189, 46227), 'os.path.join', 'os.path.join', (['out_clone_dir', 'temp_file'], {}), '(out_clone_dir, temp_file)\n', (46201, 46227), False, 'import os\n'), ((46325, 46345), 'os.remove', 'os.remove', (['temp_path'], {}), '(temp_path)\n', (46334, 46345), False, 'import os\n'), ((48640, 48671), 'numpy.arange', 'np.arange', (['data_masked.shape[1]'], {}), '(data_masked.shape[1])\n', (48649, 48671), True, 'import numpy as np\n'), ((48695, 48726), 'numpy.mod', 'np.mod', (['data_masked.shape[1]', '(2)'], {}), '(data_masked.shape[1], 2)\n', (48701, 48726), True, 'import numpy as np\n'), ((57902, 57924), 'os.listdir', 'os.listdir', (['setup_path'], {}), '(setup_path)\n', (57912, 57924), False, 'import os\n'), ((59250, 59283), 'os.path.exists', 'os.path.exists', (['modelbuilder_path'], {}), '(modelbuilder_path)\n', (59264, 59283), False, 'import os\n'), ((59301, 59389), 'sys.exit', 'sys.exit', (['"""ERROR: Path to modelbuilder could not be found! Check path in ini file!"""'], {}), "(\n 'ERROR: Path to modelbuilder could not be found! 
Check path in ini file!')\n", (59309, 59389), False, 'import sys\n'), ((59450, 59484), 'os.path.join', 'os.path.join', (['folder', 'riversfolder'], {}), '(folder, riversfolder)\n', (59462, 59484), False, 'import os\n'), ((60102, 60125), 'os.mkdir', 'os.mkdir', (['out_clone_dir'], {}), '(out_clone_dir)\n', (60110, 60125), False, 'import os\n'), ((65849, 65949), 'merit.merit_model_data.upscale_merit_basin', 'upscale_merit_basin', (['scale_ratio', 'bbox', 'basin_id', 'directory_topo', 'folder'], {'xy': 'xy2', 'logger': 'logger'}), '(scale_ratio, bbox, basin_id, directory_topo, folder, xy\n =xy2, logger=logger)\n', (65868, 65949), False, 'from merit.merit_model_data import get_merit_basin_bbox, upscale_merit_basin, network_merit_basin, resample_merit_basin\n'), ((66142, 66280), 'merit.merit_model_data.network_merit_basin', 'network_merit_basin', (['folder'], {'smooth_len': 'smooth_len', 'ucat_ratio': 'ucat_ratio', 'riv_shape': 'get_pyflwdir_riv', 'basin_shape': '(True)', 'logger': 'logger'}), '(folder, smooth_len=smooth_len, ucat_ratio=ucat_ratio,\n riv_shape=get_pyflwdir_riv, basin_shape=True, logger=logger)\n', (66161, 66280), False, 'from merit.merit_model_data import get_merit_basin_bbox, upscale_merit_basin, network_merit_basin, resample_merit_basin\n'), ((66492, 66536), 'merit.merit_model_data.resample_merit_basin', 'resample_merit_basin', (['directory_topo', 'folder'], {}), '(directory_topo, folder)\n', (66512, 66536), False, 'from merit.merit_model_data import get_merit_basin_bbox, upscale_merit_basin, network_merit_basin, resample_merit_basin\n'), ((66728, 66782), 'merit.wflow_topomaps.wflow_topomaps', 'wflow_topomaps', (['folder'], {'riv_upa': 'riv_upa', 'logger': 'logger'}), '(folder, riv_upa=riv_upa, logger=logger)\n', (66742, 66782), False, 'from merit.wflow_topomaps import wflow_topomaps\n'), ((67984, 68002), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (67994, 68002), False, 'import os\n'), ((77269, 77302), 'os.path.exists', 
'os.path.exists', (['template_ini_path'], {}), '(template_ini_path)\n', (77283, 77302), False, 'import os\n'), ((79236, 79265), 'os.path.exists', 'os.path.exists', (['out_intbl_dir'], {}), '(out_intbl_dir)\n', (79250, 79265), False, 'import os\n'), ((79283, 79324), 'os.makedirs', 'os.makedirs', (['out_intbl_dir'], {'exist_ok': '(True)'}), '(out_intbl_dir, exist_ok=True)\n', (79294, 79324), False, 'import os\n'), ((79452, 79481), 'os.path.exists', 'os.path.exists', (['out_intbl_dir'], {}), '(out_intbl_dir)\n', (79466, 79481), False, 'import os\n'), ((79729, 79752), 'os.listdir', 'os.listdir', (['intblfolder'], {}), '(intblfolder)\n', (79739, 79752), False, 'import os\n'), ((81000, 81031), 'os.path.exists', 'os.path.exists', (['discharges_path'], {}), '(discharges_path)\n', (81014, 81031), False, 'import os\n'), ((82003, 82040), 'os.path.join', 'os.path.join', (['folder', 'outdated_folder'], {}), '(folder, outdated_folder)\n', (82015, 82040), False, 'import os\n'), ((16556, 16575), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (16572, 16575), False, 'import os\n'), ((25176, 25202), 'os.path.basename', 'os.path.basename', (['src_file'], {}), '(src_file)\n', (25192, 25202), False, 'import os\n'), ((32904, 32934), 'os.path.join', 'os.path.join', (['folder', 'temp_dir'], {}), '(folder, temp_dir)\n', (32916, 32934), False, 'import os\n'), ((33809, 33853), 'os.path.join', 'os.path.join', (['modelbuilder_folder', 'temp_file'], {}), '(modelbuilder_folder, temp_file)\n', (33821, 33853), False, 'import os\n'), ((37606, 37635), 'pyproj.Proj', 'pyproj.Proj', ([], {'init': '"""epsg:4326"""'}), "(init='epsg:4326')\n", (37617, 37635), False, 'import pyproj\n'), ((37637, 37666), 'pyproj.Proj', 'pyproj.Proj', ([], {'init': '"""epsg:3857"""'}), "(init='epsg:3857')\n", (37648, 37666), False, 'import pyproj\n'), ((45755, 45802), 'os.path.join', 'os.path.join', (['out_clone_dir', "(temp_file + '.map')"], {}), "(out_clone_dir, temp_file + '.map')\n", (45767, 45802), False, 
'import os\n'), ((46841, 46888), 'os.path.join', 'os.path.join', (['out_intbl_dir', "(temp_file + '.tbl')"], {}), "(out_intbl_dir, temp_file + '.tbl')\n", (46853, 46888), False, 'import os\n'), ((48094, 48109), 'shapely.geometry.Point', 'Point', (['lon', 'lat'], {}), '(lon, lat)\n', (48099, 48109), False, 'from shapely.geometry import Point\n'), ((48775, 48806), 'numpy.mod', 'np.mod', (['data_masked.shape[0]', '(2)'], {}), '(data_masked.shape[0], 2)\n', (48781, 48806), True, 'import numpy as np\n'), ((48923, 48957), 'numpy.sqrt', 'np.sqrt', (['(x_dist ** 2 + y_dist ** 2)'], {}), '(x_dist ** 2 + y_dist ** 2)\n', (48930, 48957), True, 'import numpy as np\n'), ((49109, 49146), 'numpy.ma.masked_where', 'np.ma.masked_where', (['mask_final', 'dists'], {}), '(mask_final, dists)\n', (49127, 49146), True, 'import numpy as np\n'), ((57417, 57447), 'os.path.split', 'os.path.split', (['catchments_path'], {}), '(catchments_path)\n', (57430, 57447), False, 'import os\n'), ((57477, 57507), 'os.path.split', 'os.path.split', (['catchments_path'], {}), '(catchments_path)\n', (57490, 57507), False, 'import os\n'), ((59581, 59608), 'os.path.exists', 'os.path.exists', (['rivers_path'], {}), '(rivers_path)\n', (59595, 59608), False, 'import os\n'), ((59698, 59708), 'sys.exit', 'sys.exit', ([], {}), '()\n', (59706, 59708), False, 'import sys\n'), ((67001, 67043), 'os.path.join', 'os.path.join', (['folder', '"""catchments.geojson"""'], {}), "(folder, 'catchments.geojson')\n", (67013, 67043), False, 'import os\n'), ((67088, 67130), 'os.path.join', 'os.path.join', (['folder', '"""catchments.geojson"""'], {}), "(folder, 'catchments.geojson')\n", (67100, 67130), False, 'import os\n'), ((70723, 70778), 'os.path.join', 'os.path.join', (['setup_path', '"""catchments_original.geojson"""'], {}), "(setup_path, 'catchments_original.geojson')\n", (70735, 70778), False, 'import os\n'), ((71021, 71070), 'os.path.join', 'os.path.join', (['setup_path', '"""catchments_v2.geojson"""'], {}), "(setup_path, 
'catchments_v2.geojson')\n", (71033, 71070), False, 'import os\n'), ((73516, 73567), 'pandas.read_excel', 'pd.read_excel', (['path_grdc_stations'], {'na_values': '"""n.a."""'}), "(path_grdc_stations, na_values='n.a.')\n", (73529, 73567), True, 'import pandas as pd\n'), ((77108, 77153), 'os.path.join', 'os.path.join', (['script_root', "(model_ini + '.ini')"], {}), "(script_root, model_ini + '.ini')\n", (77120, 77153), False, 'import os\n'), ((77327, 77357), 'os.path.exists', 'os.path.exists', (['model_ini_path'], {}), '(model_ini_path)\n', (77341, 77357), False, 'import os\n'), ((77429, 77475), 'shutil.copy', 'shutil.copy', (['template_ini_path', 'model_ini_path'], {}), '(template_ini_path, model_ini_path)\n', (77440, 77475), False, 'import shutil\n'), ((79520, 79545), 'os.listdir', 'os.listdir', (['out_intbl_dir'], {}), '(out_intbl_dir)\n', (79530, 79545), False, 'import os\n'), ((81071, 81182), 'catchment_FLO1K.getAnnualDischarge', 'flo1k.getAnnualDischarge', (['discharges_path', 'catchment', 'debug_discharge'], {'debug_path': 'setup_path', 'logger': 'logger'}), '(discharges_path, catchment, debug_discharge,\n debug_path=setup_path, logger=logger)\n', (81095, 81182), True, 'import catchment_FLO1K as flo1k\n'), ((82060, 82089), 'os.path.exists', 'os.path.exists', (['outdated_path'], {}), '(outdated_path)\n', (82074, 82089), False, 'import os\n'), ((82094, 82122), 'os.path.isdir', 'os.path.isdir', (['outdated_path'], {}), '(outdated_path)\n', (82107, 82122), False, 'import os\n'), ((2718, 2733), 'numpy.sin', 'np.sin', (['lat_rad'], {}), '(lat_rad)\n', (2724, 2733), True, 'import numpy as np\n'), ((13631, 13673), 'numpy.where', 'np.where', (["(KsatVer == src_profile['nodata'])"], {}), "(KsatVer == src_profile['nodata'])\n", (13639, 13673), True, 'import numpy as np\n'), ((25483, 25520), 'os.path.join', 'os.path.join', (['script_root', 'src_script'], {}), '(script_root, src_script)\n', (25495, 25520), False, 'import os\n'), ((57956, 57991), 'os.path.join', 
'os.path.join', (['setup_path', 'temp_file'], {}), '(setup_path, temp_file)\n', (57968, 57991), False, 'import os\n'), ((62055, 62117), 'os.path.join', 'os.path.join', (['directory_topo', 'f"""pfaf{pfaf_id:02d}_outlets.csv"""'], {}), "(directory_topo, f'pfaf{pfaf_id:02d}_outlets.csv')\n", (62067, 62117), False, 'import os\n'), ((62163, 62205), 'pandas.read_csv', 'pd.read_csv', (['fn_outlets'], {'index_col': '"""pfaf3"""'}), "(fn_outlets, index_col='pfaf3')\n", (62174, 62205), True, 'import pandas as pd\n'), ((67318, 67350), 'os.path.join', 'os.path.join', (['folder', '"""pyflwdir"""'], {}), "(folder, 'pyflwdir')\n", (67330, 67350), False, 'import os\n'), ((68042, 68075), 'os.path.join', 'os.path.join', (['folder', 'file_or_dir'], {}), '(folder, file_or_dir)\n', (68054, 68075), False, 'import os\n'), ((70177, 70220), 'os.path.join', 'os.path.join', (['parameters_path', '"""riverwidth"""'], {}), "(parameters_path, 'riverwidth')\n", (70189, 70220), False, 'import os\n'), ((71900, 71924), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (['temp_polys'], {}), '(temp_polys)\n', (71912, 71924), False, 'from shapely.geometry import MultiPolygon\n'), ((73165, 73212), 'os.path.join', 'os.path.join', (['out_clone_dir', '"""wflow_uparea.map"""'], {}), "(out_clone_dir, 'wflow_uparea.map')\n", (73177, 73212), False, 'import os\n'), ((76105, 76228), 'rasterio.features.rasterize', 'rasterio.features.rasterize', ([], {'shapes': 'shapes', 'fill': '(0)', 'out': 'out_arr', 'transform': "uparea_meta['transform']", 'all_touched': '(False)'}), "(shapes=shapes, fill=0, out=out_arr, transform=\n uparea_meta['transform'], all_touched=False)\n", (76132, 76228), False, 'import rasterio\n'), ((77383, 77408), 'os.remove', 'os.remove', (['model_ini_path'], {}), '(model_ini_path)\n', (77392, 77408), False, 'import os\n'), ((80001, 80037), 'os.path.join', 'os.path.join', (['intblfolder', 'temp_file'], {}), '(intblfolder, temp_file)\n', (80013, 80037), False, 'import os\n'), ((80039, 80077), 
'os.path.join', 'os.path.join', (['out_intbl_dir', 'temp_file'], {}), '(out_intbl_dir, temp_file)\n', (80051, 80077), False, 'import os\n'), ((82255, 82280), 'os.listdir', 'os.listdir', (['outdated_path'], {}), '(outdated_path)\n', (82265, 82280), False, 'import os\n'), ((84552, 84566), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (84564, 84566), False, 'from datetime import datetime\n'), ((37747, 37771), 'shapely.ops.transform', 'transform', (['proj', 'polygon'], {}), '(proj, polygon)\n', (37756, 37771), False, 'from shapely.ops import transform\n'), ((62796, 62838), 'os.path.join', 'os.path.join', (['folder', '"""data"""', '"""catchments"""'], {}), "(folder, 'data', 'catchments')\n", (62808, 62838), False, 'import os\n'), ((62852, 62880), 'os.path.join', 'os.path.join', (['folder', '"""*.xy"""'], {}), "(folder, '*.xy')\n", (62864, 62880), False, 'import os\n'), ((63510, 63622), 'merit.merit_model_data.get_merit_basin_bbox', 'get_merit_basin_bbox', (['xy', 'directory_topo'], {'upstream_from_point': 'upstream_from_point', 'min_sto': 'min_stream_order'}), '(xy, directory_topo, upstream_from_point=\n upstream_from_point, min_sto=min_stream_order)\n', (63530, 63622), False, 'from merit.merit_model_data import get_merit_basin_bbox, upscale_merit_basin, network_merit_basin, resample_merit_basin\n'), ((67656, 67688), 'os.path.join', 'os.path.join', (['folder', '"""pyflwdir"""'], {}), "(folder, 'pyflwdir')\n", (67668, 67688), False, 'import os\n'), ((67748, 67780), 'os.path.join', 'os.path.join', (['folder', '"""pyflwdir"""'], {}), "(folder, 'pyflwdir')\n", (67760, 67780), False, 'import os\n'), ((78097, 78145), 'os.path.join', 'os.path.join', (['modelbuilder_path', '"""settings.json"""'], {}), "(modelbuilder_path, 'settings.json')\n", (78109, 78145), False, 'import os\n'), ((78183, 78210), 'os.path.join', 'os.path.join', (['"""data"""', '"""dem"""'], {}), "('data', 'dem')\n", (78195, 78210), False, 'import os\n'), ((79581, 79619), 'os.path.join', 
'os.path.join', (['out_intbl_dir', 'temp_file'], {}), '(out_intbl_dir, temp_file)\n', (79593, 79619), False, 'import os\n'), ((13826, 13841), 'numpy.log', 'np.log', (['KsatVer'], {}), '(KsatVer)\n', (13832, 13841), True, 'import numpy as np\n'), ((63079, 63121), 'os.path.join', 'os.path.join', (['folder', '"""data"""', '"""catchments"""'], {}), "(folder, 'data', 'catchments')\n", (63091, 63121), False, 'import os\n'), ((73965, 73976), 'shapely.geometry.Point', 'Point', (['x', 'y'], {}), '(x, y)\n', (73970, 73976), False, 'from shapely.geometry import Point\n'), ((75767, 75816), 'os.path.join', 'os.path.join', (['out_clone_dir', '"""wflow_subcatch.map"""'], {}), "(out_clone_dir, 'wflow_subcatch.map')\n", (75779, 75816), False, 'import os\n'), ((82320, 82362), 'os.path.join', 'os.path.join', (['outdated_path', 'outdated_file'], {}), '(outdated_path, outdated_file)\n', (82332, 82362), False, 'import os\n'), ((61098, 61145), 'os.path.join', 'os.path.join', (['folder', '"""data"""', '"""catchments"""', '"""*"""'], {}), "(folder, 'data', 'catchments', '*')\n", (61110, 61145), False, 'import os\n'), ((61159, 61192), 'os.path.join', 'os.path.join', (['folder', '"""*.basinid"""'], {}), "(folder, '*.basinid')\n", (61171, 61192), False, 'import os\n'), ((68567, 68600), 'os.path.join', 'os.path.join', (['folder', 'file_or_dir'], {}), '(folder, file_or_dir)\n', (68579, 68600), False, 'import os\n'), ((69145, 69178), 'os.path.join', 'os.path.join', (['folder', 'file_or_dir'], {}), '(folder, file_or_dir)\n', (69157, 69178), False, 'import os\n'), ((69720, 69753), 'os.path.join', 'os.path.join', (['folder', 'file_or_dir'], {}), '(folder, file_or_dir)\n', (69732, 69753), False, 'import os\n'), ((71632, 71652), 'shapely.geometry.Polygon', 'Polygon', (['temp_coords'], {}), '(temp_coords)\n', (71639, 71652), False, 'from shapely.geometry import Polygon\n'), ((68286, 68319), 'os.path.join', 'os.path.join', (['folder', 'file_or_dir'], {}), '(folder, file_or_dir)\n', (68298, 68319), 
False, 'import os\n'), ((68321, 68366), 'os.path.join', 'os.path.join', (['folder', '"""pyflwdir"""', 'file_or_dir'], {}), "(folder, 'pyflwdir', file_or_dir)\n", (68333, 68366), False, 'import os\n'), ((68953, 68998), 'os.path.join', 'os.path.join', (['folder', '"""pyflwdir"""', 'file_or_dir'], {}), "(folder, 'pyflwdir', file_or_dir)\n", (68965, 68998), False, 'import os\n'), ((69235, 69283), 'os.path.join', 'os.path.join', (['folder', 'file_or_dir', 'file_or_dir_2'], {}), '(folder, file_or_dir, file_or_dir_2)\n', (69247, 69283), False, 'import os\n'), ((74158, 74214), 'geopandas.tools.sjoin', 'gpd.tools.sjoin', (['gauging_stations', 'catchment'], {'how': '"""left"""'}), "(gauging_stations, catchment, how='left')\n", (74173, 74214), True, 'import geopandas as gpd\n'), ((61299, 61326), 'os.path.basename', 'os.path.basename', (['temp_file'], {}), '(temp_file)\n', (61315, 61326), False, 'import os\n'), ((69387, 69435), 'os.path.join', 'os.path.join', (['folder', 'file_or_dir', 'file_or_dir_2'], {}), '(folder, file_or_dir, file_or_dir_2)\n', (69399, 69435), False, 'import os\n'), ((69437, 69497), 'os.path.join', 'os.path.join', (['folder', '"""pyflwdir"""', 'file_or_dir', 'file_or_dir_2'], {}), "(folder, 'pyflwdir', file_or_dir, file_or_dir_2)\n", (69449, 69497), False, 'import os\n')] |
from hypergan.samplers.base_sampler import BaseSampler
import numpy as np
import tensorflow as tf
class GridSampler(BaseSampler):
    """Sampler that decodes a fixed 2-D grid of latent points.

    A single real input ``x`` is frozen at construction time and tiled
    across the batch, so every generated sample is conditioned on the
    same input while only the latent ``z`` varies over the grid.
    """

    def __init__(self, gan, samples_per_row=8):
        BaseSampler.__init__(self, gan, samples_per_row)
        # Freeze one real input and tile it to a full batch.
        self.x = gan.session.run(gan.inputs.x)
        batch = self.x.shape[0]
        self.x = np.reshape(self.x[0],
                            [1, self.x.shape[1], self.x.shape[2], self.x.shape[3]])
        self.x = np.tile(self.x, [batch, 1, 1, 1])

    def _sample(self):
        """Decode 32 grid-spaced latent points and return them as one 4x8 mosaic."""
        gan = self.gan
        z_t = gan.latent.z
        # 4 x 8 = 32 latent points evenly spread over (-1, 1) x (-1, 1).
        z = np.mgrid[-0.999:0.999:0.6, -0.999:0.999:0.26].reshape(2, -1).T
        z = np.reshape(z, [32, 2])
        # Number of generator batches needed to cover all 32 points
        # (integer division replaces the former float `32 / batch_size`
        # that was wrapped in int() at the loop).
        needed = 32 // gan.batch_size()
        gs = []
        for i in range(needed):
            zi = z[i * gan.batch_size():(i + 1) * gan.batch_size()]
            g = gan.session.run(gan.generator.sample,
                                feed_dict={z_t: zi, gan.inputs.x: self.x})
            gs.append(g)
        # Arrange the 32 samples into a 4x8 mosaic with a leading batch axis.
        # (The old `np.hstack(gs)` result was dead code — it was immediately
        # overwritten by this reshape; likewise the unused `x_hat` eval.)
        xshape = gan.ops.shape(gan.inputs.x)
        g = np.reshape(gs, [4, 8, xshape[1], xshape[2], xshape[3]])
        g = np.concatenate(g, axis=1)
        g = np.concatenate(g, axis=1)
        g = np.expand_dims(g, axis=0)
        return {
            'generator': g
        }
| [
"numpy.tile",
"numpy.reshape",
"numpy.hstack",
"hypergan.samplers.base_sampler.BaseSampler.__init__",
"numpy.linspace",
"numpy.concatenate",
"numpy.expand_dims"
] | [((188, 236), 'hypergan.samplers.base_sampler.BaseSampler.__init__', 'BaseSampler.__init__', (['self', 'gan', 'samples_per_row'], {}), '(self, gan, samples_per_row)\n', (208, 236), False, 'from hypergan.samplers.base_sampler import BaseSampler\n'), ((333, 410), 'numpy.reshape', 'np.reshape', (['self.x[0]', '[1, self.x.shape[1], self.x.shape[2], self.x.shape[3]]'], {}), '(self.x[0], [1, self.x.shape[1], self.x.shape[2], self.x.shape[3]])\n', (343, 410), True, 'import numpy as np\n'), ((428, 461), 'numpy.tile', 'np.tile', (['self.x', '[batch, 1, 1, 1]'], {}), '(self.x, [batch, 1, 1, 1])\n', (435, 461), True, 'import numpy as np\n'), ((642, 662), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(6)'], {}), '(0, 1, 6)\n', (653, 662), True, 'import numpy as np\n'), ((749, 771), 'numpy.reshape', 'np.reshape', (['z', '[32, 2]'], {}), '(z, [32, 2])\n', (759, 771), True, 'import numpy as np\n'), ((1211, 1224), 'numpy.hstack', 'np.hstack', (['gs'], {}), '(gs)\n', (1220, 1224), True, 'import numpy as np\n'), ((1282, 1337), 'numpy.reshape', 'np.reshape', (['gs', '[4, 8, xshape[1], xshape[2], xshape[3]]'], {}), '(gs, [4, 8, xshape[1], xshape[2], xshape[3]])\n', (1292, 1337), True, 'import numpy as np\n'), ((1350, 1375), 'numpy.concatenate', 'np.concatenate', (['g'], {'axis': '(1)'}), '(g, axis=1)\n', (1364, 1375), True, 'import numpy as np\n'), ((1388, 1413), 'numpy.concatenate', 'np.concatenate', (['g'], {'axis': '(1)'}), '(g, axis=1)\n', (1402, 1413), True, 'import numpy as np\n'), ((1426, 1451), 'numpy.expand_dims', 'np.expand_dims', (['g'], {'axis': '(0)'}), '(g, axis=0)\n', (1440, 1451), True, 'import numpy as np\n')] |
import numpy
from chainer import functions
from chainer import testing
# Parameter matrix: pad configurations (shape / pad_width / mode) crossed
# with the floating dtypes under test.
@testing.parameterize(*testing.product_dict(
    [
        {'shape': (), 'pad_width': 1, 'mode': 'constant'},
        {'shape': (2, 3), 'pad_width': 0, 'mode': 'constant'},
        {'shape': (2, 3), 'pad_width': 1, 'mode': 'constant'},
        {'shape': (2, 3), 'pad_width': (1, 2), 'mode': 'constant'},
        {'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)), 'mode': 'constant'},
        {'shape': (2, 3, 2), 'pad_width': ((2, 5), (1, 2), (0, 7)),
         'mode': 'constant'},
        {'shape': (1, 3, 5, 2), 'pad_width': 2, 'mode': 'constant'}
    ],
    [
        {'dtype': numpy.float16},
        {'dtype': numpy.float32},
        {'dtype': numpy.float64}
    ]
))
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'use_cudnn': ['never', 'always'],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
    })
)
class TestPadDefault(testing.FunctionTestCase):
    """Tests ``functions.pad`` without explicit ``constant_values``."""
    def setUp(self):
        # float16 gradients are noisy; loosen the numerical-gradient check.
        self.check_backward_options = {}
        if self.dtype == numpy.float16:
            self.check_backward_options.update({'atol': 3e-2, 'rtol': 3e-2})
    def generate_inputs(self):
        """Draw a random input of the parameterized shape and dtype."""
        x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        return x,
    def forward(self, inputs, device):
        """Run the Chainer implementation under test."""
        x, = inputs
        y = functions.pad(x, self.pad_width, self.mode)
        return y,
    def forward_expected(self, inputs):
        """Compute the reference result with ``numpy.pad``."""
        x, = inputs
        y_expected = numpy.pad(x, self.pad_width, self.mode)
        # Cast so the reference matches the parameterized dtype.
        return y_expected.astype(self.dtype),
# Same backend machinery as TestPadDefault, but with explicit
# ``constant_values`` (scalar, per-axis pair, and per-axis-per-side).
@testing.parameterize(*testing.product_dict(
    [
        {'shape': (2, 3), 'pad_width': 1, 'mode': 'constant',
         'constant_values': 1},
        {'shape': (2, 3), 'pad_width': (1, 2), 'mode': 'constant',
         'constant_values': (1, 2)},
        {'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)), 'mode': 'constant',
         'constant_values': ((1, 2), (3, 4))},
    ],
    [
        {'dtype': numpy.float16},
        {'dtype': numpy.float32},
        {'dtype': numpy.float64}
    ]
))
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'use_cudnn': ['never', 'always'],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + testing.product({
        'use_chainerx': [True],
        'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
    })
)
# Old numpy does not work with multi-dimensional constant_values
@testing.with_requires('numpy>=1.11.1')
class TestPad(testing.FunctionTestCase):
    """Tests ``functions.pad`` with explicit ``constant_values``."""
    def setUp(self):
        # float16 gradients are noisy; loosen the numerical-gradient check.
        self.check_backward_options = {}
        if self.dtype == numpy.float16:
            self.check_backward_options.update({'atol': 3e-2, 'rtol': 3e-2})
    def generate_inputs(self):
        """Draw a random input of the parameterized shape and dtype."""
        x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        return x,
    def forward_expected(self, inputs):
        """Compute the reference result with ``numpy.pad``."""
        x, = inputs
        y_expected = numpy.pad(x, self.pad_width, mode=self.mode,
                               constant_values=self.constant_values)
        return y_expected,
    def forward(self, inputs, device):
        """Run the Chainer implementation under test."""
        x, = inputs
        y = functions.pad(x, self.pad_width, mode=self.mode,
                          constant_values=self.constant_values)
        return y,
# Standard Chainer test-module entry point: runs the test cases above.
testing.run_module(__name__, __file__)
| [
"chainer.functions.pad",
"chainer.testing.run_module",
"chainer.testing.product_dict",
"chainer.testing.product",
"chainer.testing.with_requires",
"numpy.random.uniform",
"numpy.pad"
] | [((2712, 2750), 'chainer.testing.with_requires', 'testing.with_requires', (['"""numpy>=1.11.1"""'], {}), "('numpy>=1.11.1')\n", (2733, 2750), False, 'from chainer import testing\n'), ((3521, 3559), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (3539, 3559), False, 'from chainer import testing\n'), ((1542, 1585), 'chainer.functions.pad', 'functions.pad', (['x', 'self.pad_width', 'self.mode'], {}), '(x, self.pad_width, self.mode)\n', (1555, 1585), False, 'from chainer import functions\n'), ((1686, 1725), 'numpy.pad', 'numpy.pad', (['x', 'self.pad_width', 'self.mode'], {}), '(x, self.pad_width, self.mode)\n', (1695, 1725), False, 'import numpy\n'), ((97, 665), 'chainer.testing.product_dict', 'testing.product_dict', (["[{'shape': (), 'pad_width': 1, 'mode': 'constant'}, {'shape': (2, 3),\n 'pad_width': 0, 'mode': 'constant'}, {'shape': (2, 3), 'pad_width': 1,\n 'mode': 'constant'}, {'shape': (2, 3), 'pad_width': (1, 2), 'mode':\n 'constant'}, {'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)), 'mode':\n 'constant'}, {'shape': (2, 3, 2), 'pad_width': ((2, 5), (1, 2), (0, 7)),\n 'mode': 'constant'}, {'shape': (1, 3, 5, 2), 'pad_width': 2, 'mode':\n 'constant'}]", "[{'dtype': numpy.float16}, {'dtype': numpy.float32}, {'dtype': numpy.float64}]"], {}), "([{'shape': (), 'pad_width': 1, 'mode': 'constant'}, {\n 'shape': (2, 3), 'pad_width': 0, 'mode': 'constant'}, {'shape': (2, 3),\n 'pad_width': 1, 'mode': 'constant'}, {'shape': (2, 3), 'pad_width': (1,\n 2), 'mode': 'constant'}, {'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)\n ), 'mode': 'constant'}, {'shape': (2, 3, 2), 'pad_width': ((2, 5), (1, \n 2), (0, 7)), 'mode': 'constant'}, {'shape': (1, 3, 5, 2), 'pad_width': \n 2, 'mode': 'constant'}], [{'dtype': numpy.float16}, {'dtype': numpy.\n float32}, {'dtype': numpy.float64}])\n", (117, 665), False, 'from chainer import testing\n'), ((1001, 1099), 'chainer.testing.product', 'testing.product', 
(["{'use_chainerx': [True], 'chainerx_device': ['native:0', 'cuda:0', 'cuda:1']}"], {}), "({'use_chainerx': [True], 'chainerx_device': ['native:0',\n 'cuda:0', 'cuda:1']})\n", (1016, 1099), False, 'from chainer import testing\n'), ((3175, 3262), 'numpy.pad', 'numpy.pad', (['x', 'self.pad_width'], {'mode': 'self.mode', 'constant_values': 'self.constant_values'}), '(x, self.pad_width, mode=self.mode, constant_values=self.\n constant_values)\n', (3184, 3262), False, 'import numpy\n'), ((3388, 3479), 'chainer.functions.pad', 'functions.pad', (['x', 'self.pad_width'], {'mode': 'self.mode', 'constant_values': 'self.constant_values'}), '(x, self.pad_width, mode=self.mode, constant_values=self.\n constant_values)\n', (3401, 3479), False, 'from chainer import functions\n'), ((1797, 2191), 'chainer.testing.product_dict', 'testing.product_dict', (["[{'shape': (2, 3), 'pad_width': 1, 'mode': 'constant', 'constant_values': 1\n }, {'shape': (2, 3), 'pad_width': (1, 2), 'mode': 'constant',\n 'constant_values': (1, 2)}, {'shape': (2, 3), 'pad_width': ((1, 2), (3,\n 4)), 'mode': 'constant', 'constant_values': ((1, 2), (3, 4))}]", "[{'dtype': numpy.float16}, {'dtype': numpy.float32}, {'dtype': numpy.float64}]"], {}), "([{'shape': (2, 3), 'pad_width': 1, 'mode': 'constant',\n 'constant_values': 1}, {'shape': (2, 3), 'pad_width': (1, 2), 'mode':\n 'constant', 'constant_values': (1, 2)}, {'shape': (2, 3), 'pad_width':\n ((1, 2), (3, 4)), 'mode': 'constant', 'constant_values': ((1, 2), (3, 4\n ))}], [{'dtype': numpy.float16}, {'dtype': numpy.float32}, {'dtype':\n numpy.float64}])\n", (1817, 2191), False, 'from chainer import testing\n'), ((2526, 2624), 'chainer.testing.product', 'testing.product', (["{'use_chainerx': [True], 'chainerx_device': ['native:0', 'cuda:0', 'cuda:1']}"], {}), "({'use_chainerx': [True], 'chainerx_device': ['native:0',\n 'cuda:0', 'cuda:1']})\n", (2541, 2624), False, 'from chainer import testing\n'), ((848, 946), 'chainer.testing.product', 'testing.product', 
(["{'use_cuda': [True], 'use_cudnn': ['never', 'always'], 'cuda_device': [0, 1]}"], {}), "({'use_cuda': [True], 'use_cudnn': ['never', 'always'],\n 'cuda_device': [0, 1]})\n", (863, 946), False, 'from chainer import testing\n'), ((2373, 2471), 'chainer.testing.product', 'testing.product', (["{'use_cuda': [True], 'use_cudnn': ['never', 'always'], 'cuda_device': [0, 1]}"], {}), "({'use_cuda': [True], 'use_cudnn': ['never', 'always'],\n 'cuda_device': [0, 1]})\n", (2388, 2471), False, 'from chainer import testing\n'), ((1393, 1432), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (1413, 1432), False, 'import numpy\n'), ((3016, 3055), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (3036, 3055), False, 'import numpy\n')] |
import param
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# from gym_film.utils.convert_reward import to_single_reward
# from matplotlib.patches import Patch
# Uses the following methods/attributes from env:
# - O, R (observation and reward)
# - jets_power
# - system_state
# - reward, t
# Module-level render configuration.
matplotlib.rcParams.update({'font.size': 15})  # larger default font for all figures
control = param.show_control  # read from the param config; usage not visible in this chunk
NO_LEGEND = True  # when True, the legend on the h/q axes is removed (see setup_h_plot)
show = 1  # presumably toggles interactive display — TODO confirm against rest of file
save = 0  # presumably toggles saving figures to disk — TODO confirm against rest of file
class FilmRender():
def __init__(self, env, PLOT_JETS=True):
self.PLOT_JETS = PLOT_JETS
self.env = env
self.Ob = self.env.Ob
self.R = self.env.R
self.blit = False
self.setup_plot()
    def setup_h_plot(self, plot_regions=False):
        """Draw the initial h(x) and q(x) curves, jets, legend and labels.

        Args:
            plot_regions: when True, also shade the three dynamical regions
                via :meth:`plot_regions`.
        """
        # Plot h
        self.hlines, = self.hax.plot(np.linspace(
            0, param.L-param.dx, param.NUM), self.env.system_state[0],
            label="y = h(x)", linewidth=2.5)
        self.qlines, = self.hax.plot(np.linspace(
            0, param.L, param.NUM), self.env.system_state[1], alpha=0.5,
            label="y = q(x)", linestyle='--', linewidth=2.5)
        # Add lims on axes
        self.hax.set_xlim(param.start_h_plot, param.L)
        self.hax.set_ylim(param.hq_base_value-param.max_h,
                          param.hq_base_value+param.max_h)
        # self.hax.grid()
        if self.PLOT_JETS:
            # Plot jets
            self.setup_jet(self.hax)
            # # legend
            # legend = self.hax.legend(["y = h(x)", "y = q(x)", "jets position",
            #                          "observation space", "reward space", "jets power"],
            #                          loc='lower left', ncol=2)
            handles, labels = self.hax.get_legend_handles_labels()
            # sort both labels and handles by labels
            # `order` reorders the legend entries (indices into handles/labels).
            order = [0, 1, 4, 2, 3, 5]
            self.hax.legend([handles[idx] for idx in order], [labels[idx]
                                                              for idx in order],
                            loc="lower left", ncol=2)
            # ax = legend.axes
            # handles, labels = ax.get_legend_handles_labels()
            # # obs label
            # handles.append(Patch(facecolor='orange', edgecolor='r'))
            # labels.append("observation domain")
            # # reward label
            # handles.append(Patch(facecolor='orange', edgecolor='r'))
            # labels.append("reward domain")
            # legend._legend_box = None
            # legend._init_legend_box(handles, labels)
            # legend._set_loc(legend._loc)
            # legend.set_title(legend.get_title().get_text())
        else:
            # legend
            self.hax.legend(["y = h(x)", "y = q(x)"], loc='lower left')
        if NO_LEGEND:
            # Legend is built above but removed when the module flag is set.
            self.hax.get_legend().remove()
        # Time annotation, t = current_step * dt (the env.t variant is disabled).
        # self.text = self.hax.text(1.1, 0.1, 't = '+str(int(round(float(self.env.t)))))
        self.text = self.hax.text(1.1, 0.1, 't = '+str(int(round(float(self.env.current_step*param.dt)))))
        if plot_regions:
            self.plot_regions(self.hax)
        # adding x and y labels
        no_x_label = False
        if not no_x_label:
            self.hax.set_xlabel('x')
        no_y_label = False
        if not no_y_label:
            self.hax_ylabel = self.hax.set_ylabel('h, q', labelpad=5)
        # changing color of ticks
        self.hax.tick_params(colors='black')
        # The two flags below are hard-coded off; the branches are kept for
        # manually toggling tick visibility when producing figures.
        no_ticks_x = False
        if no_ticks_x:
            # removing ticks
            plt.tick_params(
                axis='x',          # changes apply to the x-axis
                which='both',      # both major and minor ticks are affected
                bottom=False,      # ticks along the bottom edge are off
                top=False,         # ticks along the top edge are off
                labelbottom=False)  # labels along the bottom edge are off
        no_ticks_y = False
        if no_ticks_y:
            # removing ticks
            plt.tick_params(
                axis='y',          # changes apply to the x-axis
                which='both',      # both major and minor ticks are affected
                bottom=False,      # ticks along the bottom edge are off
                top=False,         # ticks along the top edge are off
                left=False,
                labelbottom=False)  # labels along the bottom edge are off
            self.hax.set_yticklabels([])
def plot_regions(self, ax):
x1 = 160
ax.axvspan(0, x1, facecolor='blue', alpha=0.1)
ax.axvline(x=x1, ymin=0.0, ymax=1.0, color='k',
linestyle='--', alpha=0.3)
x2 = 270
ax.axvspan(x1, x2, facecolor='green', alpha=0.1)
ax.axvline(x=x2, ymin=0.0, ymax=1.0, color='k',
linestyle='--', alpha=0.3)
ax.axvspan(x2, 340, facecolor='red', alpha=0.1)
textstr1 = "Exponential instability growth region"
textstr2 = "Pseudo-periodic region"
textstr3 = "Fully-developped\nchaotic region"
# these are matplotlib.patch.Patch properties
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.09, 0.95, textstr1, transform=ax.transAxes,
verticalalignment='top', bbox=props)
ax.text(0.54, 0.95, textstr2, transform=ax.transAxes,
verticalalignment='top', bbox=props)
ax.text(0.83, 0.95, textstr3, transform=ax.transAxes,
verticalalignment='top', bbox=props)
def setup_jet(self, ax):
    """Draw the static jet overlays: position markers, power bars, and the
    per-jet observation/reward point clouds.

    Args:
        ax: matplotlib axes of the h/q plot.
    """
    # self.jet_plot = ax.scatter(param.jets_position*param.dx, np.zeros(len(param.jets_position)), s=100*self.env.jets_power)
    # One marker per jet, stacked vertically near the top of the axis.
    self.jet_spots = ax.scatter(
        param.jets_position*param.dx,
        [(0.9 - 0.095*(k)) for k in range(param.n_jets)],
        label='jets position', s=30)
    # Empty plots only register legend entries for the point clouds below.
    plt.plot([], [], label="observation domain", color="green")
    plt.plot([], [], label="reward domain", color="red")
    # Bars anchored at y=1 whose heights track the jet powers
    # (updated later by update_plot_jet).
    self.jet_rect = ax.bar(param.jets_position*param.dx,
                           self.env.jets_power, param.JET_WIDTH*param.dx, label="jets power", bottom=1)
    # show the zone where the control is done as well
    x_control_spots = np.array([self.Ob.obs_points+param.jets_position[i]
                                for i in range(param.n_jets)]).flatten()*param.dx
    y_control_spots = np.concatenate(np.array(
        [(np.zeros(len(self.Ob.obs_points)) + 0.9 - 0.095*(k)) for k in range(param.n_jets)]))
    self.control_spots = ax.scatter(
        x_control_spots, y_control_spots, s=1)
    # show the zone where the reward is calculated
    x_reward_spots = np.array([self.R.obs_points_to_reward+param.jets_position[i]
                               for i in range(param.n_jets)]).flatten()*param.dx
    y_reward_spots = np.concatenate(np.array([(np.zeros(len(
        self.R.obs_points_to_reward)) + 0.89 - 0.095*(k)) for k in range(param.n_jets)]))
    self.reward_spots = ax.scatter(
        x_reward_spots, y_reward_spots, s=1)
def update_h_plot(self):
    """Push the latest state profiles into the two line artists.

    Row 0 / row 1 of system_state feed hlines / qlines respectively
    (presumably h and q, matching the 'h, q' axis label — confirm).
    """
    self.hlines.set_ydata(self.env.system_state[0])
    self.qlines.set_ydata(self.env.system_state[1])
def update_plot_jet(self, ax):
    """Refresh the jet-power bar heights from the environment state.

    *ax* is kept for interface compatibility but is not used; the bar
    artists were stored in self.jet_rect by setup_jet.
    """
    # self.jet_plot.remove()
    # self.jet_plot = ax.scatter(param.jets_position * param.dx, np.zeros(len(param.jets_position)), s=100 * self.env.jets_power)
    if not self.render_plot:
        return
    # jet_rect holds one bar per jet (built with param.n_jets entries),
    # so pairing it with jets_power covers every jet.
    for bar, power in zip(self.jet_rect, self.env.jets_power):
        bar.set_height(power)
def setup_control_plot(self):
    """Initialise the rolling time-series plot of sensor value, jet power
    and scaled reward at the first jet position."""
    n_frames = param.MAX_TIMEFRAME_CONTROL_PLOT
    self.control_ax.set_ylim(-1.5, 1.5)
    self.control_ax.set_xlim(0, n_frames - 1)
    self.x_t = np.arange(0, n_frames)
    # Fixed-length rolling buffers, zero-initialised.
    self.y_sensor = [0] * n_frames
    self.y_control = [0] * n_frames
    self.y_reward = [0] * n_frames
    self.sensor_lines, = self.control_ax.plot(self.x_t, self.y_sensor)
    self.control_lines, = self.control_ax.plot(self.x_t, self.y_control)
    self.reward_lines, = self.control_ax.plot(self.x_t, self.y_reward)
    # legend
    self.control_ax.set_title(
        "Some values at x=jets_position[0] as a function of time")
    self.control_ax.legend(
        ["y(t) = {}*h(x_jet, t)".format(param.obs_at_jet_render_param),
         "jet power (proportion of max jet power)",
         "{} * reward".format(param.reward_multiplier_render)],
        loc='lower left')
def update_control_plot(self):
    """Shift the rolling buffers one step and redraw the control traces.

    Each buffer keeps param.MAX_TIMEFRAME_CONTROL_PLOT samples: the
    newest value is appended and the oldest popped.
    """
    # Sensor reading at the first jet, offset by the base value and
    # rescaled for display.
    self.y_sensor.append(param.obs_at_jet_render_param*(
        self.env.system_state[0, param.jets_position[0]]-param.hq_base_value))
    self.y_sensor.pop(0)
    self.y_control.append(self.env.jets_power[0])
    self.y_control.pop(0)
    # Reward is collapsed to a scalar (dict -> mean) then rescaled.
    self.y_reward.append(param.reward_multiplier_render *
                         self.reward_process(self.env.reward))
    self.y_reward.pop(0)
    if self.render_plot:
        self.control_lines.set_ydata(self.y_control)
        self.sensor_lines.set_ydata(self.y_sensor)
        self.reward_lines.set_ydata(self.y_reward)
    # self.control_ax.set_xlim(max(0, self.env.current_step-max_timeframe), self.env.current_step)
# setup everything - calls setup_jets and everything
def reward_process(self, reward):
    """Collapse a per-jet reward into a scalar.

    A plain dict {jet_position: reward} is averaged over jet positions
    (sorted, for determinism); any other value is returned unchanged.
    """
    if type(reward) is dict:
        ordered_rewards = [reward.get(pos) for pos in sorted(reward.keys())]
        return np.mean(ordered_rewards)
    return reward
def setup_plot(self):
    """Create the figure and axes and draw the initial frame.

    Builds the h/q axis (plus a second time-series axis when the
    module-level `control` flag is set), optionally caches backgrounds
    for blitting, and optionally shows/saves the figure.
    NOTE(review): `control`, `show` and `save` are free names resolved
    at module level — confirm where they are defined.
    """
    standard_size = {'width': 10, 'height': 3}
    divide_by = 1
    self.figure = plt.figure(figsize=(standard_size.get(
        'width')/divide_by, standard_size.get('height')/divide_by))
    self.hax = self.figure.add_subplot(1, 1, 1)
    # self.figure.subplots_adjust(wspace=0.2)
    if control:
        self.control_ax = self.figure.add_subplot(2, 1, 2)
        self.figure.subplots_adjust(hspace=1)
    # Plot h
    self.setup_h_plot()
    # Plot control
    # self.setup_control_plot()
    if self.blit:
        # cache the background (reused by update_plot for fast redraws)
        self.haxbackground = self.figure.canvas.copy_from_bbox(
            self.hax.bbox)
        self.control_axbackground = self.figure.canvas.copy_from_bbox(
            self.control_ax.bbox)
    self.figure.canvas.draw()
    if show:
        plt.show(block=False)
    self.counter = 0
    # self.hax.set_title('t = {} \n global_reward = {}'.format(
    #     self.env.t, (to_single_reward(list(self.env.reward.values())) if param.method == '1env_1jet' else self.env.reward)))
    self.save = save
    if self.save:
        self.save_fig()
def save_fig(self):
    """Save the current figure on the first call and then every
    param.SAVE_PERIOD calls, incrementing the frame counter each time."""
    should_save = (self.counter % param.SAVE_PERIOD == 0) or self.counter == 0
    if should_save:
        plt.tight_layout()
        # Filename encodes the frame counter plus part of the env identity.
        fname = 'fig{}{}.png'.format(self.counter, str(id(self.env))[:2])
        plt.savefig(fname)
    self.counter += 1
# update everything
def update_plot(self):
    """Redraw the current frame: h/q curves, jet bars, control traces.

    When self.blit is set, only the changed artists are redrawn onto the
    cached backgrounds (fast path); otherwise the full canvas is drawn.
    """
    # self.render_plot = self.render_clock == param.RENDER_PERIOD
    # Rendering is currently forced on every call (periodic rendering
    # above is disabled).
    self.render_plot = True
    # # Update time value
    # self.figure.suptitle('t = {} \n Reward : {}'.format(
    #     self.env.t, self.env.reward), fontsize=16)
    # self.hax.set_title('t = {} \n global_reward = {}'.format(
    #     int(round(self.env.t)), (to_single_reward(list(self.env.reward.values())) if param.method == '1env_1jet' else self.env.reward)))
    if self.save:
        self.save_fig()
    # Update data h
    self.update_h_plot()
    if self.PLOT_JETS:
        # Update jet
        self.update_plot_jet(self.hax)
    # Update control as a function of time
    if control:
        self.update_control_plot()
    # Refresh the on-figure clock label (simulation time in steps*dt).
    self.text.set_text('t = '+str(int(round(float(self.env.current_step*param.simulation_step_time)))))
    if self.render_plot:
        if self.blit:
            # restore background
            self.figure.canvas.restore_region(self.haxbackground)
            self.figure.canvas.restore_region(self.control_axbackground)
            # redraw just the points
            self.hax.draw_artist(self.hlines)
            self.hax.draw_artist(self.qlines)
            self.control_ax.draw_artist(self.sensor_lines)
            self.control_ax.draw_artist(self.control_lines)
            self.control_ax.draw_artist(self.reward_lines)
            # fill in the axes rectangle
            self.figure.canvas.blit(self.hax.bbox)
            self.figure.canvas.blit(self.control_ax.bbox)
        else:
            # We draw here
            self.figure.canvas.draw()
        self.figure.canvas.flush_events()
        self.render_clock = 0
    self.render_clock += 1
| [
"matplotlib.rcParams.update",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.plot",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((322, 367), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 15}"], {}), "({'font.size': 15})\n", (348, 367), False, 'import matplotlib\n'), ((5786, 5845), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]'], {'label': '"""observation domain"""', 'color': '"""green"""'}), "([], [], label='observation domain', color='green')\n", (5794, 5845), True, 'import matplotlib.pyplot as plt\n'), ((5854, 5906), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]'], {'label': '"""reward domain"""', 'color': '"""red"""'}), "([], [], label='reward domain', color='red')\n", (5862, 5906), True, 'import matplotlib.pyplot as plt\n'), ((7719, 7765), 'numpy.arange', 'np.arange', (['(0)', 'param.MAX_TIMEFRAME_CONTROL_PLOT'], {}), '(0, param.MAX_TIMEFRAME_CONTROL_PLOT)\n', (7728, 7765), True, 'import numpy as np\n'), ((771, 816), 'numpy.linspace', 'np.linspace', (['(0)', '(param.L - param.dx)', 'param.NUM'], {}), '(0, param.L - param.dx, param.NUM)\n', (782, 816), True, 'import numpy as np\n'), ((937, 971), 'numpy.linspace', 'np.linspace', (['(0)', 'param.L', 'param.NUM'], {}), '(0, param.L, param.NUM)\n', (948, 971), True, 'import numpy as np\n'), ((3453, 3540), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)', 'labelbottom': '(False)'}), "(axis='x', which='both', bottom=False, top=False,\n labelbottom=False)\n", (3468, 3540), True, 'import matplotlib.pyplot as plt\n'), ((3922, 4021), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)', 'left': '(False)', 'labelbottom': '(False)'}), "(axis='y', which='both', bottom=False, top=False, left=False,\n labelbottom=False)\n", (3937, 4021), True, 'import matplotlib.pyplot as plt\n'), ((10516, 10537), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (10524, 10537), True, 'import matplotlib.pyplot as 
plt\n'), ((10945, 10963), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10961, 10963), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import matplotlib.pyplot as plt
from time import time
from numba import cuda
N = 640000
def main():
    """Time the serial kernel against the numba/CUDA parallel one.

    The parallel version is timed twice so the second measurement
    excludes the one-off JIT compilation cost.
    """
    x = np.linspace(0, 1, N, endpoint=True)

    from serial import sArray
    t0 = time()
    f = sArray(x)
    print("--- Serial timing: %s seconds ---" % (time() - t0))

    # Shadows the serial sArray with the parallel implementation.
    from parallel import sArray
    t0 = time()
    fpar = sArray(x)
    print("--- 1st parallel timing: %s seconds ---" % (time() - t0))

    t0 = time()
    fpar = sArray(x)
    print("--- 2nd parallel timing: %s seconds ---" % (time() - t0))
# Run the timing comparison only when executed as a script.
if __name__ == '__main__':
    main()
"numpy.linspace",
"parallel.sArray",
"time.time"
] | [((126, 161), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {'endpoint': '(True)'}), '(0, 1, N, endpoint=True)\n', (137, 161), True, 'import numpy as np\n'), ((198, 204), 'time.time', 'time', ([], {}), '()\n', (202, 204), False, 'from time import time\n'), ((210, 219), 'parallel.sArray', 'sArray', (['x'], {}), '(x)\n', (216, 219), False, 'from parallel import sArray\n'), ((339, 345), 'time.time', 'time', ([], {}), '()\n', (343, 345), False, 'from time import time\n'), ((354, 363), 'parallel.sArray', 'sArray', (['x'], {}), '(x)\n', (360, 363), False, 'from parallel import sArray\n'), ((459, 465), 'time.time', 'time', ([], {}), '()\n', (463, 465), False, 'from time import time\n'), ((474, 483), 'parallel.sArray', 'sArray', (['x'], {}), '(x)\n', (480, 483), False, 'from parallel import sArray\n'), ((231, 237), 'time.time', 'time', ([], {}), '()\n', (235, 237), False, 'from time import time\n'), ((375, 381), 'time.time', 'time', ([], {}), '()\n', (379, 381), False, 'from time import time\n'), ((495, 501), 'time.time', 'time', ([], {}), '()\n', (499, 501), False, 'from time import time\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
# Local imports
from jetmontecarlo.tests.simple_tests.test_simpleSampler import *
from jetmontecarlo.montecarlo.integrator import *
from jetmontecarlo.utils.color_utils import *
# Parameters
NUM_SAMPLES = int(1e4)
NUM_BINS = 10
EPSILON = 1e-5
showPlots = True
savePlots = False
def test_weight(x, y, n, m):
weight = (n+1.)*x**n * (m+1.)*y**m
return weight
# ------------------------------------
# Linear Integrators:
# ------------------------------------
def test_Simple2DLinIntegrator_firstbin(plot_2d=False):
    """MC-integrate the density (n+1)x^n (m+1)y^m on linear bins with the
    boundary condition fixed at the first bin, then plot the Monte Carlo,
    interpolated and analytic (x^(n+1) y^(m+1)) surfaces and their
    absolute difference.

    Args:
        plot_2d: if True, draw flat pcolormesh panels instead of 3D
            surfaces (no error bars in that mode).
    """
    # Sampling
    testSampler_1 = simpleSampler('lin')
    testSampler_1.generateSamples(NUM_SAMPLES)
    samples_1 = testSampler_1.getSamples()

    testSampler_2 = simpleSampler('lin')
    testSampler_2.generateSamples(NUM_SAMPLES)
    samples_2 = testSampler_2.getSamples()

    # Setting up integrator
    testInt = integrator_2d()
    testInt.setFirstBinBndCondition(0.)
    testInt.setBins(NUM_BINS, [samples_1, samples_2], 'lin')

    for n in range(4):
        m = 1
        # Weights, binned observables, and area
        weights = test_weight(samples_1, samples_2, n, m)
        jacs = (np.array(testSampler_1.jacobians)
                * np.array(testSampler_2.jacobians))
        obs = [samples_1, samples_2]
        area = testSampler_1.area * testSampler_2.area

        testInt.setDensity(obs, weights * jacs, area)
        testInt.integrate()

        integral = testInt.integral
        int_err = testInt.integralErr

        # First-bin condition: evaluate at the upper bin edges.
        xs = testInt.bins[0][1:]
        ys = testInt.bins[1][1:]

        testInt.makeInterpolatingFn()
        interp_mc = testInt.interpFn
        integral_interp = interp_mc(xs, ys)

        xs, ys = np.meshgrid(xs, ys)
        # Panels: MC result, its interpolation, the analytic answer,
        # and the |MC - interpolation| residual.
        zs = [integral, integral_interp, xs**(n+1) * ys**(m+1)]
        zs.append(abs(zs[0] - zs[1]))

        zlims = [(0, 1), (0, 1), (0, 1), (0, .1)]
        titles = ['Monte Carlo', 'Interpolation',
                  'Analytic', '|Difference|']

        projection = '3d'
        figsize = plt.figaspect(0.5)
        if plot_2d:
            projection = None
            figsize = (15, 4)
        fig = plt.figure(figsize=figsize)
        fig.suptitle('MC Integration to determine '
                     + 'x^{} y^{}'.format(n+1, m+1))
        axes = []
        for i in range(4):
            ax = fig.add_subplot(1, 4, i+1, projection=projection)
            ax.set_title(titles[i])
            if plot_2d:
                axes.append(ax)
                im = ax.pcolormesh(xs, ys, zs[i], vmin=0, vmax=1)
            else:
                my_col = cm.coolwarm(zs[i])
                ax.plot_surface(xs, ys, zs[i],
                                rstride=1, cstride=1,
                                facecolors=my_col,
                                linewidth=0, antialiased=False)
                ax.set_zlim(zlims[i])
                if i == 0 or i == 3:
                    # Plotting errorbars
                    fx = xs.flatten()
                    fy = ys.flatten()
                    fz = zs[i].flatten()
                    fzerr = int_err.flatten()
                    fcols = my_col.reshape(fx.shape[0], 4)
                    for j in np.arange(0, len(fx)):
                        ax.plot([fx[j], fx[j]], [fy[j], fy[j]],
                                [fz[j]+fzerr[j], fz[j]-fzerr[j]],
                                marker="|", color=fcols[j], zorder=5)
        if plot_2d:
            axes = np.array(axes)
            fig.colorbar(im, ax=axes.ravel().tolist())
        fig.savefig('simple_2d_lin_firstbin_test_'
                    + str(n+1) + '_' + str(m+1) + '.pdf',
                    format='pdf')
def test_Simple2DLinIntegrator_lastbin(plot_2d=False):
    """MC-integrate the density (n+1)x^n (m+1)y^m on linear bins with the
    boundary condition fixed at the last bin, then plot the Monte Carlo,
    interpolated and analytic (1-x^(n+1))(1-y^(m+1)) surfaces and their
    absolute difference.

    Args:
        plot_2d: if True, draw flat pcolormesh panels instead of 3D
            surfaces (no error bars in that mode).
    """
    # Sampling
    testSampler_1 = simpleSampler('lin')
    testSampler_1.generateSamples(NUM_SAMPLES)
    samples_1 = testSampler_1.getSamples()

    testSampler_2 = simpleSampler('lin')
    testSampler_2.generateSamples(NUM_SAMPLES)
    samples_2 = testSampler_2.getSamples()

    # Setting up integrator
    testInt = integrator_2d()
    testInt.setLastBinBndCondition([0., 'plus'])
    testInt.setBins(NUM_BINS, [samples_1, samples_2], 'lin')

    for n in range(4):
        m = 1
        # Weights, binned observables, and area
        weights = test_weight(samples_1, samples_2, n, m)
        jacs = (np.array(testSampler_1.jacobians)
                * np.array(testSampler_2.jacobians))
        obs = [samples_1, samples_2]
        area = testSampler_1.area * testSampler_2.area

        testInt.setDensity(obs, weights * jacs, area)
        testInt.integrate()

        integral = testInt.integral
        int_err = testInt.integralErr

        # Last-bin condition: evaluate at the lower bin edges.
        xs = testInt.bins[0][:-1]
        ys = testInt.bins[1][:-1]

        testInt.makeInterpolatingFn()
        interp_mc = testInt.interpFn
        integral_interp = interp_mc(xs, ys)

        xs, ys = np.meshgrid(xs, ys)
        # Bug fix: the analytic surface used ys**(n+1); the y-exponent
        # is m (cf. the title and the log lastbin test, which use m+1).
        zs = [integral, integral_interp,
              (1-xs**(n+1)) * (1-ys**(m+1))]
        zs.append(abs(zs[0] - zs[1]))

        zlims = [(0, 1), (0, 1), (0, 1), (0, .1)]
        titles = ['Monte Carlo', 'Interpolation',
                  'Analytic', '|Difference|']

        projection = '3d'
        figsize = plt.figaspect(0.5)
        if plot_2d:
            projection = None
            figsize = (15, 4)
        fig = plt.figure(figsize=figsize)
        fig.suptitle('MC Integration to determine '
                     + '(1-x^{})(1-y^{})'.format(n+1, m+1))
        axes = []
        for i in range(4):
            ax = fig.add_subplot(1, 4, i+1, projection=projection)
            ax.set_title(titles[i])
            if plot_2d:
                axes.append(ax)
                im = ax.pcolormesh(xs, ys, zs[i], vmin=0, vmax=1)
            else:
                my_col = cm.coolwarm(zs[i])
                ax.plot_surface(xs, ys, zs[i],
                                rstride=1, cstride=1,
                                facecolors=my_col,
                                linewidth=0, antialiased=False)
                ax.set_zlim(zlims[i])
                if i == 0 or i == 3:
                    # Plotting errorbars
                    fx = xs.flatten()
                    fy = ys.flatten()
                    fz = zs[i].flatten()
                    fzerr = int_err.flatten()
                    fcols = my_col.reshape(fx.shape[0], 4)
                    for j in np.arange(0, len(fx)):
                        ax.plot([fx[j], fx[j]], [fy[j], fy[j]],
                                [fz[j]+fzerr[j], fz[j]-fzerr[j]],
                                marker="|", color=fcols[j], zorder=5)
        if plot_2d:
            axes = np.array(axes)
            fig.colorbar(im, ax=axes.ravel().tolist())
        fig.savefig('simple_2d_lin_lastbin_test_'
                    + str(n+1) + '_' + str(m+1) + '.pdf',
                    format='pdf')
# ------------------------------------
# Logarithmic Integrators:
# ------------------------------------
def test_Simple2DLogIntegrator_firstbin(plot_2d=False):
    """Same check as the linear first-bin test but with logarithmic
    sampling/binning (regulated at EPSILON); axes are plotted in log10.

    Args:
        plot_2d: if True, draw flat pcolormesh panels instead of 3D
            surfaces (no error bars in that mode).
    """
    # Sampling
    testSampler_1 = simpleSampler('log', epsilon=EPSILON)
    testSampler_1.generateSamples(NUM_SAMPLES)
    samples_1 = testSampler_1.getSamples()

    testSampler_2 = simpleSampler('log', epsilon=EPSILON)
    testSampler_2.generateSamples(NUM_SAMPLES)
    samples_2 = testSampler_2.getSamples()

    # Setting up integrator
    testInt = integrator_2d()
    testInt.setFirstBinBndCondition(0.)
    testInt.setBins(NUM_BINS, [samples_1, samples_2], 'log')

    for n in range(4):
        m = 1
        # Weights, binned observables, and area
        weights = test_weight(samples_1, samples_2, n, m)
        jacs = (np.array(testSampler_1.jacobians)
                * np.array(testSampler_2.jacobians))
        obs = [samples_1, samples_2]
        area = testSampler_1.area * testSampler_2.area

        testInt.setDensity(obs, weights * jacs, area)
        testInt.integrate()

        integral = testInt.integral
        int_err = testInt.integralErr

        # First-bin condition: evaluate at the upper bin edges.
        xs = testInt.bins[0][1:]
        ys = testInt.bins[1][1:]

        testInt.makeInterpolatingFn()
        interp_mc = testInt.interpFn
        integral_interp = interp_mc(xs, ys)

        xs, ys = np.meshgrid(xs, ys)
        zs = [integral, integral_interp, xs**(n+1) * ys**(m+1)]
        zs.append(abs(zs[0] - zs[1]))

        # Display the log-spaced axes in log10 coordinates.
        xs = np.log10(xs)
        ys = np.log10(ys)

        zlims = [(0, 1), (0, 1), (0, 1), (0, .1)]
        titles = ['Monte Carlo', 'Interpolation',
                  'Analytic', '|Difference|']

        projection = '3d'
        figsize = plt.figaspect(0.5)
        if plot_2d:
            projection = None
            figsize = (15, 4)
        fig = plt.figure(figsize=figsize)
        fig.suptitle('MC Integration to determine '
                     + 'x^{} y^{}'.format(n+1, m+1))
        axes = []
        for i in range(4):
            ax = fig.add_subplot(1, 4, i+1, projection=projection)
            ax.set_title(titles[i])
            if plot_2d:
                axes.append(ax)
                im = ax.pcolormesh(xs, ys, zs[i], vmin=0, vmax=1)
            else:
                my_col = cm.coolwarm(zs[i])
                ax.plot_surface(xs, ys, zs[i],
                                rstride=1, cstride=1,
                                facecolors=my_col,
                                linewidth=0, antialiased=False)
                ax.set_zlim(zlims[i])
                if i == 0 or i == 3:
                    # Plotting errorbars
                    fx = xs.flatten()
                    fy = ys.flatten()
                    fz = zs[i].flatten()
                    fzerr = int_err.flatten()
                    fcols = my_col.reshape(fx.shape[0], 4)
                    for j in np.arange(0, len(fx)):
                        ax.plot([fx[j], fx[j]], [fy[j], fy[j]],
                                [fz[j]+fzerr[j], fz[j]-fzerr[j]],
                                marker="|", color=fcols[j], zorder=5)
        if plot_2d:
            axes = np.array(axes)
            fig.colorbar(im, ax=axes.ravel().tolist())
        fig.savefig('simple_2d_log_firstbin_test_'
                    + str(n+1) + '_' + str(m+1) + '.pdf',
                    format='pdf')
def test_Simple2DLogIntegrator_lastbin(plot_2d=False):
    """Same check as the linear last-bin test but with logarithmic
    sampling/binning (regulated at EPSILON); axes are plotted in log10.

    Args:
        plot_2d: if True, draw flat pcolormesh panels instead of 3D
            surfaces (no error bars in that mode).
    """
    # Sampling
    testSampler_1 = simpleSampler('log', epsilon=EPSILON)
    testSampler_1.generateSamples(NUM_SAMPLES)
    samples_1 = testSampler_1.getSamples()

    testSampler_2 = simpleSampler('log', epsilon=EPSILON)
    testSampler_2.generateSamples(NUM_SAMPLES)
    samples_2 = testSampler_2.getSamples()

    # Setting up integrator
    testInt = integrator_2d()
    testInt.setLastBinBndCondition([0., 'plus'])
    testInt.setBins(NUM_BINS, [samples_1, samples_2], 'log')

    for n in range(4):
        m = 1
        # Weights, binned observables, and area
        weights = test_weight(samples_1, samples_2, n, m)
        jacs = (np.array(testSampler_1.jacobians)
                * np.array(testSampler_2.jacobians))
        obs = [samples_1, samples_2]
        area = testSampler_1.area * testSampler_2.area

        testInt.setDensity(obs, weights * jacs, area)
        testInt.integrate()

        integral = testInt.integral
        int_err = testInt.integralErr

        # Last-bin condition: evaluate at the lower bin edges.
        xs = testInt.bins[0][:-1]
        ys = testInt.bins[1][:-1]

        testInt.makeInterpolatingFn()
        interp_mc = testInt.interpFn
        integral_interp = interp_mc(xs, ys)

        xs, ys = np.meshgrid(xs, ys)
        zs = [integral, integral_interp,
              (1-xs**(n+1)) * (1-ys**(m+1))]
        zs.append(abs(zs[0] - zs[1]))

        # Display the log-spaced axes in log10 coordinates.
        xs = np.log10(xs)
        ys = np.log10(ys)

        zlims = [(0, 1), (0, 1), (0, 1), (0, .1)]
        titles = ['Monte Carlo', 'Interpolation',
                  'Analytic', '|Difference|']

        projection = '3d'
        figsize = plt.figaspect(0.5)
        if plot_2d:
            projection = None
            figsize = (15, 4)
        fig = plt.figure(figsize=figsize)
        fig.suptitle('MC Integration to determine '
                     + '(1-x^{})(1-y^{})'.format(n+1, m+1))
        axes = []
        for i in range(4):
            ax = fig.add_subplot(1, 4, i+1, projection=projection)
            ax.set_title(titles[i])
            if plot_2d:
                axes.append(ax)
                im = ax.pcolormesh(xs, ys, zs[i], vmin=0, vmax=1)
            else:
                my_col = cm.coolwarm(zs[i])
                ax.plot_surface(xs, ys, zs[i],
                                rstride=1, cstride=1,
                                facecolors=my_col,
                                linewidth=0, antialiased=False)
                ax.set_zlim(zlims[i])
                if i == 0 or i == 3:
                    # Plotting errorbars
                    fx = xs.flatten()
                    fy = ys.flatten()
                    fz = zs[i].flatten()
                    fzerr = int_err.flatten()
                    fcols = my_col.reshape(fx.shape[0], 4)
                    for j in np.arange(0, len(fx)):
                        ax.plot([fx[j], fx[j]], [fy[j], fy[j]],
                                [fz[j]+fzerr[j], fz[j]-fzerr[j]],
                                marker="|", color=fcols[j], zorder=5)
        if plot_2d:
            axes = np.array(axes)
            fig.colorbar(im, ax=axes.ravel().tolist())
        fig.savefig('simple_2d_log_lastbin_test_'
                    + str(n+1) + '_' + str(m+1) + '.pdf',
                    format='pdf')
# Implementing tests
if __name__ == '__main__':
    # Run all four (first/last bin) x (lin/log) integration checks.
    test_Simple2DLinIntegrator_firstbin()
    test_Simple2DLinIntegrator_lastbin()
    test_Simple2DLogIntegrator_firstbin()
    test_Simple2DLogIntegrator_lastbin()
| [
"numpy.log10",
"matplotlib.pyplot.figaspect",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.cm.coolwarm",
"numpy.meshgrid"
] | [((1745, 1764), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {}), '(xs, ys)\n', (1756, 1764), True, 'import numpy as np\n'), ((2059, 2077), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['(0.5)'], {}), '(0.5)\n', (2072, 2077), True, 'import matplotlib.pyplot as plt\n'), ((2173, 2200), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2183, 2200), True, 'import matplotlib.pyplot as plt\n'), ((4905, 4924), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {}), '(xs, ys)\n', (4916, 4924), True, 'import numpy as np\n'), ((5241, 5259), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['(0.5)'], {}), '(0.5)\n', (5254, 5259), True, 'import matplotlib.pyplot as plt\n'), ((5355, 5382), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (5365, 5382), True, 'import matplotlib.pyplot as plt\n'), ((8223, 8242), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {}), '(xs, ys)\n', (8234, 8242), True, 'import numpy as np\n'), ((8359, 8371), 'numpy.log10', 'np.log10', (['xs'], {}), '(xs)\n', (8367, 8371), True, 'import numpy as np\n'), ((8385, 8397), 'numpy.log10', 'np.log10', (['ys'], {}), '(ys)\n', (8393, 8397), True, 'import numpy as np\n'), ((8590, 8608), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['(0.5)'], {}), '(0.5)\n', (8603, 8608), True, 'import matplotlib.pyplot as plt\n'), ((8704, 8731), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (8714, 8731), True, 'import matplotlib.pyplot as plt\n'), ((11471, 11490), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {}), '(xs, ys)\n', (11482, 11490), True, 'import numpy as np\n'), ((11629, 11641), 'numpy.log10', 'np.log10', (['xs'], {}), '(xs)\n', (11637, 11641), True, 'import numpy as np\n'), ((11655, 11667), 'numpy.log10', 'np.log10', (['ys'], {}), '(ys)\n', (11663, 11667), True, 'import numpy as np\n'), ((11860, 11878), 'matplotlib.pyplot.figaspect', 'plt.figaspect', (['(0.5)'], 
{}), '(0.5)\n', (11873, 11878), True, 'import matplotlib.pyplot as plt\n'), ((11974, 12001), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (11984, 12001), True, 'import matplotlib.pyplot as plt\n'), ((1204, 1237), 'numpy.array', 'np.array', (['testSampler_1.jacobians'], {}), '(testSampler_1.jacobians)\n', (1212, 1237), True, 'import numpy as np\n'), ((1256, 1289), 'numpy.array', 'np.array', (['testSampler_2.jacobians'], {}), '(testSampler_2.jacobians)\n', (1264, 1289), True, 'import numpy as np\n'), ((3484, 3498), 'numpy.array', 'np.array', (['axes'], {}), '(axes)\n', (3492, 3498), True, 'import numpy as np\n'), ((4362, 4395), 'numpy.array', 'np.array', (['testSampler_1.jacobians'], {}), '(testSampler_1.jacobians)\n', (4370, 4395), True, 'import numpy as np\n'), ((4414, 4447), 'numpy.array', 'np.array', (['testSampler_2.jacobians'], {}), '(testSampler_2.jacobians)\n', (4422, 4447), True, 'import numpy as np\n'), ((6673, 6687), 'numpy.array', 'np.array', (['axes'], {}), '(axes)\n', (6681, 6687), True, 'import numpy as np\n'), ((7682, 7715), 'numpy.array', 'np.array', (['testSampler_1.jacobians'], {}), '(testSampler_1.jacobians)\n', (7690, 7715), True, 'import numpy as np\n'), ((7734, 7767), 'numpy.array', 'np.array', (['testSampler_2.jacobians'], {}), '(testSampler_2.jacobians)\n', (7742, 7767), True, 'import numpy as np\n'), ((10016, 10030), 'numpy.array', 'np.array', (['axes'], {}), '(axes)\n', (10024, 10030), True, 'import numpy as np\n'), ((10928, 10961), 'numpy.array', 'np.array', (['testSampler_1.jacobians'], {}), '(testSampler_1.jacobians)\n', (10936, 10961), True, 'import numpy as np\n'), ((10980, 11013), 'numpy.array', 'np.array', (['testSampler_2.jacobians'], {}), '(testSampler_2.jacobians)\n', (10988, 11013), True, 'import numpy as np\n'), ((13293, 13307), 'numpy.array', 'np.array', (['axes'], {}), '(axes)\n', (13301, 13307), True, 'import numpy as np\n'), ((2619, 2637), 'matplotlib.cm.coolwarm', 
'cm.coolwarm', (['zs[i]'], {}), '(zs[i])\n', (2630, 2637), False, 'from matplotlib import cm\n'), ((5808, 5826), 'matplotlib.cm.coolwarm', 'cm.coolwarm', (['zs[i]'], {}), '(zs[i])\n', (5819, 5826), False, 'from matplotlib import cm\n'), ((9151, 9169), 'matplotlib.cm.coolwarm', 'cm.coolwarm', (['zs[i]'], {}), '(zs[i])\n', (9162, 9169), False, 'from matplotlib import cm\n'), ((12428, 12446), 'matplotlib.cm.coolwarm', 'cm.coolwarm', (['zs[i]'], {}), '(zs[i])\n', (12439, 12446), False, 'from matplotlib import cm\n')] |
import numpy as np
import torch, functools
import table_batched_embeddings, table_batched_embeddings_ops
np.random.seed(42)
def div_round_up(a, b):
    """Round *a* up to the nearest multiple of *b*.

    Note: despite the name, this returns the rounded value itself
    (ceil(a / b) * b), not the quotient — callers use it to pad
    dimensions to a multiple of *b*.
    """
    quotient = (a + b - 1) // b
    return int(quotient) * b
def get_table_batched_offsets_from_dense(merged_indices):
    """Flatten dense (T, B, L) indices into CSR-style (indices, offsets)
    CUDA tensors for table-batched embedding lookups.

    Every bag has the same length L, so offsets are simply
    [0, L, 2L, ..., T*B*L] as int32.
    """
    (T, B, L) = merged_indices.size()
    lengths = np.ones((T, B)) * L
    flat_lengths = lengths.flatten()
    return (
        merged_indices.int().contiguous().view(-1).cuda(),
        # Prepend 0 and take the cumulative sum of the bag lengths.
        torch.tensor(([0] + np.cumsum(flat_lengths).tolist())).int().cuda(),
    )
def benchmark_torch_function(iters, warmup_iters, f, *args, **kwargs):
    """Return the mean wall-time in seconds of ``f(*args, **kwargs)``.

    Runs ``warmup_iters`` untimed calls first, then times ``iters``
    calls with CUDA events, synchronising around each call so the
    measurement covers the full GPU execution.
    """
    for _ in range(warmup_iters):  # Warmup
        f(*args, **kwargs)
    torch.cuda.synchronize()
    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)
    elapsed = 0.0
    for _ in range(iters):
        torch.cuda.synchronize()
        start_event.record()
        f(*args, **kwargs)
        end_event.record()
        torch.cuda.synchronize()
        # elapsed_time() reports milliseconds; accumulate seconds.
        elapsed += start_event.elapsed_time(end_event) * 1.0e-3
    return elapsed / iters
def benchmark_conv(batch_size, H, W, IC, OC, stride, dilation, FH, FW, is_dw, iters, warmup_iters, backward=False):
    """Time a Conv2d forward (or backward) pass on random NCHW input.

    is_dw selects a depthwise conv (groups=IC).  NOTE(review): the
    .cuda() after requires_grad=True makes the input a non-leaf, so its
    gradient is not retained — presumably irrelevant for timing.
    """
    input_feature = torch.randn(batch_size, IC, H, W, requires_grad=True).cuda() # NCHW
    padding = []
    for f in [FH, FW]:
        padding.append((f - 1) // 2) # Only consider SAME with dilation = 1 for now
    conv = torch.nn.Conv2d(IC, OC, (FH, FW), stride=stride, dilation=dilation, padding=padding, groups=(IC if is_dw else 1)).cuda()
    if not backward:
        time_per_iter = benchmark_torch_function(
            iters,
            warmup_iters,
            conv,
            input_feature
        )
    else:
        # Forward once, then repeatedly time backward on the same graph.
        out = conv(input_feature)
        time_per_iter = benchmark_torch_function(
            iters,
            warmup_iters,
            out.mean().backward,
            retain_graph=True
        )
    return time_per_iter
def benchmark_linear(M, N, K, iters, warmup_iters, backward=False):
    """Time a torch.nn.Linear (M,K)->(M,N) forward or backward pass."""
    inp = torch.randn(M, K, requires_grad=True).cuda()
    layer = torch.nn.Linear(K, N).cuda()
    if backward:
        # Forward once, then repeatedly time backward on the same graph.
        loss = layer(inp).mean()
        return benchmark_torch_function(
            iters,
            warmup_iters,
            loss.backward,
            retain_graph=True,
        )
    return benchmark_torch_function(iters, warmup_iters, layer, inp)
def benchmark_fc(batch_size, M, N, K, iters, warmup_iters, backward=False):
    """Time an (M,K) x (K,N) matmul on the GPU.

    batch_size == 1 uses torch.addmm with bias C; larger batches use
    torch.bmm on (batch, M, K) x (batch, K, N) operands.
    """
    if batch_size == 1:
        A = torch.randn(M, K, requires_grad=True).cuda()
        B = torch.randn(N, K, requires_grad=True).cuda()
        C = torch.randn(M, N, requires_grad=True).cuda()
        if not backward:
            time_per_iter = benchmark_torch_function(
                iters,
                warmup_iters,
                torch.addmm,
                C, A, B.T,
            )
        else:
            torch.addmm(C, A, B.T)
            # NOTE(review): backward is timed on C.mean(), i.e. on an
            # input tensor, not the addmm result — confirm intended.
            time_per_iter = benchmark_torch_function(
                iters,
                warmup_iters,
                C.mean().backward,
                retain_graph=True,
            )
        return time_per_iter
    else:
        A = torch.randn(batch_size, M, K, requires_grad=True).cuda()
        B = torch.randn(batch_size, N, K, requires_grad=True).cuda()
        if not backward:
            time_per_iter = benchmark_torch_function(
                iters,
                warmup_iters,
                torch.bmm,
                A, torch.transpose(B, 1, 2),
            )
        else:
            C = torch.bmm(A, torch.transpose(B, 1, 2))
            time_per_iter = benchmark_torch_function(
                iters,
                warmup_iters,
                C.mean().backward,
                retain_graph=True,
            )
        return time_per_iter
def benchmark_tril(batch_size, M, N, diag, iters, warmup_iters, backward=False):
    """Time gathering lower-triangular entries of a (batch, M, N) tensor.

    The (li, lj) index lists enumerate, per row i, the columns
    j < i + diag, and the gather uses advanced indexing over the last
    two dimensions.
    """
    assert M == N, "Input tensor should be square!"
    Z = torch.randn(batch_size, M, N, requires_grad=True).cuda()
    li = torch.tensor([i for i in range(M) for j in range(i + diag)])
    lj = torch.tensor([j for i in range(N) for j in range(i + diag)])

    def zflat_wrapper(Z, i, j):
        # Advanced indexing: result shape is (batch, len(i)).
        return Z[:, i, j]

    if not backward:
        time_per_iter = benchmark_torch_function(
            iters,
            warmup_iters,
            zflat_wrapper,
            Z,
            li,
            lj
        )
    else:
        # Forward once, then repeatedly time backward on the same graph.
        out = zflat_wrapper(Z, li, lj)
        time_per_iter = benchmark_torch_function(
            iters,
            warmup_iters,
            out.mean().backward,
            retain_graph=True,
        )
    return time_per_iter
def benchmark_bn(batch_size, H, W, OC, iters, warmup_iters, backward=False):
    """Time BatchNorm2d forward (or backward) on a random NCHW tensor."""
    feats = torch.randn(batch_size, OC, H, W, requires_grad=True).cuda()
    bn = torch.nn.BatchNorm2d(OC).cuda()
    if backward:
        # Forward once, then repeatedly time backward on the same graph.
        loss = bn(feats).mean()
        return benchmark_torch_function(
            iters,
            warmup_iters,
            loss.backward,
            retain_graph=True,
        )
    return benchmark_torch_function(iters, warmup_iters, bn, feats)
def benchmark_concat(sizes, dim, iters, warmup_iters):
    """Time torch.cat over freshly allocated random tensors along *dim*."""
    tensors = list(map(torch.randn, sizes))
    return benchmark_torch_function(
        iters,
        warmup_iters,
        torch.cat,
        tensors,
        dim=dim,
    )
def benchmark_memcpy(size, iters, warmup_iters):
    """Time a host-to-device copy of a random tensor with *size* elements."""
    host_tensor = torch.randn(size)
    return benchmark_torch_function(
        iters,
        warmup_iters,
        host_tensor.to,
        device="cuda",
    )
def benchmark_transpose(batch_size, M, N, trans_type, iters, warmup_iters):
    """Time materialising (contiguous) a permuted (batch, M, N) CUDA tensor.

    trans_type selects the permutation: 0 -> (0, 2, 1), 1 -> (2, 1, 0),
    anything else -> (1, 0, 2).
    """
    A = torch.randn(batch_size, M, N).cuda()
    if trans_type == 0:
        perm = (0, 2, 1)
    elif trans_type == 1:
        perm = (2, 1, 0)
    else:
        perm = (1, 0, 2)
    # The permute produces a view; .contiguous (the timed call) does the copy.
    return benchmark_torch_function(
        iters,
        warmup_iters,
        A.permute(*perm).contiguous,
    )
def benchmark_pool(batch_size, H, W, OC, stride, dilation, FH, FW, pool_type, iters, warmup_iters, backward=False):
    """Time a 2D pooling forward (or backward) pass on random NCHW input.

    pool_type "max" uses MaxPool2d (with dilation); anything else uses
    AvgPool2d.  Padding is SAME-style for dilation = 1.
    """
    A = torch.randn(batch_size, OC, H, W, requires_grad=True).cuda()
    padding = []
    for f in [FH, FW]:
        padding.append((f - 1) // 2)  # Only consider SAME with dilation = 1 for now
    if pool_type == "max":  # Max
        pool = torch.nn.MaxPool2d((FH, FW), stride=stride, dilation=dilation, padding=padding)
    else:  # Avg
        pool = torch.nn.AvgPool2d((FH, FW), stride=stride, padding=padding)
    if not backward:
        time_per_iter = benchmark_torch_function(
            iters,
            warmup_iters,
            pool,
            A
        )
    else:
        # Bug fix: was `torch.pool(A)` — torch has no `pool` attribute
        # (AttributeError); call the constructed pooling layer instead.
        output = pool(A)
        time_per_iter = benchmark_torch_function(
            iters,
            warmup_iters,
            output.mean().backward,
            retain_graph=True,
        )
    return time_per_iter
def benchmark_relu(size, iters, warmup_iters, backward=False):
    """Time torch.relu forward (or backward) on a random CUDA tensor."""
    A = torch.randn(size, requires_grad=True).cuda()
    if backward:
        # Forward once, then repeatedly time backward on the same graph.
        activation = torch.relu(A)
        return benchmark_torch_function(
            iters,
            warmup_iters,
            activation.mean().backward,
            retain_graph=True,
        )
    return benchmark_torch_function(iters, warmup_iters, torch.relu, A)
def benchmark_embedding_lookup(B, E, T, L, D, BT_block_size, iters, warmup_iters, backward, shmem=False, sgd=False, fp16=False, managed=False, mixed=False):
    """Benchmark table-batched embedding-bag forward or backward kernels.

    Args:
        B: batch size per table.
        E: rows per table — an int, an 'n0-n1-...' string, or a list/tuple
           with one entry per table (a single value is replicated T times).
        T: number of embedding tables.
        L: pooling factor (indices per bag).
        D: embedding dimension (when `mixed`, the target average dimension).
        BT_block_size: kernel blocking factor over the B*T dimension.
        iters: timed iterations.
        warmup_iters: untimed warm-up iterations.
        backward: if True, time the backward pass instead of the forward.
        shmem: use the shared-memory kernel variant.
        sgd: time SGD backward instead of Adagrad (backward only).
        fp16: store embedding weights as fp16.
        managed: place weights in host-mapped instead of device memory.
        mixed: give each table its own random dimension around D.

    Returns:
        Average time per iteration (seconds) from benchmark_torch_function.
    """
    # Normalize E into one row count per table.
    # Bug fix: the original guard was `isinstance(E, list)` followed by
    # `E.split('-')`, which raises AttributeError for lists and left string
    # inputs unparsed; handle str, list/tuple, and scalar explicitly.
    if isinstance(E, str):
        Es = [int(x) for x in E.split('-')]
    elif isinstance(E, (list, tuple)):
        Es = [int(x) for x in E]
    else:
        Es = [E]
    if len(Es) == 1:
        Es = Es * T
    assert len(Es) == T
    if mixed:
        # Per-table dims drawn from [0.5*D, 1.5*D), rounded up to a multiple of 4.
        mixed_D = [
            div_round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
            for _ in range(T)
        ]
        D = np.average(mixed_D)
    cc = (
        table_batched_embeddings_ops.TableBatchedEmbeddingBags(
            T,
            Es,
            D,
            optimizer=table_batched_embeddings_ops.Optimizer.APPROX_ROWWISE_ADAGRAD,
            learning_rate=0.1,
            managed=table_batched_embeddings_ops.EmbeddingLocation.DEVICE
            if not managed
            else table_batched_embeddings_ops.EmbeddingLocation.HOST_MAPPED,
            eps=0.1,
            stochastic_rounding=False,
            fp16=fp16,
        ).cuda()
        if not mixed
        else table_batched_embeddings_ops.MixedDimTableBatchedEmbeddingBags(
            # NOTE(review): this pairs the *whole* Es list with every dim;
            # verify whether zip(Es, mixed_D) was intended.
            [(Es, d) for d in mixed_D],
            optimizer=table_batched_embeddings_ops.Optimizer.APPROX_ROWWISE_ADAGRAD,
            learning_rate=0.1,
            managed=table_batched_embeddings_ops.EmbeddingLocation.DEVICE
            if not managed
            else table_batched_embeddings_ops.EmbeddingLocation.HOST_MAPPED,
            eps=0.1,
            stochastic_rounding=False,
            fp16=fp16,
        ).cuda()
    )
    # When R is True, the w* wrappers below re-randomize the index tensor
    # before each timed call; R is hard-coded off, so they are no-ops today.
    R = False

    def w2(c):
        if not R:
            return c

        @functools.wraps(c)
        def z(w, o, x, *args):
            c(w, o, x.random_(0, E - 1), *args)

        return z

    def w3(c):
        if not R:
            return c

        @functools.wraps(c)
        def z(g, w, o, x, *args):
            c(g, w, o, x.random_(0, E - 1), *args)

        return z

    def w4(c):
        if not R:
            return c

        @functools.wraps(c)
        def z(g, w, o, a, x, *args):
            c(g, w, o, a, x.random_(0, E - 1), *args)

        return z

    def w6(c):
        if not R:
            return c

        @functools.wraps(c)
        def z(g, w, o, a, b, d, x, *args):
            c(g, w, o, a, b, d, x.random_(0, E - 1), *args)

        return z

    # One (B, L) int index tensor per table, stacked to (T, B, L) and then
    # flattened into (indices, offsets) CSR-style form.
    idxs = []
    for x in range(T):
        idxs.append(torch.randint(low=0, high=Es[x] - 1, size=(B, L)).int().cuda())
    merged_indices = torch.stack(idxs, dim=0)
    (indices, offsets) = get_table_batched_offsets_from_dense(merged_indices)
    assert indices.shape[0] == B * T * L
    assert all(
        l == L for l in (offsets[1:] - offsets[:-1]).detach().cpu().numpy().tolist()
    )
    per_sample_weights = None
    stochastic = False  # TODO: Fix this
    exact = 1
    # Sanity check: the blocked kernel must match the unblocked reference.
    y0 = (
        table_batched_embeddings.forward(
            cc.embedding_weights,
            cc.table_offsets,
            indices,
            offsets,
            per_sample_weights,
            L,
            1,
            shmem,
        )
        if not mixed
        else table_batched_embeddings.forward_mixed_D(
            cc.embedding_weights,
            cc.table_offsets,
            cc.dim_offsets,
            cc.total_D,
            indices,
            offsets,
            per_sample_weights,
            L,
            1,
            shmem,
        )
    )
    y = (
        table_batched_embeddings.forward(
            cc.embedding_weights,
            cc.table_offsets,
            indices,
            offsets,
            per_sample_weights,
            L,
            BT_block_size,
            shmem,
        )
        if not mixed
        else table_batched_embeddings.forward_mixed_D(
            cc.embedding_weights,
            cc.table_offsets,
            cc.dim_offsets,
            cc.total_D,
            indices,
            offsets,
            per_sample_weights,
            L,
            BT_block_size,
            False,
        )
    )
    torch.testing.assert_allclose(y, y0)
    if not backward:
        time_per_iter = (
            benchmark_torch_function(
                iters,
                warmup_iters,
                w2(table_batched_embeddings.forward),
                cc.embedding_weights,
                cc.table_offsets,
                indices,
                offsets,
                per_sample_weights,
                L,
                BT_block_size,
                shmem,
            )
            if not mixed
            else benchmark_torch_function(
                iters,
                warmup_iters,
                w4(table_batched_embeddings.forward_mixed_D),
                cc.embedding_weights,
                cc.table_offsets,
                cc.dim_offsets,
                cc.total_D,
                indices,
                offsets,
                per_sample_weights,
                L,
                BT_block_size,
                shmem,
            )
        )
    else:  # backward
        go = torch.randn_like(y0)
        learning_rate = 0.05
        eps = 0.01
        if sgd:
            time_per_iter = benchmark_torch_function(
                iters,
                warmup_iters,
                w3(table_batched_embeddings.backward_sgd),
                go,
                cc.embedding_weights,
                cc.table_offsets,
                indices,
                offsets,
                learning_rate,
                L,
                BT_block_size,
                shmem,
            )
        else:  # adagrad
            # `exact` is hard-coded to 1 above, so the approx branch is
            # currently unreachable; kept for easy toggling.
            if not exact:
                time_per_iter = (
                    benchmark_torch_function(
                        iters,
                        warmup_iters,
                        w3(table_batched_embeddings.backward_approx_adagrad),
                        go,
                        cc.embedding_weights,
                        cc.table_offsets,
                        indices,
                        offsets,
                        per_sample_weights,
                        cc.optimizer_state,
                        learning_rate,
                        eps,
                        L,
                        stochastic,
                        BT_block_size,
                    )
                    if not mixed
                    else benchmark_torch_function(
                        iters,
                        warmup_iters,
                        w6(
                            table_batched_embeddings.backward_approx_adagrad_mixed_D
                        ),
                        go,
                        cc.embedding_weights,
                        cc.table_offsets,
                        cc.table_dim_offsets,
                        cc.dim_offsets,
                        cc.total_D,
                        indices,
                        offsets,
                        per_sample_weights,
                        cc.optimizer_state,
                        learning_rate,
                        eps,
                        L,
                        stochastic,
                        BT_block_size,
                    )
                )
            else:
                time_per_iter = (
                    benchmark_torch_function(
                        iters,
                        warmup_iters,
                        w3(table_batched_embeddings.backward_exact_adagrad),
                        go,
                        cc.embedding_weights,
                        cc.table_offsets,
                        indices,
                        offsets,
                        per_sample_weights,
                        cc.optimizer_state,
                        learning_rate,
                        eps,
                        stochastic,
                        BT_block_size,
                    )
                    if not mixed
                    else benchmark_torch_function(
                        iters,
                        warmup_iters,
                        w6(table_batched_embeddings.backward_exact_adagrad_mixed_D),
                        go,
                        cc.embedding_weights,
                        cc.table_offsets,
                        cc.table_dim_offsets,
                        cc.dim_offsets,
                        cc.total_D,
                        indices,
                        offsets,
                        per_sample_weights,
                        cc.optimizer_state,
                        learning_rate,
                        eps,
                        stochastic,
                        BT_block_size,
                    )
                )
    return time_per_iter
| [
"table_batched_embeddings_ops.TableBatchedEmbeddingBags",
"torch.cuda.synchronize",
"table_batched_embeddings.forward",
"torch.nn.AvgPool2d",
"table_batched_embeddings_ops.MixedDimTableBatchedEmbeddingBags",
"torch.testing.assert_allclose",
"torch.nn.BatchNorm2d",
"torch.addmm",
"torch.relu",
"fun... | [((105, 123), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (119, 123), True, 'import numpy as np\n'), ((657, 681), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (679, 681), False, 'import torch, functools\n'), ((700, 736), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (716, 736), False, 'import torch, functools\n'), ((753, 789), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (769, 789), False, 'import torch, functools\n'), ((5716, 5733), 'torch.randn', 'torch.randn', (['size'], {}), '(size)\n', (5727, 5733), False, 'import torch, functools\n'), ((10452, 10476), 'torch.stack', 'torch.stack', (['idxs'], {'dim': '(0)'}), '(idxs, dim=0)\n', (10463, 10476), False, 'import torch, functools\n'), ((11979, 12015), 'torch.testing.assert_allclose', 'torch.testing.assert_allclose', (['y', 'y0'], {}), '(y, y0)\n', (12008, 12015), False, 'import torch, functools\n'), ((298, 313), 'numpy.ones', 'np.ones', (['(T, B)'], {}), '((T, B))\n', (305, 313), True, 'import numpy as np\n'), ((846, 870), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (868, 870), False, 'import torch, functools\n'), ((962, 986), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (984, 986), False, 'import torch, functools\n'), ((5453, 5470), 'torch.randn', 'torch.randn', (['size'], {}), '(size)\n', (5464, 5470), False, 'import torch, functools\n'), ((6894, 6973), 'torch.nn.MaxPool2d', 'torch.nn.MaxPool2d', (['(FH, FW)'], {'stride': 'stride', 'dilation': 'dilation', 'padding': 'padding'}), '((FH, FW), stride=stride, dilation=dilation, padding=padding)\n', (6912, 6973), False, 'import torch, functools\n'), ((7005, 7065), 'torch.nn.AvgPool2d', 'torch.nn.AvgPool2d', (['(FH, FW)'], {'stride': 'stride', 'padding': 'padding'}), '((FH, FW), stride=stride, padding=padding)\n', (7023, 7065), False, 'import torch, 
functools\n'), ((7252, 7265), 'torch.pool', 'torch.pool', (['A'], {}), '(A)\n', (7262, 7265), False, 'import torch, functools\n'), ((7777, 7790), 'torch.relu', 'torch.relu', (['A'], {}), '(A)\n', (7787, 7790), False, 'import torch, functools\n'), ((8455, 8474), 'numpy.average', 'np.average', (['mixed_D'], {}), '(mixed_D)\n', (8465, 8474), True, 'import numpy as np\n'), ((9608, 9626), 'functools.wraps', 'functools.wraps', (['c'], {}), '(c)\n', (9623, 9626), False, 'import torch, functools\n'), ((9789, 9807), 'functools.wraps', 'functools.wraps', (['c'], {}), '(c)\n', (9804, 9807), False, 'import torch, functools\n'), ((9976, 9994), 'functools.wraps', 'functools.wraps', (['c'], {}), '(c)\n', (9991, 9994), False, 'import torch, functools\n'), ((10169, 10187), 'functools.wraps', 'functools.wraps', (['c'], {}), '(c)\n', (10184, 10187), False, 'import torch, functools\n'), ((10808, 10935), 'table_batched_embeddings.forward', 'table_batched_embeddings.forward', (['cc.embedding_weights', 'cc.table_offsets', 'indices', 'offsets', 'per_sample_weights', 'L', '(1)', 'shmem'], {}), '(cc.embedding_weights, cc.table_offsets,\n indices, offsets, per_sample_weights, L, 1, shmem)\n', (10840, 10935), False, 'import table_batched_embeddings, table_batched_embeddings_ops\n'), ((11073, 11241), 'table_batched_embeddings.forward_mixed_D', 'table_batched_embeddings.forward_mixed_D', (['cc.embedding_weights', 'cc.table_offsets', 'cc.dim_offsets', 'cc.total_D', 'indices', 'offsets', 'per_sample_weights', 'L', '(1)', 'shmem'], {}), '(cc.embedding_weights, cc.\n table_offsets, cc.dim_offsets, cc.total_D, indices, offsets,\n per_sample_weights, L, 1, shmem)\n', (11113, 11241), False, 'import table_batched_embeddings, table_batched_embeddings_ops\n'), ((11389, 11528), 'table_batched_embeddings.forward', 'table_batched_embeddings.forward', (['cc.embedding_weights', 'cc.table_offsets', 'indices', 'offsets', 'per_sample_weights', 'L', 'BT_block_size', 'shmem'], {}), '(cc.embedding_weights, 
cc.table_offsets,\n indices, offsets, per_sample_weights, L, BT_block_size, shmem)\n', (11421, 11528), False, 'import table_batched_embeddings, table_batched_embeddings_ops\n'), ((11666, 11846), 'table_batched_embeddings.forward_mixed_D', 'table_batched_embeddings.forward_mixed_D', (['cc.embedding_weights', 'cc.table_offsets', 'cc.dim_offsets', 'cc.total_D', 'indices', 'offsets', 'per_sample_weights', 'L', 'BT_block_size', '(False)'], {}), '(cc.embedding_weights, cc.\n table_offsets, cc.dim_offsets, cc.total_D, indices, offsets,\n per_sample_weights, L, BT_block_size, False)\n', (11706, 11846), False, 'import table_batched_embeddings, table_batched_embeddings_ops\n'), ((12987, 13007), 'torch.randn_like', 'torch.randn_like', (['y0'], {}), '(y0)\n', (13003, 13007), False, 'import torch, functools\n'), ((1222, 1275), 'torch.randn', 'torch.randn', (['batch_size', 'IC', 'H', 'W'], {'requires_grad': '(True)'}), '(batch_size, IC, H, W, requires_grad=True)\n', (1233, 1275), False, 'import torch, functools\n'), ((1425, 1541), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['IC', 'OC', '(FH, FW)'], {'stride': 'stride', 'dilation': 'dilation', 'padding': 'padding', 'groups': '(IC if is_dw else 1)'}), '(IC, OC, (FH, FW), stride=stride, dilation=dilation, padding\n =padding, groups=IC if is_dw else 1)\n', (1440, 1541), False, 'import torch, functools\n'), ((2032, 2069), 'torch.randn', 'torch.randn', (['M', 'K'], {'requires_grad': '(True)'}), '(M, K, requires_grad=True)\n', (2043, 2069), False, 'import torch, functools\n'), ((2090, 2111), 'torch.nn.Linear', 'torch.nn.Linear', (['K', 'N'], {}), '(K, N)\n', (2105, 2111), False, 'import torch, functools\n'), ((3036, 3058), 'torch.addmm', 'torch.addmm', (['C', 'A', 'B.T'], {}), '(C, A, B.T)\n', (3047, 3058), False, 'import torch, functools\n'), ((4077, 4126), 'torch.randn', 'torch.randn', (['batch_size', 'M', 'N'], {'requires_grad': '(True)'}), '(batch_size, M, N, requires_grad=True)\n', (4088, 4126), False, 'import torch, functools\n'), 
((4872, 4925), 'torch.randn', 'torch.randn', (['batch_size', 'OC', 'H', 'W'], {'requires_grad': '(True)'}), '(batch_size, OC, H, W, requires_grad=True)\n', (4883, 4925), False, 'import torch, functools\n'), ((4942, 4966), 'torch.nn.BatchNorm2d', 'torch.nn.BatchNorm2d', (['OC'], {}), '(OC)\n', (4962, 4966), False, 'import torch, functools\n'), ((5971, 6000), 'torch.randn', 'torch.randn', (['batch_size', 'M', 'N'], {}), '(batch_size, M, N)\n', (5982, 6000), False, 'import torch, functools\n'), ((6660, 6713), 'torch.randn', 'torch.randn', (['batch_size', 'OC', 'H', 'W'], {'requires_grad': '(True)'}), '(batch_size, OC, H, W, requires_grad=True)\n', (6671, 6713), False, 'import torch, functools\n'), ((7536, 7573), 'torch.randn', 'torch.randn', (['size'], {'requires_grad': '(True)'}), '(size, requires_grad=True)\n', (7547, 7573), False, 'import torch, functools\n'), ((2649, 2686), 'torch.randn', 'torch.randn', (['M', 'K'], {'requires_grad': '(True)'}), '(M, K, requires_grad=True)\n', (2660, 2686), False, 'import torch, functools\n'), ((2706, 2743), 'torch.randn', 'torch.randn', (['N', 'K'], {'requires_grad': '(True)'}), '(N, K, requires_grad=True)\n', (2717, 2743), False, 'import torch, functools\n'), ((2763, 2800), 'torch.randn', 'torch.randn', (['M', 'N'], {'requires_grad': '(True)'}), '(M, N, requires_grad=True)\n', (2774, 2800), False, 'import torch, functools\n'), ((3302, 3351), 'torch.randn', 'torch.randn', (['batch_size', 'M', 'K'], {'requires_grad': '(True)'}), '(batch_size, M, K, requires_grad=True)\n', (3313, 3351), False, 'import torch, functools\n'), ((3371, 3420), 'torch.randn', 'torch.randn', (['batch_size', 'N', 'K'], {'requires_grad': '(True)'}), '(batch_size, N, K, requires_grad=True)\n', (3382, 3420), False, 'import torch, functools\n'), ((3606, 3630), 'torch.transpose', 'torch.transpose', (['B', '(1)', '(2)'], {}), '(B, 1, 2)\n', (3621, 3630), False, 'import torch, functools\n'), ((3689, 3713), 'torch.transpose', 'torch.transpose', (['B', '(1)', 
'(2)'], {}), '(B, 1, 2)\n', (3704, 3713), False, 'import torch, functools\n'), ((8494, 8861), 'table_batched_embeddings_ops.TableBatchedEmbeddingBags', 'table_batched_embeddings_ops.TableBatchedEmbeddingBags', (['T', 'Es', 'D'], {'optimizer': 'table_batched_embeddings_ops.Optimizer.APPROX_ROWWISE_ADAGRAD', 'learning_rate': '(0.1)', 'managed': '(table_batched_embeddings_ops.EmbeddingLocation.DEVICE if not managed else\n table_batched_embeddings_ops.EmbeddingLocation.HOST_MAPPED)', 'eps': '(0.1)', 'stochastic_rounding': '(False)', 'fp16': 'fp16'}), '(T, Es, D, optimizer=\n table_batched_embeddings_ops.Optimizer.APPROX_ROWWISE_ADAGRAD,\n learning_rate=0.1, managed=table_batched_embeddings_ops.\n EmbeddingLocation.DEVICE if not managed else\n table_batched_embeddings_ops.EmbeddingLocation.HOST_MAPPED, eps=0.1,\n stochastic_rounding=False, fp16=fp16)\n', (8548, 8861), False, 'import table_batched_embeddings, table_batched_embeddings_ops\n'), ((9024, 9417), 'table_batched_embeddings_ops.MixedDimTableBatchedEmbeddingBags', 'table_batched_embeddings_ops.MixedDimTableBatchedEmbeddingBags', (['[(Es, d) for d in mixed_D]'], {'optimizer': 'table_batched_embeddings_ops.Optimizer.APPROX_ROWWISE_ADAGRAD', 'learning_rate': '(0.1)', 'managed': '(table_batched_embeddings_ops.EmbeddingLocation.DEVICE if not managed else\n table_batched_embeddings_ops.EmbeddingLocation.HOST_MAPPED)', 'eps': '(0.1)', 'stochastic_rounding': '(False)', 'fp16': 'fp16'}), '([(Es, d) for\n d in mixed_D], optimizer=table_batched_embeddings_ops.Optimizer.\n APPROX_ROWWISE_ADAGRAD, learning_rate=0.1, managed=\n table_batched_embeddings_ops.EmbeddingLocation.DEVICE if not managed else\n table_batched_embeddings_ops.EmbeddingLocation.HOST_MAPPED, eps=0.1,\n stochastic_rounding=False, fp16=fp16)\n', (9086, 9417), False, 'import table_batched_embeddings, table_batched_embeddings_ops\n'), ((10367, 10416), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(Es[x] - 1)', 'size': '(B, L)'}), '(low=0, 
high=Es[x] - 1, size=(B, L))\n', (10380, 10416), False, 'import torch, functools\n'), ((455, 478), 'numpy.cumsum', 'np.cumsum', (['flat_lengths'], {}), '(flat_lengths)\n', (464, 478), True, 'import numpy as np\n')] |
from tensor_operator import TensorOperator
import re
import numpy as np
class MemorySurface:
    """Base class pairing an NCHW tensor with its hardware memory-surface
    layout.

    Subclasses implement the layout conversions and surface generation;
    this base class holds shared state, the RNG seeding helper, debug
    printing, and the hex-dump line writer.
    """
    # NOTE(review): this shadows the __format__ dunder with a set, which
    # would make format(instance) / f-string format specs raise TypeError.
    # It looks intended as a plain constant naming the supported surface
    # kinds — confirm and consider renaming.
    __format__ = {'FeatureMap','weight','image'}

    def __init__(self, name):
        self.name = name                  # label used in debug printouts
        self.seed = 0                     # seed fed to numpy's global RNG
        self.pattern = None               # fill pattern, set by subclasses
        self.memory_surface_data = None   # data in the hardware layout
        self.tensor_nchw = None           # the same data as an NCHW tensor
        self.max_byte_per_line = 16       # payload bytes per dumped line

    def set_seed(self):
        """Seed numpy's global RNG with this surface's configured seed."""
        np.random.seed(self.seed)

    def print_info(self):
        """Print surface shape/contents and tensor contents for debugging."""
        name, pattern = self.name, self.pattern
        print("===== Information of %s with pattern %s, begin =====" % (name, pattern))
        print("----- surface data shape of %s, begin ----" % name)
        print(self.memory_surface_data.shape)
        print("----- surface data shape of %s, end -----" % name)
        print("----- surface data of %s, begin ----" % name)
        print(self.memory_surface_data)
        print("----- surface data of %s, end -----" % name)
        print("----- tensor data of %s, begin -----" % name)
        print(self.tensor_nchw)
        print("----- tensor data of %s, end -----" % name)
        print("===== Information of %s with pattern %s, end =======" % (name, pattern))

    def save_tensor_to_file(self):
        """Placeholder; the on-disk tensor format is still TBD by arch."""
        #TODO, need arch to confirm the file format
        pass

    def write_line_to_file(self, file_handler, offset, content):
        """Write `content` (a numpy array) as hex-byte payload lines.

        Each written line carries at most `self.max_byte_per_line` bytes in
        the form `{offset:0x…, size:N, payload:0x.. 0x.. …},`; offsets
        advance by the bytes emitted per line.
        """
        raw = content.tobytes()
        step = self.max_byte_per_line
        line_offset = offset
        for start in range(0, len(raw), step):
            chunk = raw[start:start + step]
            payload = ' '.join('0x%02x' % byte for byte in chunk)
            file_handler.write("{offset:0x%x, size:%d, payload:%s},\n" % (line_offset, len(chunk), payload))
            line_offset += len(chunk)
class MemorySurfaceFeatureMap(MemorySurface):
    """Feature-map surface: converts NCHW tensors to/from the blocked
    (batch, surface, height, width*atomic_memory) hardware layout and can
    dump the result honoring batch/surface/line strides.
    """
    # TODO: implement the 'nhwc_index' pattern
    supported_pattern = {'nchw_index', 'nsh_wxatom_index', 'shwan_index', 'random', 'ones', 'zeros'}

    def __init__(self, name):
        super().__init__(name)
        self.width = 0
        self.height = 0
        self.channel = 0
        self.batch = 0
        self.component = 0
        self.atomic_memory = 0  # ATOMIC_MEMORY: channels packed per surface
        self.line_stride = 0
        self.surface_stride = 0
        self.surface_number = 0
        self.batch_stride = 0
        self.data_type = 0
        self.pattern = ''
        self.file_name = ''

    def set_configs(self, config):
        """Validate the 13-element config tuple and store it.

        Raises:
            Exception: when config does not unpack to 13 values.
            AssertionError: when a field is out of range or inconsistent.
        """
        try:
            seed, width, height, channel, component, batch, atomic_memory, line_stride, surface_stride, batch_stride, data_type, pattern, file_name = config
        except ValueError:
            raise Exception('Error in feature memory surface creation', '''
            Required parameter for feature map surface are:
            seed, width, height, channel, component, batch, atomic_memory, line_stride, surface_stride, batch_stride, data_type, pattern, file_name
            ''')
        assert(width > 0)
        assert(height > 0)
        assert(channel > 0)
        assert(batch > 0)
        assert(component > 0)
        # multi-batch and multi-component are mutually exclusive
        assert(not((batch>1) and (component>1)))
        assert(atomic_memory > 0)
        assert(line_stride >= width*atomic_memory)
        assert(surface_stride >= height*line_stride)
        # TODO assert(batch_stride > surface_stride)
        assert(data_type in TensorOperator.supported_data_type)
        assert(pattern in self.supported_pattern)
        self.seed, self.width, self.height, self.channel, self.component, self.batch, self.atomic_memory, self.line_stride, self.surface_stride, self.batch_stride, self.data_type, self.pattern, self.file_name = config
        # surfaces needed to hold `channel` channels, atomic_memory each
        self.surface_number = (channel + atomic_memory - 1)//atomic_memory
        self.set_seed()

    def convert_tensor_nchw_to_memory_surface(self, memory_surface_shape='nsh_wxatom'):
        """Convert self.tensor_nchw into the requested hardware layout."""
        nchw_compensated = TensorOperator.append_c_compensation_to_nchw_tensor(self.tensor_nchw, self.atomic_memory)
        if 'nsh_wxatom' == memory_surface_shape:
            self.memory_surface_data = TensorOperator.convert_tensor_from_nchw_to_nch_wxatom(nchw_compensated, self.atomic_memory)
        elif 'sh_wan' == memory_surface_shape:
            self.memory_surface_data = TensorOperator.convert_tensor_from_nchw_to_sh_wan(nchw_compensated, self.atomic_memory)
        else:
            # previously an unknown shape was silently ignored
            raise ValueError('Unsupported memory_surface_shape: %s' % memory_surface_shape)

    def convert_memory_surface_to_tensor_nchw(self, memory_surface_shape='nsh_wxatom'):
        """Convert self.memory_surface_data back into an NCHW tensor."""
        if 'nsh_wxatom' == memory_surface_shape:
            nchw_compensated = TensorOperator.convert_tensor_from_nch_wxatom_to_nchw(self.memory_surface_data, self.atomic_memory)
        elif 'sh_wan' == memory_surface_shape:
            nchw_compensated = TensorOperator.convert_tensor_from_sh_wan_to_nchw(self.memory_surface_data, self.atomic_memory, self.component)
        else:
            # previously an unknown shape fell through to an UnboundLocalError
            raise ValueError('Unsupported memory_surface_shape: %s' % memory_surface_shape)
        self.tensor_nchw = TensorOperator.remove_c_compensation_from_nchw_tensor(nchw_compensated, self.channel)

    # config = (seed, width, height, channel, component, batch, atomic_memory,
    #           line_stride, surface_stride, batch_stride, data_type, pattern, file_name)
    def generate_memory_surface(self, *config):
        """Generate tensor_nchw and memory_surface_data per the fill pattern."""
        print(config)
        self.set_configs(config)
        if (1 == self.component):
            if self.pattern in ['nchw_index']:
                self.tensor_nchw = TensorOperator.create_tensor((self.batch, self.channel, self.height, self.width), self.data_type, 'index')
                self.convert_tensor_nchw_to_memory_surface()
            elif self.pattern in ['random', 'ones', 'zeros']:
                self.tensor_nchw = TensorOperator.create_tensor((self.batch, self.channel, self.height, self.width), self.data_type, self.pattern)
                self.convert_tensor_nchw_to_memory_surface()
            elif self.pattern in ['nsh_wxatom_index']:
                # Bug fix: this branch previously tested 'nch_wxatom_index',
                # a value set_configs() can never accept, so the supported
                # 'nsh_wxatom_index' pattern always hit the raise below.
                self.memory_surface_data = TensorOperator.create_tensor((self.batch, self.surface_number, self.height, self.width*self.atomic_memory), self.data_type, 'index')
                self.convert_memory_surface_to_tensor_nchw()
            else:
                raise Exception('MemorySurfaceFeatureMap::generate_memory_surface','Not supported pattern:%s' % self.pattern)
        else:
            ## multi component, single batch
            if self.pattern in ['nchw_index']:
                self.tensor_nchw = TensorOperator.create_tensor((self.component, self.channel, self.height, self.width), self.data_type, 'index')
                self.convert_tensor_nchw_to_memory_surface('sh_wan')
            elif self.pattern in ['random', 'ones', 'zeros']:
                self.tensor_nchw = TensorOperator.create_tensor((self.component, self.channel, self.height, self.width), self.data_type, self.pattern)
                self.convert_tensor_nchw_to_memory_surface('sh_wan')
            elif self.pattern in ['shwan_index']:
                self.memory_surface_data = TensorOperator.create_tensor((1, self.surface_number, self.height, self.component*self.width*self.atomic_memory), self.data_type, 'index')
                self.convert_memory_surface_to_tensor_nchw('sh_wan')
            else:
                raise Exception('MemorySurfaceFeatureMap::generate_memory_surface','Not supported pattern:%s' % self.pattern)

    def dump_memory_surface_to_file(self):
        """Write the surface line-by-line, honoring batch/surface/line strides."""
        print('MemorySurfaceFeatureMap::dump_memory_surface_to_file')
        with open(self.file_name, 'w') as f:
            f.write('{\n')
            for batch_index in range(self.batch):
                for surface_index in range(self.surface_number):
                    for line_index in range(self.height):
                        offset = batch_index*self.batch_stride + surface_index*self.surface_stride + line_index*self.line_stride
                        self.write_line_to_file(f, offset, self.memory_surface_data[batch_index, surface_index, line_index])
            f.write('}\n')
class MemorySurfaceWeight(MemorySurface):
    """Weight memory surface: packs (kernel, channel, height, width) weights
    into the grouped hardware layout via TensorOperator, with optional
    sparse compression (compressed data + mask + per-group sizes)."""
    # fill patterns accepted by generate_memory_surface()
    supported_pattern = {'nchw_index', 'nhwc_index', 'random', 'ones', 'zeros', 'gshwp_index'}
    def __init__(self, name):
        super().__init__(name)
        self.width = 0
        self.height = 0
        self.channel = 0
        self.kernel_number = 0
        self.element_per_atom = 0 # ATOMIC_CHANNEL
        self.kernel_per_group = 0
        self.data_type = 0
        self.pattern = ''
        self.group_number = 0
        # compression outputs (filled only when is_compressed)
        self.is_compressed = False
        self.weight_compressed = None
        self.weight_mask = None
        self.weight_group_size = None
        self.debug_decompressed = None
    def set_configs(self, config):
        """Validate the 12-element weight-surface config tuple and store it.

        When is_compressed, file_name must be a 3-element list/tuple
        (data, mask, group sizes); otherwise a single path string.
        """
        try:
            #self.width, self.height, self.channel, self.kernel_number, self.element_per_atom, self.kernel_per_group, self.data_type, self.pattern = config
            seed, width, height, channel, kernel_number, element_per_atom, kernel_per_group, data_type, alignment_in_byte, pattern, is_compressed, file_name = config
        except ValueError:
            raise Exception('Error in weight memory surface creation', '''
            Required parameter for weight map surface are:
            width, height, channel, kernel_number, data_type, pattern
            ''')
        assert(width > 0)
        assert(height > 0)
        assert(channel > 0)
        assert(kernel_number > 0)
        assert(element_per_atom > 0)
        assert(kernel_per_group>0)
        assert(data_type in TensorOperator.supported_data_type)
        assert(pattern in self.supported_pattern)
        assert(len(file_name)>0)
        # compressed dumps need three files (data, mask, group sizes)
        assert( ((is_compressed == True) and (type(file_name) in [list,tuple]) and (len(file_name)==3)) or ((is_compressed == False) and (type(file_name) is str)) )
        #assert()
        self.seed, self.width, self.height, self.channel, self.kernel_number, self.element_per_atom, self.kernel_per_group, self.data_type, self.alignment_in_byte, self.pattern, self.is_compressed, self.file_name = config
        # groups needed to hold all kernels, kernel_per_group each
        self.group_number = (self.kernel_number+self.kernel_per_group-1)//self.kernel_per_group
        self.set_seed()
    def convert_tensor_nchw_to_memory_surface(self):
        # Pack NCHW weights into the grouped layout, then pad to the
        # configured byte alignment.
        self.memory_surface_data = TensorOperator.align_array_size(TensorOperator.convert_nchw_to_weight_memory_surface(self.tensor_nchw, self.element_per_atom, self.kernel_per_group), 0, self.alignment_in_byte)
    def convert_memory_surface_to_tensor_nchw(self):
        # Inverse of convert_tensor_nchw_to_memory_surface.
        self.tensor_nchw = TensorOperator.convert_weight_memory_surface_to_nchw(self.memory_surface_data,
            self.kernel_number, self.channel, self.height, self.width,
            self.kernel_per_group, self.element_per_atom)
    def generate_memory_surface(self, *config):
        """Generate tensor + surface per the fill pattern; optionally compress."""
        self.set_configs(config)
        if self.pattern in ['nchw_index']:
            self.tensor_nchw = TensorOperator.create_tensor((self.kernel_number, self.channel, self.height, self.width), self.data_type, 'index')
            self.convert_tensor_nchw_to_memory_surface()
        elif self.pattern in ['nhwc_index']:
            # index pattern laid out NHWC, then transposed into NCHW
            self.tensor_nchw = TensorOperator.create_tensor((self.kernel_number, self.height, self.width, self.channel), self.data_type, 'index')
            self.tensor_nchw = TensorOperator.convert_nhwc_to_nchw(self.tensor_nchw)
            self.convert_tensor_nchw_to_memory_surface()
        elif self.pattern in ['ones','zeros', 'random']:
            self.tensor_nchw = TensorOperator.create_tensor((self.kernel_number, self.channel, self.height, self.width), self.data_type, self.pattern)
            self.convert_tensor_nchw_to_memory_surface()
        elif self.pattern in ['gshwp_index']:
            # index pattern written directly in the flat surface layout
            self.memory_surface_data = TensorOperator.align_array_size(TensorOperator.create_tensor((self.kernel_number*self.channel*self.height*self.width,), self.data_type, 'index'), 0, self.alignment_in_byte)
            self.convert_memory_surface_to_tensor_nchw()
        if self.is_compressed:
            element_per_group = self.width*self.height*self.channel*self.kernel_per_group
            self.weight_compressed, self.weight_mask, self.weight_group_size = TensorOperator.compress_array_by_element(
                self.memory_surface_data, element_per_group, self.element_per_atom
            )
            # round-trip decompression kept only for debugging/inspection
            self.debug_decompressed = TensorOperator.decompress_array_by_element(self.weight_compressed, self.weight_mask, self.weight_group_size)
    def print_info(self):
        """Print surface/compression contents for debugging."""
        print("===== Information of %s with pattern %s, begin =====" % (self.name, self.pattern))
        if self.is_compressed:
            # NOTE(review): the banners below all say "compressed weight
            # data" even for the mask and group-size arrays — looks like a
            # copy-paste slip in the labels (left as-is; they are runtime
            # output).
            print("----- surface data of %s, begin ----" % self.name)
            print(self.memory_surface_data)
            print("----- surface data of %s, end -----" % self.name)
            print("----- compressed weight data of %s, begin ----" % self.name)
            print(self.weight_compressed)
            print("----- compressed weight data of %s, end -----" % self.name)
            print("----- compressed weight data of %s, begin ----" % self.name)
            print(self.weight_mask)
            print("----- compressed weight data of %s, end -----" % self.name)
            print("----- compressed weight data of %s, begin ----" % self.name)
            print(self.weight_group_size)
            print("----- compressed weight data of %s, end -----" % self.name)
            print("----- debug decompressed weight data of %s, begin ----" % self.name)
            print(self.debug_decompressed)
            print("----- debug decompressed weight data of %s, end -----" % self.name)
        else:
            print("----- surface data of %s, begin ----" % self.name)
            print(self.memory_surface_data)
            print("----- surface data of %s, end -----" % self.name)
            print("----- tensor data of %s, begin -----" % self.name)
            print(self.tensor_nchw)
            print("----- tensor data of %s, end -----" % self.name)
        print("===== Information of %s with pattern %s, end =======" % (self.name, self.pattern))
    def dump_memory_surface_to_file(self):
        """Dump the surface (or its three compression arrays) as hex lines."""
        print('MemorySurfaceWeight::dump_memory_surface_to_file')
        #element_number = self.memory_surface_data.size
        #element_size = self.memory_surface_data.dtype.itemsize
        # MSF, batch surface line
        if self.is_compressed:
            # three files: compressed data, mask, per-group sizes
            with open (self.file_name[0], 'w') as f:
                f.write('{\n')
                self.write_line_to_file(f, 0, self.weight_compressed)
                f.write('}\n')
            with open (self.file_name[1], 'w') as f:
                f.write('{\n')
                self.write_line_to_file(f, 0, self.weight_mask)
                f.write('}\n')
            with open (self.file_name[2], 'w') as f:
                f.write('{\n')
                self.write_line_to_file(f, 0, self.weight_group_size)
                f.write('}\n')
        else:
            with open (self.file_name, 'w') as f:
                f.write('{\n')
                self.write_line_to_file(f, 0, self.memory_surface_data)
                f.write('}\n')
class MemorySurfaceImagePitch(MemorySurface):
supported_pattern = {'nchw_index', 'nsh_wxatom_index', 'random', 'ones', 'zeros'}
def reset(self):
self.width = 0
self.height = 0
self.channel = 0
self.atomic_memory = 0 # ATOMIC_MEMORY
self.line_stride = []
self.offset_x = 0
self.pixel_format_name = ''
self.plane_number = 0
self.data_type = None
self.pattern = ''
self.file_name = []
self.channel_name_list = []
self.channel_per_plane = []
self.pad_num_line_start = []
self.pad_num_line_end = []
# memory surface data size may not be same, cannot use numpy array for memory_surface_data
self.memory_surface_data = []
    def __init__(self, name):
        # Base init sets the shared fields (name/seed/etc.); reset() then
        # zeroes the pitch-image-specific configuration.
        super().__init__(name)
        self.reset()
    def set_configs(self, config):
        """Validate and store the 11-element pitch-image config tuple.

        `line_stride` and `file_name` arrive as comma-separated strings and
        are split into per-plane lists; extract_surface_setting() then
        derives per-plane layout info from pixel_format_name.
        """
        self.reset()
        try:
            seed, width, height, channel, atomic_memory, line_stride, offset_x, pixel_format_name, data_type, pattern, file_name = config
        except ValueError:
            raise Exception('Error in feature memory surface creation', '''
            Required parameter for feature map surface are:
            seed, width, height, channel, atomic_memory, line_stride, offset_x, pixel_format_name, data_type, pattern, file_name
            ''')
        assert(width > 0)
        assert(height > 0)
        assert(channel > 0)
        # assert(pixel_format_name in self.supported_pixel_format)
        #assert(data_type in TensorOperator.supported_data_type)
        assert(pattern in self.supported_pattern)
        self.seed, self.width, self.height, self.channel, self.atomic_memory, line_stride, self.offset_x, self.pixel_format_name, self.data_type, self.pattern, file_name = config
        # one dump file per plane, comma separated
        self.file_name = file_name.split(',')
        # per-plane line strides; int(x, 0) accepts 0x-prefixed hex values
        self.line_stride = list(int(x, 0) for x in line_stride.split(','))
        #assert(len(self.file_name) == len(self.line_stride)) # file number and line_stride shall be the same as surface number
        self.extract_surface_setting()
        self.set_seed()
def extract_surface_setting (self):
# Pixel format name examples:
# T_B8G8R8X8
# T_A2R10G10B10
# T_Y8___U8V8_N444
pixel_format_name = self.pixel_format_name.replace('T_','').replace('_N444','')
if '_F' in pixel_format_name:
assert('float16' == self.data_type)
plane_separator_anchor = re.compile(r'___')
hdr_anchor = re.compile(r'(^A2)|(A2$)')
channel_anchor = re.compile(r'(?P<channel_name>[A-Z])(?P<bit_width>\d+)')
is_reverse = False
is_hdr = False
is_a2_msb = False
plane_name_list = pixel_format_name.split('___')
self.plane_number = len(plane_name_list)
element_byte_size = np.dtype(self.data_type).itemsize
for plane_name in plane_name_list:
# a plane could be: R8G8B8A8, Y8, U8V8
result = hdr_anchor.search(plane_name)
if result:
is_hdr = True
is_a2_msb = (0 == plane_name.index('A2'))
# 'R8G8B8A8' -> [('R', '8'), ('G', '8'), ('B', '8'), ('A', '8')]
channel_list = channel_anchor.findall(plane_name)
channel_name_tuple, channel_bit_width_tuple = zip(*channel_list)
self.channel_name_list.extend(channel_name_tuple)
channel_per_plane = len(channel_list)
element_alignment = self.atomic_memory//(element_byte_size*channel_per_plane)
#print('MemorySurfaceImagePitch::element_alignment', element_alignment, sep='\n')
self.channel_per_plane.append(channel_per_plane)
pad_num_line_start = self.offset_x % element_alignment
self.pad_num_line_start.append(pad_num_line_start)
self.pad_num_line_end.append( (pad_num_line_start + self.width + element_alignment -1 )//element_alignment * element_alignment - (pad_num_line_start + self.width) )
#print('MemorySurfaceImagePitch::pad_num_line_start', self.pad_num_line_start, sep='\n')
#print('MemorySurfaceImagePitch::pad_num_line_end', self.pad_num_line_end, sep='\n')
def convert_memory_surface_to_tensor_nchw(self):
# Remove pad zeros in line start and line end which is come from offset_x and atomic_m alignment
for plane_idx in range(self.plane_number):
self.memory_surface_data[plane_idx] = self.memory_surface_data[plane_idx][:,:,:, self.pad_num_line_start[plane_idx]:-pad_num_line_end[plane_idx]]
tensor_list = []
# convert plane memory to plane tensor
for plane_idx in range(self.plane_number):
tensor_list.append(TensorOperator.convert_tensor_from_nch_wxatom_to_nchw(self.memory_surface_data[plane_idx], self.channel_per_plane[plane_idx]))
# merge plane tensors on dimemsion channel
self.tensor_nchw = TensorOperator.merge_nchw_tensors_by_dimemsion(tensor_list, 1)
# Adjust order to ['R','G','B','Y','U','V','X','A']
channel_order = []
for channel in ['R','G','B','Y','U','V','X','A']:
if channel in self.channel_name_list:
channel_order.append(self.channel_name_list.index(channel))
self.tensor_nchw = TensorOperator.adjust_channel_order(TensorOperator.tensor_nchw, channel_order)
def convert_tensor_nchw_to_memory_surface(self):
# Adjust order from ['R','G','B','Y','U','V','X','A'] to pixel format
channel_name_new_list = []
channel_order = []
tensor_channel_name_list = []
for channel in self.channel_name_list:
if channel in ['R','G','B','Y','U','V','X','A']:
tensor_channel_name_list.append(channel)
for channel in self.channel_name_list:
if channel in tensor_channel_name_list:
channel_order.append(tensor_channel_name_list.index(channel))
self.tensor_nchw = TensorOperator.adjust_channel_order(self.tensor_nchw, channel_order)
# split tensor to plane tensor on dimension channel
dimension_channel_axis_id = 1
boarder_channel_idx = [self.channel_per_plane[0]]
tensor_list = TensorOperator.split_tensor_nchw_by_dimemsion(self.tensor_nchw, boarder_channel_idx, dimension_channel_axis_id)
# convert plane tensor to plane memory
for plane_idx in range(self.plane_number):
self.memory_surface_data.append(TensorOperator.convert_tensor_from_nchw_to_nch_wxatom(tensor_list[plane_idx], self.channel_per_plane[plane_idx]))
# apply offset_x and atomic_m alignment, pad zeros in line start and line end
for plane_idx in range(self.plane_number):
self.memory_surface_data[plane_idx] = np.pad(self.memory_surface_data[plane_idx], ((0,0), (0,0), (0,0), (self.pad_num_line_start[plane_idx]*self.channel_per_plane[plane_idx], self.pad_num_line_end[plane_idx]*self.channel_per_plane[plane_idx])), 'constant')
def generate_memory_surface(self, *config):
print(config)
self.set_configs(config)
supported_pattern = {'nchw_index', 'nsh_wxatom_index', 'random', 'ones', 'zeros'}
if self.pattern in ['nchw_index']:
self.tensor_nchw = TensorOperator.create_tensor((1, self.channel, self.height, self.width), self.data_type, 'index')
self.convert_tensor_nchw_to_memory_surface()
elif self.pattern in ['random', 'ones', 'zeros']:
self.tensor_nchw = TensorOperator.create_tensor((1, self.channel, self.height, self.width), self.data_type, self.pattern)
self.convert_tensor_nchw_to_memory_surface()
elif self.pattern in ['nsh_wxatom_index']:
for plane_idx in range(self.plane_number):
self.memory_surface_data.append( TensorOperator.create_tensor((1, 1, self.height, (self.width + self.pad_num_line_start[plane_idx] + pad_num_line_end[plane_idx])*self.channel_per_plane[plane_idx]), self.data_type, 'index') )
#print('self.memory_surface_data')
#print(self.memory_surface_data)
self.convert_memory_surface_to_tensor_nchw()
else:
raise Exception('MemorySurfaceImagePitch::generate_memory_surface','Not supported pattern:%s' % self.pattern)
def dump_memory_surface_to_file(self):
print('MemorySurfaceImagePitch::dump_memory_surface_to_file')
#print('self.memory_surface_data', self.memory_surface_data)
for plane_index in range(self.plane_number):
with open (self.file_name[plane_index], 'w') as f:
f.write('{\n')
offset = 0
for line_index in range(self.height):
offset = line_index*self.line_stride[plane_index]
self.write_line_to_file(f, offset, self.memory_surface_data[plane_index][0, 0, line_index])
f.write('}\n')
def print_info(self):
print("===== Information of %s with pattern %s, begin =====" % (self.name, self.pattern))
print("----- surface data shape of %s, begin ----" % self.name)
for plane_idx in range(self.plane_number):
print(self.memory_surface_data[plane_idx].shape)
print("----- surface data shape of %s, end -----" % self.name)
print("----- surface data of %s, begin ----" % self.name)
for plane_idx in range(self.plane_number):
print(self.memory_surface_data[plane_idx])
print("----- surface data of %s, end -----" % self.name)
print("----- tensor data of %s, begin -----" % self.name)
print(self.tensor_nchw)
print("----- tensor data of %s, end -----" % self.name)
print("===== Information of %s with pattern %s, end =======" % (self.name, self.pattern))
class MemorySurfaceFactory():
    """Factory mapping a format name to its memory-surface class.

    'MemorySurface' + format must name a class defined at module level,
    e.g. creat('ImagePitch') -> MemorySurfaceImagePitch.
    """
    @classmethod
    def creat(cls, format):
        """Return the class named 'MemorySurface' + format.

        Raises KeyError if no such class exists at module level.  The
        misspelled name is kept for backward compatibility; new callers
        should use the 'create' alias below.
        """
        format_class_name = 'MemorySurface'+format
        return globals()[format_class_name]

    # Correctly spelled alias for new callers; same classmethod descriptor.
    create = creat
if __name__ == "__main__":
    # Demo / self-test.  The commented-out calls below exercise the
    # 'FeatureMap' and 'Weight' surface types; the active code at the bottom
    # generates several 'ImagePitch' surfaces and dumps them to .dat files.
    #msf = MemorySurfaceFactory.creat('FeatureMap')('AA')
    ##msf = MSFM('AA')
    #msf.generate_memory_surface(0, 2, 3, 4, 1, 1, 8, 16, 48, 48, 'int8', 'ones', 'AA_ones.dat')
    #msf.print_info()
    #msf.generate_memory_surface(0, 2, 3, 4, 1, 1, 8, 16, 48, 48, 'int8', 'zeros', 'AA_zeros.dat')
    #msf.print_info()
    #msf.generate_memory_surface(0, 2, 3, 4, 1, 1, 8, 16, 48, 48, 'int8', 'random', 'AA_random.dat')
    #msf.print_info()
    #msf.generate_memory_surface(0, 2, 3, 4, 1, 1, 8, 16, 48, 48, 'int8', 'nchw_index', 'AA_nchw_index.dat')
    #msf.print_info()
    #msf.generate_memory_surface(0, 2, 3, 4, 1, 1, 8, 16, 48, 48, 'int8', 'nch_wxatom_index', 'AA_nch_wxatom_index.dat')
    #msf.print_info()
    #msf.generate_memory_surface(0, 2, 3, 4, 1, 2, 8, 16, 48, 48, 'int8', 'shwan_index', 'AA_shwan_index.dat')
    #msf.print_info()
    #msf.dump_memory_surface_to_file()
    #msf.generate_memory_surface(0, 1, 1, 1, 1, 2, 8, 32, 32, 32, 'int16', 'nchw_index', 'AA_nchw_index_int16.dat')
    #msf.print_info()
    #msf.dump_memory_surface_to_file()
    #raise
    #msw = MemorySurfaceFactory.creat('Weight')('BB')
    ##width, height, channel, kernel_number, element_per_atom, kernel_per_group, data_type, pattern
    #msw.generate_memory_surface(0, 3, 3, 3, 5, 2, 2, 'int8', 8, 'nchw_index', False, 'weight_nchw_index.dat')
    #msw.print_info()
    #msw.dump_memory_surface_to_file()
    #
    #msw.generate_memory_surface(0, 3, 3, 3, 5, 2, 2, 'int16', 8, 'gshwp_index', False, 'weight_gshwp_index.dat')
    #msw.print_info()
    #msw.dump_memory_surface_to_file()
    #
    #msw.generate_memory_surface(0, 3, 3, 4, 4, 2, 2, 'int16', 8, 'nhwc_index', False, 'weight_nhwc_index_int16.dat')
    #msw.print_info()
    #msw.dump_memory_surface_to_file()
    #
    #msw.generate_memory_surface(0, 3, 3, 4, 4, 2, 2, 'int8', 8, 'gshwp_index', False, 'weight_gshwp_index_int8_aligned.dat')
    #msw.print_info()
    #msw.dump_memory_surface_to_file()
    #msw.generate_memory_surface(0, 3, 3, 4, 4, 2, 2, 'int8', 16, 'gshwp_index', True, ('weight_gshwp_index_int8_aligned_cmp.dat', 'weight_gshwp_index_int8_aligned_wmb.dat', 'weight_gshwp_index_int8_aligned_wgs.dat', ))
    #msw.print_info()
    #msw.dump_memory_surface_to_file()
    #msw.generate_memory_surface(0, 3, 3, 3, 5, 2, 2, 'int16', 16, 'gshwp_index', True, ('weight_gshwp_index_cmp.dat', 'weight_gshwp_index_wmb.dat', 'weight_gshwp_index_wgs.dat', ))
    #msw.print_info()
    #msw.dump_memory_surface_to_file()
    # Active demo: single-plane and two-plane pitch-linear surfaces, with and
    # without a non-zero x offset.
    msp = MemorySurfaceFactory.creat('ImagePitch')('CC')
    #seed, width, height, channel, atomic_memory, line_stride, offset_x, pixel_format_name, data_type, pattern, file_name
    msp.generate_memory_surface(0, 3, 3, 4, 8, '32', 0, 'T_A8R8G8B8', 'int8', 'nchw_index', 'pitchlinear_l0.dat')
    msp.print_info()
    msp.dump_memory_surface_to_file()
    msp.generate_memory_surface(1, 3, 3, 4, 8, '32', 1, 'T_A8R8G8B8', 'int8', 'nchw_index', 'pitchlinear_l1.dat')
    msp.print_info()
    msp.dump_memory_surface_to_file()
    msp.generate_memory_surface(1, 3, 3, 3, 8, '32,32', 3, 'T_Y8___U8V8_N444', 'int8', 'nchw_index', 'pitchlinear_l3_0.dat,pitchlinear_l3_1.dat')
    msp.print_info()
    msp.dump_memory_surface_to_file()
    msp.generate_memory_surface(1, 3, 3, 3, 8, '32,32', 7, 'T_Y8___U8V8_N444', 'int8', 'nchw_index', 'pitchlinear_l7_0.dat,pitchlinear_l7_1.dat')
    msp.print_info()
    msp.dump_memory_surface_to_file()
| [
"re.compile",
"tensor_operator.TensorOperator.convert_weight_memory_surface_to_nchw",
"tensor_operator.TensorOperator.append_c_compensation_to_nchw_tensor",
"tensor_operator.TensorOperator.convert_nchw_to_weight_memory_surface",
"tensor_operator.TensorOperator.merge_nchw_tensors_by_dimemsion",
"tensor_ope... | [((455, 480), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (469, 480), True, 'import numpy as np\n'), ((4493, 4587), 'tensor_operator.TensorOperator.append_c_compensation_to_nchw_tensor', 'TensorOperator.append_c_compensation_to_nchw_tensor', (['self.tensor_nchw', 'self.atomic_memory'], {}), '(self.tensor_nchw, self.\n atomic_memory)\n', (4544, 4587), False, 'from tensor_operator import TensorOperator\n'), ((5434, 5523), 'tensor_operator.TensorOperator.remove_c_compensation_from_nchw_tensor', 'TensorOperator.remove_c_compensation_from_nchw_tensor', (['nchw_compensated', 'self.channel'], {}), '(nchw_compensated,\n self.channel)\n', (5487, 5523), False, 'from tensor_operator import TensorOperator\n'), ((11840, 12032), 'tensor_operator.TensorOperator.convert_weight_memory_surface_to_nchw', 'TensorOperator.convert_weight_memory_surface_to_nchw', (['self.memory_surface_data', 'self.kernel_number', 'self.channel', 'self.height', 'self.width', 'self.kernel_per_group', 'self.element_per_atom'], {}), '(self.\n memory_surface_data, self.kernel_number, self.channel, self.height,\n self.width, self.kernel_per_group, self.element_per_atom)\n', (11892, 12032), False, 'from tensor_operator import TensorOperator\n'), ((19171, 19188), 're.compile', 're.compile', (['"""___"""'], {}), "('___')\n", (19181, 19188), False, 'import re\n'), ((19225, 19250), 're.compile', 're.compile', (['"""(^A2)|(A2$)"""'], {}), "('(^A2)|(A2$)')\n", (19235, 19250), False, 'import re\n'), ((19285, 19341), 're.compile', 're.compile', (['"""(?P<channel_name>[A-Z])(?P<bit_width>\\\\d+)"""'], {}), "('(?P<channel_name>[A-Z])(?P<bit_width>\\\\d+)')\n", (19295, 19341), False, 'import re\n'), ((21690, 21752), 'tensor_operator.TensorOperator.merge_nchw_tensors_by_dimemsion', 'TensorOperator.merge_nchw_tensors_by_dimemsion', (['tensor_list', '(1)'], {}), '(tensor_list, 1)\n', (21736, 21752), False, 'from tensor_operator import TensorOperator\n'), ((22057, 22135), 
'tensor_operator.TensorOperator.adjust_channel_order', 'TensorOperator.adjust_channel_order', (['TensorOperator.tensor_nchw', 'channel_order'], {}), '(TensorOperator.tensor_nchw, channel_order)\n', (22092, 22135), False, 'from tensor_operator import TensorOperator\n'), ((22750, 22818), 'tensor_operator.TensorOperator.adjust_channel_order', 'TensorOperator.adjust_channel_order', (['self.tensor_nchw', 'channel_order'], {}), '(self.tensor_nchw, channel_order)\n', (22785, 22818), False, 'from tensor_operator import TensorOperator\n'), ((23001, 23116), 'tensor_operator.TensorOperator.split_tensor_nchw_by_dimemsion', 'TensorOperator.split_tensor_nchw_by_dimemsion', (['self.tensor_nchw', 'boarder_channel_idx', 'dimension_channel_axis_id'], {}), '(self.tensor_nchw,\n boarder_channel_idx, dimension_channel_axis_id)\n', (23046, 23116), False, 'from tensor_operator import TensorOperator\n'), ((4673, 4768), 'tensor_operator.TensorOperator.convert_tensor_from_nchw_to_nch_wxatom', 'TensorOperator.convert_tensor_from_nchw_to_nch_wxatom', (['nchw_compensated', 'self.atomic_memory'], {}), '(nchw_compensated,\n self.atomic_memory)\n', (4726, 4768), False, 'from tensor_operator import TensorOperator\n'), ((5114, 5218), 'tensor_operator.TensorOperator.convert_tensor_from_nch_wxatom_to_nchw', 'TensorOperator.convert_tensor_from_nch_wxatom_to_nchw', (['self.memory_surface_data', 'self.atomic_memory'], {}), '(self.\n memory_surface_data, self.atomic_memory)\n', (5167, 5218), False, 'from tensor_operator import TensorOperator\n'), ((11611, 11732), 'tensor_operator.TensorOperator.convert_nchw_to_weight_memory_surface', 'TensorOperator.convert_nchw_to_weight_memory_surface', (['self.tensor_nchw', 'self.element_per_atom', 'self.kernel_per_group'], {}), '(self.tensor_nchw, self\n .element_per_atom, self.kernel_per_group)\n', (11663, 11732), False, 'from tensor_operator import TensorOperator\n'), ((12347, 12465), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', 
(['(self.kernel_number, self.channel, self.height, self.width)', 'self.data_type', '"""index"""'], {}), "((self.kernel_number, self.channel, self.height,\n self.width), self.data_type, 'index')\n", (12375, 12465), False, 'from tensor_operator import TensorOperator\n'), ((13646, 13758), 'tensor_operator.TensorOperator.compress_array_by_element', 'TensorOperator.compress_array_by_element', (['self.memory_surface_data', 'element_per_group', 'self.element_per_atom'], {}), '(self.memory_surface_data,\n element_per_group, self.element_per_atom)\n', (13686, 13758), False, 'from tensor_operator import TensorOperator\n'), ((13834, 13947), 'tensor_operator.TensorOperator.decompress_array_by_element', 'TensorOperator.decompress_array_by_element', (['self.weight_compressed', 'self.weight_mask', 'self.weight_group_size'], {}), '(self.weight_compressed, self.\n weight_mask, self.weight_group_size)\n', (13876, 13947), False, 'from tensor_operator import TensorOperator\n'), ((19566, 19590), 'numpy.dtype', 'np.dtype', (['self.data_type'], {}), '(self.data_type)\n', (19574, 19590), True, 'import numpy as np\n'), ((23562, 23801), 'numpy.pad', 'np.pad', (['self.memory_surface_data[plane_idx]', '((0, 0), (0, 0), (0, 0), (self.pad_num_line_start[plane_idx] * self.\n channel_per_plane[plane_idx], self.pad_num_line_end[plane_idx] * self.\n channel_per_plane[plane_idx]))', '"""constant"""'], {}), "(self.memory_surface_data[plane_idx], ((0, 0), (0, 0), (0, 0), (self.\n pad_num_line_start[plane_idx] * self.channel_per_plane[plane_idx], self\n .pad_num_line_end[plane_idx] * self.channel_per_plane[plane_idx])),\n 'constant')\n", (23568, 23801), True, 'import numpy as np\n'), ((24058, 24159), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(1, self.channel, self.height, self.width)', 'self.data_type', '"""index"""'], {}), "((1, self.channel, self.height, self.width),\n self.data_type, 'index')\n", (24086, 24159), False, 'from tensor_operator import 
TensorOperator\n'), ((4853, 4945), 'tensor_operator.TensorOperator.convert_tensor_from_nchw_to_sh_wan', 'TensorOperator.convert_tensor_from_nchw_to_sh_wan', (['nchw_compensated', 'self.atomic_memory'], {}), '(nchw_compensated, self.\n atomic_memory)\n', (4902, 4945), False, 'from tensor_operator import TensorOperator\n'), ((5294, 5409), 'tensor_operator.TensorOperator.convert_tensor_from_sh_wan_to_nchw', 'TensorOperator.convert_tensor_from_sh_wan_to_nchw', (['self.memory_surface_data', 'self.atomic_memory', 'self.component'], {}), '(self.memory_surface_data,\n self.atomic_memory, self.component)\n', (5343, 5409), False, 'from tensor_operator import TensorOperator\n'), ((5873, 5984), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(self.batch, self.channel, self.height, self.width)', 'self.data_type', '"""index"""'], {}), "((self.batch, self.channel, self.height, self.\n width), self.data_type, 'index')\n", (5901, 5984), False, 'from tensor_operator import TensorOperator\n'), ((7089, 7203), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(self.component, self.channel, self.height, self.width)', 'self.data_type', '"""index"""'], {}), "((self.component, self.channel, self.height,\n self.width), self.data_type, 'index')\n", (7117, 7203), False, 'from tensor_operator import TensorOperator\n'), ((12598, 12716), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(self.kernel_number, self.height, self.width, self.channel)', 'self.data_type', '"""index"""'], {}), "((self.kernel_number, self.height, self.width,\n self.channel), self.data_type, 'index')\n", (12626, 12716), False, 'from tensor_operator import TensorOperator\n'), ((12745, 12798), 'tensor_operator.TensorOperator.convert_nhwc_to_nchw', 'TensorOperator.convert_nhwc_to_nchw', (['self.tensor_nchw'], {}), '(self.tensor_nchw)\n', (12780, 12798), False, 'from tensor_operator import TensorOperator\n'), ((21483, 21613), 
'tensor_operator.TensorOperator.convert_tensor_from_nch_wxatom_to_nchw', 'TensorOperator.convert_tensor_from_nch_wxatom_to_nchw', (['self.memory_surface_data[plane_idx]', 'self.channel_per_plane[plane_idx]'], {}), '(self.\n memory_surface_data[plane_idx], self.channel_per_plane[plane_idx])\n', (21536, 21613), False, 'from tensor_operator import TensorOperator\n'), ((23258, 23375), 'tensor_operator.TensorOperator.convert_tensor_from_nchw_to_nch_wxatom', 'TensorOperator.convert_tensor_from_nchw_to_nch_wxatom', (['tensor_list[plane_idx]', 'self.channel_per_plane[plane_idx]'], {}), '(tensor_list[plane_idx\n ], self.channel_per_plane[plane_idx])\n', (23311, 23375), False, 'from tensor_operator import TensorOperator\n'), ((24305, 24411), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(1, self.channel, self.height, self.width)', 'self.data_type', 'self.pattern'], {}), '((1, self.channel, self.height, self.width),\n self.data_type, self.pattern)\n', (24333, 24411), False, 'from tensor_operator import TensorOperator\n'), ((6227, 6343), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(self.batch, self.channel, self.height, self.width)', 'self.data_type', 'self.pattern'], {}), '((self.batch, self.channel, self.height, self.\n width), self.data_type, self.pattern)\n', (6255, 6343), False, 'from tensor_operator import TensorOperator\n'), ((7430, 7549), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(self.component, self.channel, self.height, self.width)', 'self.data_type', 'self.pattern'], {}), '((self.component, self.channel, self.height,\n self.width), self.data_type, self.pattern)\n', (7458, 7549), False, 'from tensor_operator import TensorOperator\n'), ((12947, 13070), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(self.kernel_number, self.channel, self.height, self.width)', 'self.data_type', 'self.pattern'], {}), 
'((self.kernel_number, self.channel, self.height,\n self.width), self.data_type, self.pattern)\n', (12975, 13070), False, 'from tensor_operator import TensorOperator\n'), ((6501, 6639), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(self.batch, self.surface_number, self.height, self.width * self.atomic_memory)', 'self.data_type', '"""index"""'], {}), "((self.batch, self.surface_number, self.height,\n self.width * self.atomic_memory), self.data_type, 'index')\n", (6529, 6639), False, 'from tensor_operator import TensorOperator\n'), ((7772, 7919), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(1, self.surface_number, self.height, self.component * self.width * self.\n atomic_memory)', 'self.data_type', '"""index"""'], {}), "((1, self.surface_number, self.height, self.\n component * self.width * self.atomic_memory), self.data_type, 'index')\n", (7800, 7919), False, 'from tensor_operator import TensorOperator\n'), ((13244, 13367), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(self.kernel_number * self.channel * self.height * self.width,)', 'self.data_type', '"""index"""'], {}), "((self.kernel_number * self.channel * self.\n height * self.width,), self.data_type, 'index')\n", (13272, 13367), False, 'from tensor_operator import TensorOperator\n'), ((24624, 24825), 'tensor_operator.TensorOperator.create_tensor', 'TensorOperator.create_tensor', (['(1, 1, self.height, (self.width + self.pad_num_line_start[plane_idx] +\n pad_num_line_end[plane_idx]) * self.channel_per_plane[plane_idx])', 'self.data_type', '"""index"""'], {}), "((1, 1, self.height, (self.width + self.\n pad_num_line_start[plane_idx] + pad_num_line_end[plane_idx]) * self.\n channel_per_plane[plane_idx]), self.data_type, 'index')\n", (24652, 24825), False, 'from tensor_operator import TensorOperator\n')] |
import numpy as np
from Weno import *
import Numfluxes as NF
from RK3TVD import *
import numba as nb
import time
import socket
size=1000  # number of spatial grid cells (h = L/size)
L=1.0  # domain length
dt=0.8/size  # time step
T=1.  # final simulation time
def init(X):
    """Fill X with the initial profile: a linearly decreasing ramp on the
    open index interval (size//8, size//2 + size//8), zero elsewhere.
    Uses the module-level grid parameters 'size' and 'L'."""
    h = L / size
    lo = size // 8
    hi = size // 2 + size // 8
    for i in range(size):
        X[i] = 1. - 2 * (i - lo) * h / L if lo < i < hi else 0.0
#
In = np.empty(size)  # current solution vector
Out = np.empty(size)  # next-step buffer (swapped with In each step)
init(In)
np.savetxt("gp0",In)  # dump the initial condition for plotting
print("size= ",size," dt= ",dt," nteps=", T/dt)
R=RK3TVD(size,L)  # time integrator from the RK3TVD module
# Numerical flux selection; uncomment one of the alternatives below to switch.
#NumF=NF.GodunovConvection
NumF=NF.GodunovBurghers
#NumF=NF.LaxFriedrichsConvection
#NumF=NF.LaxFriedrichsBurghers
t=0
t1 = time.time()
# Advance the solution from t=0 to T, swapping In/Out to reuse the buffers.
while t<T:
    Out=R.op(NumF,In,dt)
    In,Out=Out,In
    t+=dt
t=(time.time()-t1)  # t is reused here to hold the wall-clock time
print("computing time: ",t)
# np.savetxt opens, writes and closes the file itself; the separate
# open("gp","w") handle that was held around it only truncated the file
# redundantly and was never written to, so it has been removed.
np.savetxt("gp",In)
print("A file 'gp' with the final solution was created.")
# Record the wall-clock time in a host-named marker file; the 'with' block
# guarantees the handle is closed even if the write raises.
with open("RunningOn"+socket.gethostname(),"w") as f:
    f.write(str(t)+"\n")
| [
"socket.gethostname",
"numpy.empty",
"numpy.savetxt",
"time.time"
] | [((348, 362), 'numpy.empty', 'np.empty', (['size'], {}), '(size)\n', (356, 362), True, 'import numpy as np\n'), ((369, 383), 'numpy.empty', 'np.empty', (['size'], {}), '(size)\n', (377, 383), True, 'import numpy as np\n'), ((394, 415), 'numpy.savetxt', 'np.savetxt', (['"""gp0"""', 'In'], {}), "('gp0', In)\n", (404, 415), True, 'import numpy as np\n'), ((609, 620), 'time.time', 'time.time', ([], {}), '()\n', (618, 620), False, 'import time\n'), ((754, 774), 'numpy.savetxt', 'np.savetxt', (['"""gp"""', 'In'], {}), "('gp', In)\n", (764, 774), True, 'import numpy as np\n'), ((692, 703), 'time.time', 'time.time', ([], {}), '()\n', (701, 703), False, 'import time\n'), ((863, 883), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (881, 883), False, 'import socket\n')] |
import itertools
from collections import defaultdict
from functools import partial
from math import ceil
from multiprocessing import Pool
from os import cpu_count
import numpy as np
from numba import njit
from pandas import DataFrame
def __set_matrix(x, phases=None, subset_genes=None, subset_samples=None, rm_zeros=True, fraction=None, verbose=False):
    """
    Sets the parameter for the algorithms and trims the 'x'-matrix to contain only necessary elements
    :param x: Pandas-Matrix with gene counts, index must contain gene names, columns must contain sample names
    :param phases: Dictionary of Lists, i.e. {phase: [sample, ...]}, containing annotation of samples to their phase
    :param subset_genes: List of Indices, Names or Boolean of genes to look at. Excluding all other.
    :param subset_samples: List of Indices, Names or Boolean of samples to look at. Excluding all other
    :param rm_zeros: If True, remove genes that are not expressed in any sample
    :param fraction: Fraction used to pre-compute per-phase thresholds (only used when 'phases' is given)
    :param verbose: Print progress information
    :return: Dictionary: {
        "x": truncated matrix values,
        "phases": phases annotation,
        "sample_names": list of sample names,
        "gene_names": list of gene names,
        "thresholds": thresholds
    }
    """
    current_shape = x.shape
    if verbose:
        print('[__set_matrix] Original Matrix \'x\' has shape {} x {}'.format(
            current_shape[0], current_shape[1]
        ))
    # Check for index
    if x.index.dtype != object:
        raise Exception("Index empty! Please set genes as index with pandas.set_index().")
    # Copy x to ensure original matrix is not altered
    x_copy = x.copy()
    # Eliminate rows where genes not in 'subset_genes', if provided
    # NOTE: 'to_index' and 'to_named' below are helpers defined elsewhere in
    # this module.
    if subset_genes is not None:
        # Make sure 'subset_genes' is index array if boolean or named array is supplied
        # And remove genes provided in 'subset_genes' but not contained in 'x'
        genes_mask = to_index(subset_genes, x_copy.index)
        x_copy = x_copy.iloc[genes_mask, :]
        if verbose:
            print('[__set_matrix] Removed {} genes that were not in \'subset_genes\'. {} genes remaining.'.format(
                (current_shape[0] - x_copy.shape[0]), x_copy.shape[0])
            )
    current_shape = x_copy.shape
    if rm_zeros:
        # Eliminate not expressed genes (keep rows with any non-zero entry)
        x_copy = x_copy[(x_copy.T != 0).any()]
        if verbose:
            print('[__set_matrix] Removed {} genes that were not expressed in any samples. {} genes remaining.'.format(
                (current_shape[0] - x_copy.shape[0]), x_copy.shape[0])
            )
    # Store remaining gene names for later use, rename for readability
    gene_names = list(x_copy.index)
    # Store all sample names for re calculation of indices
    all_samples = x_copy.columns.values
    # Eliminate columns where samples not in 'subset_samples', if provided
    if subset_samples is not None:
        # Make sure 'subset_samples' is index array if boolean or named array is supplied
        # And remove samples provided in 'subset_genes' but not contained in 'x'
        sample_mask = to_index(subset_samples, all_samples)
        x_copy = x_copy.iloc[:, sample_mask]
        if verbose:
            print('[__set_matrix] Removed {} samples that were not in \'subset_samples\'. {} samples remaining.'.format(
                (current_shape[1] - x_copy.shape[1]), x_copy.shape[1])
            )
    current_shape = x_copy.shape
    thresholds = None
    phases_copy = None
    # Eliminate samples not annotated in 'phases'
    if phases is not None:
        # Get 1D index based mask from samples per phase
        # And remove all samples not contained in this list
        phase_mask = [
            idx for _, samples in phases.items()
            for idx in to_index(
                to_named(samples, all_samples),
                x_copy.columns.values
            )
        ]
        x_copy = x_copy.iloc[:, phase_mask]
        if verbose:
            print(
                '[__set_matrix] Removed {} samples that were not annotated in \'phases\'. {} samples remaining.'.format(
                    (current_shape[1] - x_copy.shape[1]), x_copy.shape[1])
            )
        # Re-calculate phases indices based on truncated sample list
        phases_copy = {
            phase: to_index(
                to_named(samples, all_samples),
                x_copy.columns.values
            ) for phase, samples in phases.items()
        }
        # Pre Calculate thresholds for phases (ceil so a non-zero fraction
        # always requires at least one sample)
        thresholds = {phase: ceil(len(samples) * fraction) for phase, samples in phases_copy.items()}
    # Store remaining sample names for later use, rename for readability
    sample_names = list(x_copy.columns.values)
    if verbose:
        print('[__set_matrix] Matrix truncation done. Working with {} genes for {} samples.'.format(
            x_copy.shape[0], x_copy.shape[1])
        )
    # Transform to ndarray for faster calculations
    x_copy = x_copy.values
    return {
        "x": x_copy,
        "phases": phases_copy,
        "sample_names": sample_names,
        "gene_names": gene_names,
        "thresholds": thresholds
    }
def sandbag(x, phases, fraction=0.5, processes=1, subset_genes=None, subset_samples=None, weighted=False,
            triplets=False, verbose=False):
    """ Calculates the pairs of genes serving as marker pairs for each phase, based on a matrix of gene counts and
    an annotation of known phases.

    :param x: Pandas-Matrix with gene counts, index must contain gene names, columns must contain sample names
    :param fraction: Fraction to be used as threshold.
    :param processes: Number of processes to use for multiprocess.pool
    :param phases: Dictionary of Lists, i.e. {phase: [sample, ...]}, containing annotation of samples to their phase
    :param subset_genes: List of Indices, Names or Boolean of genes to look at excluding all other
    :param subset_samples: List of Indices, Names or Boolean of samples to look at excluding all other
    :param weighted: Calculate weight for each pair.
    :param triplets: Calculate 3-tuples instead of pairs. Where (g1 > g2 > g3)
    :param verbose: Debug info
    :return: Dictionary of List of Tuples, i.e. {phase: [(Gene1, Gene2), ...]}, containing marker pairs per phase
    """
    # Set the parameter to the class instance and remove unnecessary elements in 'x'
    params = __set_matrix(x, fraction=fraction, phases=phases, subset_genes=subset_genes, subset_samples=subset_samples,
                          verbose=verbose)
    if verbose:
        print('[sandbag] Identifying marker pairs...', end='')
    # Every unordered pair of gene indices is a candidate marker pair.
    possible_combinations = itertools.combinations(range(0, len(params["gene_names"])), 2)
    if processes == 0:
        processes = cpu_count() - 1
    masks = (params["phases"]["G1"], params["phases"]["S"], params["phases"]["G2M"])
    thresholds = [params["thresholds"]["G1"], params["thresholds"]["S"], params["thresholds"]["G2M"]]
    # NOTE: 'check_phase_for_pair_wrapper' is defined elsewhere in this module;
    # binding the constant arguments here keeps the pool payload small.
    check_phase_for_pair_wrapper_par = partial(check_phase_for_pair_wrapper, x=params["x"], masks=masks,
                                              thresholds=thresholds)
    # Multi cored calculation if requested
    if processes != 1:
        # Worker pool of processes
        with Pool(processes=processes) as pool:
            if verbose:
                print("Processing in parallel with {} processes...".format(processes))
            annotations = pool.map(
                check_phase_for_pair_wrapper_par, possible_combinations)
        annotations = list(annotations)
    else:
        # Sequential path: lazily evaluate each candidate pair.
        annotations = (check_phase_for_pair_wrapper_par(pair) for pair in possible_combinations)
    # Create container for marker pairs
    marker_pairs = {phase: [] for phase in phases.keys()}
    # Puts marker pairs into the 'marker_pairs' dictionary and removes 'None' phase annotation
    for annotation in annotations:
        if annotation[0]:
            if weighted:
                # Weight is the annotated count normalised by the sample count.
                marker_pairs[annotation[0]].append(
                    (
                        params["gene_names"][annotation[1][0]],
                        params["gene_names"][annotation[1][1]],
                        (annotation[2] / len(params["sample_names"]))
                    )
                )
            else:
                marker_pairs[annotation[0]].append(
                    (params["gene_names"][annotation[1][0]], params["gene_names"][annotation[1][1]]))
    if triplets:
        # 'identify_triplets' is defined elsewhere in this module.
        marker_pairs = identify_triplets(marker_pairs, weighted=weighted)
    if verbose:
        count_pairs = 0
        for _, pairs in marker_pairs.items():
            count_pairs = count_pairs + len(pairs)
        print(" Done!")
        print("[sandbag] Identified {} marker pairs (phase: count):".format(count_pairs), end=' ')
        print({phase: len(pairs) for phase, pairs in marker_pairs.items()})
    # Return 'marker_pairs' dictionary: {phase: [(Gene1, Gene2), ...]}
    return marker_pairs
def cyclone(x, marker_pairs, subset_genes=None, iterations=1000, min_iter=100, min_pairs=50,
            subset_samples=None, verbose=False, rm_zeros=False, processes=1, weighted=False, triplets=False):
    """ Calculates scores for each sample and each phase and assigns prediction based on marker pairs indentified by
    sandbag

    :param x: Pandas-Matrix with gene counts, index must contain gene names, columns must contain sample names
    :param marker_pairs: Dict of marker pairs per phase. See sandbag output.
    :param iterations: An integer scalar specifying the number of iterations for random sampling to obtain a cycle
    score.
    :param min_iter: An integer scalar specifying the minimum number of iterations for score estimation
    :param min_pairs: An integer scalar specifying the minimum number of pairs for cycle estimation.
    :param subset_genes: List of Indices, Names or Boolean of genes to look at excluding all other
    :param subset_samples: List of Indices, Names or Boolean of samples to look at excluding all other
    :param weighted: Use weights for score calculation
    :param processes: Number of processes to use for multiprocess.pool
    :param rm_zeros: Whether not expressed genes should be removed
    :param triplets: Pairs a 3-tuples
    :param verbose: Debug info
    :return: Dictionary with raw scores, normalized scores and the phase predictions derived from each
    """
    params = __set_matrix(x, subset_genes=subset_genes, subset_samples=subset_samples, rm_zeros=rm_zeros,
                          verbose=verbose)
    if verbose:
        print('[cyclone] Preparing marker pairs, where at least one gene was not present in \'x\'...', end='')
    # Eliminate all gene pairs where at least one gene is not present in gene_names and convert to index
    marker_pairs_idx = defaultdict(list)
    removed = 0
    used = defaultdict(list)
    used_idx = defaultdict(list)
    gene_name_idx = {g: i for i, g in enumerate(params["gene_names"])}
    weights = defaultdict(list)
    # Generate used list.  Weighted pairs carry the weight as last element,
    # so a weighted triplet has length 4 and a weighted pair length 3.
    for phase, pairs in marker_pairs.items():
        u = []
        for pair in pairs:
            try:
                if weighted:
                    if len(pair) == 4:
                        idx_pair = (gene_name_idx[pair[0]], gene_name_idx[pair[1]], gene_name_idx[pair[2]])
                        u.extend([idx_pair[0], idx_pair[1], idx_pair[2]])
                    else:
                        idx_pair = (gene_name_idx[pair[0]], gene_name_idx[pair[1]], -1)
                        u.extend([idx_pair[0], idx_pair[1]])
                    weights[phase].append(pair[-1])
                else:
                    if len(pair) == 3:
                        idx_pair = (gene_name_idx[pair[0]], gene_name_idx[pair[1]], gene_name_idx[pair[2]])
                        u.extend([idx_pair[0], idx_pair[1], idx_pair[2]])
                    else:
                        idx_pair = (gene_name_idx[pair[0]], gene_name_idx[pair[1]], -1)
                        u.extend([idx_pair[0], idx_pair[1]])
                    weights[phase].append(1)
                marker_pairs_idx[phase].append(idx_pair)
            except KeyError:
                # A gene of this pair is not present in 'x'; drop the pair.
                removed = removed + 1
        used[phase] = list(np.unique(u))
    # Map each used gene index to its position within the per-phase 'used' list.
    for phase, pairs in marker_pairs.items():
        u_idx = np.empty(len(params["gene_names"]), dtype=int)
        for i, u in enumerate(used[phase]):
            u_idx[u] = i
        used_idx[phase] = u_idx
    if verbose:
        count_pairs = 0
        for phase, pairs in marker_pairs_idx.items():
            count_pairs = count_pairs + len(pairs)
            if len(pairs) == 0:
                print('0 marker pairs for phase {}, setting scores to zeros!'.format(phase))
        print(' Done!')
        print('[cyclone] Removed {} marker pairs. {} marker pairs remaining.'.format(removed, count_pairs))
        print('[cyclone] Calculating scores and predicting cell cycle phase...', end='')
    if processes == 0:
        processes = cpu_count() - 1
    # Iterate over phases
    scores = {phase: __get_phase_scores(params["x"], iterations, min_iter, min_pairs,
                                        pairs, used[phase], used_idx[phase], processes, weights[phase], triplets
                                        ) for phase, pairs
              in marker_pairs_idx.items()}
    # Guarantee all three canonical phases are present (zeros if unscored).
    for p in list(["G1", "S", "G2M"]):
        if p not in scores:
            scores[p] = [0.0] * len(params['sample_names'])
    scores_df = DataFrame(scores)
    normalized_df = scores_df.div(scores_df.sum(axis=1), axis=0)
    # BUG FIX: the normalized columns were renamed positionally with the
    # hard-coded list ["G1_norm", "G2M_norm", "S_norm"], but the column order
    # of a DataFrame built from a dict follows the dict's insertion order
    # (i.e. the order of phases in 'marker_pairs'), which mislabeled the
    # columns whenever that order was not G1, G2M, S.  Derive the names from
    # the actual columns instead.
    normalized_df.columns = [str(c) + "_norm" for c in scores_df.columns]
    prediction = {}
    # Raw-score rule: G1 or G2M wins if its score reaches 0.5, otherwise S.
    for index, score in scores_df.iterrows():
        if score["G1"] >= 0.5 or score["G2M"] >= 0.5:
            if score["G1"] >= score["G2M"]:
                prediction[params["sample_names"][index]] = "G1"
            else:
                prediction[params["sample_names"][index]] = "G2M"
        else:
            prediction[params["sample_names"][index]] = "S"
    prediction_normalized = {}
    # Normalized-score rule: the largest normalized score wins (ties favour
    # G1 over G2M over S).
    for index, score in normalized_df.iterrows():
        if score["G1_norm"] >= score["G2M_norm"] and score["G1_norm"] >= score["S_norm"]:
            prediction_normalized[params["sample_names"][index]] = "G1"
        elif score["G2M_norm"] > score["G1_norm"] and score["G2M_norm"] >= score["S_norm"]:
            prediction_normalized[params["sample_names"][index]] = "G2M"
        else:
            prediction_normalized[params["sample_names"][index]] = "S"
    output = {
        "prediction": prediction,
        "prediction_normalized": prediction_normalized,
        "scores": scores_df,
        "normalized": normalized_df
    }
    if verbose:
        print(' Done!')
        print("[cyclone] Calculated scores and prediction (phase: count): ", end='')
        counts = defaultdict(int)
        for _, pred in prediction.items():
            counts[pred] = counts[pred] + 1
        print(', '.join('{}: {}'.format(phase, count) for phase, count in counts.items()))
    return output
def __get_phase_scores(x, iterations, min_iter, min_pairs, pairs, used, used_idx, processes, weights, triplets):
    """Compute the permutation-test score for every sample (column of x).

    Each sample is restricted to the genes selected by `used` and scored via
    __get_sample_score, either in-process or on a pool of worker processes.

    :param x: Expression matrix (genes x samples)
    :param used: Index mask selecting the genes used by the marker pairs
    :param processes: Number of worker processes; 1 disables multiprocessing
    :return: List with one score per sample
    """
    if processes == 1:
        # Serial path: score each sample directly
        return [
            __get_sample_score(column[used], iterations, min_iter, min_pairs, pairs, used_idx, weights, triplets)
            for column in x.T
        ]
    # Parallel path: freeze the shared arguments, then fan the per-sample
    # gene subsets out over a pool of worker processes
    scorer = partial(
        __get_sample_score, iterations=iterations, min_iter=min_iter, min_pairs=min_pairs, pairs=pairs,
        used_idx=used_idx, weights=weights, triplets=triplets
    )
    subsets = [column[used] for column in x.T]
    with Pool(processes=processes) as workers:
        results = workers.map(scorer, subsets)
    return list(results)
@njit()
def __get_sample_score(sample, iterations, min_iter, min_pairs, pairs, used_idx, weights, triplets):
    """Score one sample via a permutation test.

    Computes the observed proportion of marker pair/triplet relations
    satisfied by `sample`, then repeatedly shuffles the sample's values and
    returns the fraction of shuffles scoring strictly below the observed
    proportion. The loop stops and returns as soon as `min_iter` valid
    (non-None) shuffle scores have accumulated; `iterations` is an upper cap.

    Returns 0 when the observed proportion cannot be computed.
    NOTE(review): if fewer than `min_iter` valid shuffle scores accumulate
    within `iterations` rounds, the function falls off the end and implicitly
    returns None — confirm callers handle that case.
    """
    if triplets:
        cur_score = get_proportion_triple(sample, min_pairs, pairs, used_idx, weights)
    else:
        cur_score = get_proportion(sample, min_pairs, pairs, used_idx, weights)
    if cur_score is None:
        return 0
    below = 0
    total = 0
    # NOTE(review): `idx` merely aliases `sample`, so np.random.shuffle below
    # permutes the caller's array in place (no copy is taken).
    idx = sample
    for i in range(0, iterations):
        np.random.shuffle(idx)
        if triplets:
            new_score = get_proportion_triple(idx, min_pairs, pairs, used_idx, weights)
        else:
            new_score = get_proportion(idx, min_pairs, pairs, used_idx, weights)
        if new_score is not None:
            if new_score < cur_score:
                below += 1
            total += 1
        if total >= min_iter:
            return below / total
@njit()
def get_proportion_triple(sample, min_pairs, pairs, used_idx, weights):
    """Weighted proportion of marker triplets (a, b, c) satisfying a > b > c.

    `hits` sums the weights of triplets whose three expression values are in
    strictly decreasing order; `total` sums the weights of triplets without
    adjacent ties. Returns None when the weighted hit count is below
    `min_pairs`, signalling that no reliable score can be computed.
    NOTE(review): the chained comparison `a != b != c` does not test a != c.
    """
    hits = 0
    total = 0
    for i, pair in enumerate(pairs):
        a = sample[used_idx[pair[0]]]
        b = sample[used_idx[pair[1]]]
        c = sample[used_idx[pair[2]]]
        if a > b > c:
            hits += weights[i]
        # Only triplets without adjacent ties contribute to the denominator
        if a != b != c:
            total += weights[i]
    if hits < min_pairs:
        return None
    return hits / total
@njit()
def get_proportion(sample, min_pairs, pairs, used_idx, weights):
    """Weighted proportion of marker pairs (a, b) expressed with a > b.

    `hits` accumulates the weights of pairs where gene a is expressed above
    gene b; `total` accumulates the weights of pairs without a tie.
    Returns None when the weighted hit count is below `min_pairs`.
    NOTE(review): if min_pairs <= 0 and every pair ties, this divides by
    zero — confirm min_pairs is always positive at the call sites.
    """
    hits = 0
    total = 0
    for i, pair in enumerate(pairs):
        a = sample[used_idx[pair[0]]]
        b = sample[used_idx[pair[1]]]
        if a > b:
            hits += weights[i]
        # Ties contribute to neither hits nor total
        if a != b:
            total += weights[i]
    if hits < min_pairs:
        return None
    return hits / total
def check_phase_for_pair_wrapper(pair, x, masks, thresholds):
    """Map the signed phase code of __check_phase_for_pair to a phase name.

    Returns (phase_name, oriented_pair, fraction): the pair is kept as-is for
    a positive code and reversed for a negative one; a code of 0 yields the
    phase name None.
    """
    phase_names = [None, "G1", "S", "G2M"]
    code, frac = __check_phase_for_pair(pair, x, masks, thresholds)
    oriented = pair if code > 0 else (pair[1], pair[0])
    return phase_names[abs(code)], oriented, frac
@njit()
def __check_phase_for_pair(pair, x, masks, thresholds):
    """Calculate the phase for which a pair of genes is a valid marker pair.

    Returns a positive code (1=G1, 2=S, 3=G2M — matching the phase list in
    check_phase_for_pair_wrapper) when gene 1 is higher expressed than gene 2
    in that phase (in at least threshold-many cells) while being lower
    expressed in all other phases; a negative code when the reversed pair
    (gene 2, gene 1) is a marker for that phase; and (0, 0) when neither
    orientation is a marker pair for any phase.

    :param pair: Tuple (Gene 1 index, Gene 2 index) of genes to be checked
    :param x: Expression matrix with gene counts
    :param masks: Per-phase index masks of the annotated cells
    :param thresholds: Pre-calculated per-phase thresholds, i.e. 'fraction' * 'number of cells in phase'
    :return: Tuple (signed phase code, accumulated count of supporting cells)
    """
    x1 = x[pair[0], :]
    x2 = x[pair[1], :]
    # Subtract all gene counts of gene 2 from gene counts of gene 1
    diff = __expression_diff(x1, x2)
    # Counter for phases in which gene 1 > gene 2
    up = 0
    # Stores last phase in which gene 1 < gene 2
    down = 0
    frac = 0
    # Test each phase
    for i in range(0, 3):
        frac = __count_up(mask(diff, masks[i]))
        # Check if gene 1 > gene 2 in more than set fraction of samples in current phase
        if frac >= thresholds[i]:
            up += 1
            passed_other = True
            # Check if gene 2 > gene 1 in all other phases
            for j in range(0, 3):
                # Skip same phase
                if i != j:
                    sub_frac = __count_down(mask(diff, masks[j]))
                    # Check if gene 1 < gene 2 in more than set fraction of samples in current 'sub_phase'
                    if not sub_frac >= thresholds[j]:
                        passed_other = False
                        sub_frac = __count_up(mask(diff, masks[j]))
                        # If not, check if gene 1 > gene 2 in current 'sub_phase'
                        if sub_frac >= thresholds[j]:
                            frac += sub_frac
                            up += 1
                            # Don't check other phases
                            break
                        else:
                            frac += sub_frac
                            # Store down phase as it could be up for the reversed pair
                            down = j + 1
            # Return phase and pair if found
            if passed_other:
                return i + 1, frac
            else:
                break
    # When gene 1 > gene 2 in all but one phase, consider that (Gene2, Gene1) is marker pair in the remaining phase
    if up == 2:
        # When the loop above already revealed the remaining phase and checked that Gene 2 > Gene 1 not only '>='
        if down != 0:
            # Return reversed pair with phase
            return 0 - down, frac
        # Else look at the remaining phase and check if Gene 2 > Gene 1 not only '>='
        else:
            sub_frac = __count_down(mask(diff, masks[2]))
            if sub_frac >= thresholds[2]:
                frac += sub_frac
                # Return reversed pair with checked phase
                return -3, frac
    # Return (0, 0) when neither the pair nor its reverse is a marker for any phase
    return 0, 0
@njit()
def mask(x, arr):
    """Gather the elements of x at the indices listed in arr.

    Equivalent to fancy indexing x[arr], written as an explicit loop. The
    result is a new float array (np.zeros default dtype).
    """
    y = np.zeros(len(arr))
    for i in range(0, len(arr)):
        y[i] = x[arr[i]]
    return y
@njit()
def __expression_diff(x1, x2):
    """ Fast element-wise subtraction of two expression rows
    :param x1: Row 1 (gene 1 counts)
    :param x2: Row 2 (gene 2 counts)
    :return: Element-wise difference x1 - x2
    """
    return np.subtract(x1, x2)
@njit()
def __count_up(diff):
    """Return the number of strictly positive entries in diff."""
    return (diff > 0).sum()
@njit()
def __check_if_up(diff, threshold):
    """ Checks if Gene 1 is higher expressed than Gene 2
    :param diff: Difference expression gene 1 - gene 2
    :param threshold: Number of required samples
    :return: True if at least `threshold` entries of diff are above 0
    """
    return __count_up(diff) >= threshold
@njit()
def __count_down(diff):
    """Return the number of strictly negative entries in diff."""
    return (diff < 0).sum()
@njit()
def __check_if_down(diff, threshold):
    """ Checks if Gene 1 is lower expressed than Gene 2
    :param diff: Difference expression gene 1 - gene 2
    :param threshold: Number of required samples
    :return: True if at least `threshold` entries of diff are below 0
    """
    return __count_down(diff) >= threshold
def to_index(m, names=None):
    """Convert a homogeneous mask to a list of integer indices.

    Accepts three mask flavours: a list of int indices (returned unchanged),
    a list of names (mapped through `names`), or a boolean mask (converted to
    the indices of the True entries).

    :param m: Homogeneous list of ints, strs or bools
    :param names: Iterable of names; only required for named masks
    :return: List of integer indices (or `m` itself for an index mask)
    :raises ValueError: when `m` mixes element types
    """
    # `list(None)` would raise, so only materialize names when provided
    names = list(names) if names is not None else []
    # Boolean mask — must be tested BEFORE int: bool is a subclass of int,
    # so the int branch would otherwise swallow boolean masks.
    if m and all(isinstance(i, bool) for i in m):
        return [i for i, j in enumerate(m) if j]
    # Index mask (also matches the empty mask, preserving the old behavior)
    if all(isinstance(i, int) for i in m):
        return m
    # Named mask: unknown names are silently dropped
    if all(isinstance(i, str) for i in m):
        return [names.index(i) for i in m if i in names]
    raise ValueError("Only homogeneous Index, Name or Boolean arrays valid")
def to_named(m, names):
    """Convert a homogeneous mask to a list of names.

    Accepts three mask flavours: a list of names (filtered to those present
    in `names`), a list of int indices (mapped through `names`), or a boolean
    mask (converted to the names of the True entries).

    :param m: Homogeneous list of strs, ints or bools
    :param names: Iterable of names
    :return: List of names
    :raises ValueError: when `m` mixes element types
    """
    names = list(names)
    # Boolean mask — must be tested BEFORE int: bool is a subclass of int,
    # so the int branch would otherwise misread boolean masks as indices.
    if m and all(isinstance(i, bool) for i in m):
        return [names[i] for i, j in enumerate(m) if j]
    # Named mask: keep only known names. Iterating `names` (instead of a raw
    # set intersection) makes the result order deterministic across runs.
    if all(isinstance(i, str) for i in m):
        known = set(m)
        return [n for n in names if n in known]
    # Index mask
    if all(isinstance(i, int) for i in m):
        return [names[i] for i in m]
    raise ValueError("Only homogeneous Index, Name or Boolean arrays valid")
def identify_triplets(marker_pairs, weighted=False, fraction=0):
triplets = defaultdict(list)
for phase, pairs in marker_pairs.items():
pairs_map = defaultdict(set)
weight_map = {}
for pair in pairs:
pairs_map[pair[0]].add(pair[1])
if weighted:
weight_map[(pair[0], pair[1])] = pair[2]
else:
weight_map[(pair[0], pair[1])] = 1
found = []
for g1, g2s in list(pairs_map.items()):
g2sint = g2s.intersection
for g2 in g2s:
for g3 in g2sint(pairs_map[g2]):
if g3 in pairs_map[g1]:
if weighted:
weight = weight_map[(g1, g2)] * weight_map[(g2, g3)] * weight_map[(g1, g3)]
if weight > fraction:
found.append((g1, g2, g3, weight))
else:
found.append((g1, g2, g3))
triplets[phase] = found
return triplets
| [
"numpy.unique",
"numba.njit",
"numpy.subtract",
"functools.partial",
"collections.defaultdict",
"os.cpu_count",
"multiprocessing.Pool",
"pandas.DataFrame",
"numpy.random.shuffle"
] | [((15823, 15829), 'numba.njit', 'njit', ([], {}), '()\n', (15827, 15829), False, 'from numba import njit\n'), ((16666, 16672), 'numba.njit', 'njit', ([], {}), '()\n', (16670, 16672), False, 'from numba import njit\n'), ((17108, 17114), 'numba.njit', 'njit', ([], {}), '()\n', (17112, 17114), False, 'from numba import njit\n'), ((17814, 17820), 'numba.njit', 'njit', ([], {}), '()\n', (17818, 17820), False, 'from numba import njit\n'), ((21111, 21117), 'numba.njit', 'njit', ([], {}), '()\n', (21115, 21117), False, 'from numba import njit\n'), ((21237, 21243), 'numba.njit', 'njit', ([], {}), '()\n', (21241, 21243), False, 'from numba import njit\n'), ((21416, 21422), 'numba.njit', 'njit', ([], {}), '()\n', (21420, 21422), False, 'from numba import njit\n'), ((21476, 21482), 'numba.njit', 'njit', ([], {}), '()\n', (21480, 21482), False, 'from numba import njit\n'), ((21794, 21800), 'numba.njit', 'njit', ([], {}), '()\n', (21798, 21800), False, 'from numba import njit\n'), ((21856, 21862), 'numba.njit', 'njit', ([], {}), '()\n', (21860, 21862), False, 'from numba import njit\n'), ((6932, 7024), 'functools.partial', 'partial', (['check_phase_for_pair_wrapper'], {'x': "params['x']", 'masks': 'masks', 'thresholds': 'thresholds'}), "(check_phase_for_pair_wrapper, x=params['x'], masks=masks,\n thresholds=thresholds)\n", (6939, 7024), False, 'from functools import partial\n'), ((10728, 10745), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10739, 10745), False, 'from collections import defaultdict\n'), ((10775, 10792), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10786, 10792), False, 'from collections import defaultdict\n'), ((10808, 10825), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10819, 10825), False, 'from collections import defaultdict\n'), ((10913, 10930), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10924, 10930), False, 'from collections import defaultdict\n'), 
((13407, 13424), 'pandas.DataFrame', 'DataFrame', (['scores'], {}), '(scores)\n', (13416, 13424), False, 'from pandas import DataFrame\n'), ((21393, 21412), 'numpy.subtract', 'np.subtract', (['x1', 'x2'], {}), '(x1, x2)\n', (21404, 21412), True, 'import numpy as np\n'), ((23171, 23188), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (23182, 23188), False, 'from collections import defaultdict\n'), ((14757, 14773), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (14768, 14773), False, 'from collections import defaultdict\n'), ((15183, 15349), 'functools.partial', 'partial', (['__get_sample_score'], {'iterations': 'iterations', 'min_iter': 'min_iter', 'min_pairs': 'min_pairs', 'pairs': 'pairs', 'used_idx': 'used_idx', 'weights': 'weights', 'triplets': 'triplets'}), '(__get_sample_score, iterations=iterations, min_iter=min_iter,\n min_pairs=min_pairs, pairs=pairs, used_idx=used_idx, weights=weights,\n triplets=triplets)\n', (15190, 15349), False, 'from functools import partial\n'), ((16258, 16280), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (16275, 16280), True, 'import numpy as np\n'), ((23257, 23273), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (23268, 23273), False, 'from collections import defaultdict\n'), ((6688, 6699), 'os.cpu_count', 'cpu_count', ([], {}), '()\n', (6697, 6699), False, 'from os import cpu_count\n'), ((7184, 7209), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'processes'}), '(processes=processes)\n', (7188, 7209), False, 'from multiprocessing import Pool\n'), ((12155, 12167), 'numpy.unique', 'np.unique', (['u'], {}), '(u)\n', (12164, 12167), True, 'import numpy as np\n'), ((12918, 12929), 'os.cpu_count', 'cpu_count', ([], {}), '()\n', (12927, 12929), False, 'from os import cpu_count\n'), ((15477, 15502), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'processes'}), '(processes=processes)\n', (15481, 15502), False, 'from multiprocessing import 
Pool\n')] |
import numpy as np
import pandas as pd
import streamlit as st
import altair as alt
from matplotlib import pyplot as plt
import config, dataset, main, utils
# Matplotlib global configuration: seaborn look, high-resolution figures.
# NOTE(review): the "seaborn" style alias was deprecated in matplotlib 3.6
# and later removed (newer versions use "seaborn-v0_8") — confirm the
# matplotlib version this project pins.
plt.style.use("seaborn")
plt.rcParams["figure.dpi"] = 300
def get_altair_hist_plot(series, name, bin_min, bin_max, bin_step):
    """
    Build an Altair bar chart showing the histogram of the given Pandas Series.

    :param series: values to aggregate
    :param name: label used for the x-axis field
    :param bin_min: lower edge of the first bin
    :param bin_max: upper bound for the bin edges
    :param bin_step: width of each bin
    :return: an altair Chart instance
    """
    # NOTE(review): np.arange excludes bin_max, so values in the final
    # partial bin [last_edge, bin_max) are dropped by np.histogram while the
    # chart extent still claims [bin_min, bin_max] — confirm intent.
    hist, bin_edges = np.histogram(
        series,
        bins=np.arange(bin_min, bin_max, bin_step),
    )
    # Each bar is keyed by its bin's left edge
    data = pd.DataFrame({name: bin_edges[:-1], "Count": hist})
    return (
        alt.Chart(data)
        .mark_bar()
        .encode(
            alt.X(f"{name}:Q", bin=alt.Bin(extent=[bin_min, bin_max], step=bin_step)), y="Count"
        )
    )
# Title section
st.set_page_config(
    page_title="3D Bin Packing",
)
st.header("3D Bin Packing")
# Dataset section
st.header("Dataset")
# Load (or generate) the synthetic product dataset; dimension and weight
# bounds come from the project configuration module
product_dataset = dataset.ProductDataset(
    "data/products.pkl",
    config.NUM_PRODUCTS,
    config.MIN_PRODUCT_WIDTH,
    config.MAX_PRODUCT_WIDTH,
    config.MIN_PRODUCT_DEPTH,
    config.MAX_PRODUCT_DEPTH,
    config.MIN_PRODUCT_HEIGHT,
    config.MAX_PRODUCT_HEIGHT,
    config.MIN_PRODUCT_WEIGHT,
    config.MAX_PRODUCT_WEIGHT,
    force_overload=False,
)
# Plot depth over width ratio in the dataset
dw_ratio_plot = get_altair_hist_plot(
    product_dataset.products.depth / product_dataset.products.width,
    "D/W Ratio",
    0,
    1,
    0.01,
)
st.altair_chart(dw_ratio_plot, use_container_width=True)
# Plot height over width ratio in the dataset
hw_ratio_plot = get_altair_hist_plot(
    product_dataset.products.height / product_dataset.products.width,
    "H/W Ratio",
    0,
    2,
    0.05,
)
st.altair_chart(hw_ratio_plot, use_container_width=True)
# Plot volume distribution in the dataset
# (volume is divided by 1e6 to rescale units — presumably mm^3; confirm)
volume_plot = get_altair_hist_plot(product_dataset.products.volume / 1e6, "Volume", 0, 100, 1)
st.altair_chart(volume_plot, use_container_width=True)
# Plot weight distribution in the dataset
weight_plot = get_altair_hist_plot(product_dataset.products.weight, "Weight", 0, 100, 5)
st.altair_chart(weight_plot, use_container_width=True)
# Order section
st.header("Order")
# Select number of products and get random order
ordered_products = st.slider("Ordered products", 0, 1000, value=10, step=5)
order = product_dataset.get_order(ordered_products)
# Show the order as a table
st.dataframe(order)
# Show lower bounds on bins for the selected order
# on the sidebar of the dashboard
lower_bound = st.sidebar.selectbox(
    f"Lower bounds for the selected {ordered_products}-products order", ("L0", "L1", "L2")
)
# Martello-style lower bounds; L1/L2 also return auxiliary values (discarded)
if lower_bound == "L0":
    lb = utils.get_l0_lb(order, config.PALLET_DIMS)
elif lower_bound == "L1":
    lb, _, _, _ = utils.get_l1_lb(order, config.PALLET_DIMS)
elif lower_bound == "L2":
    lb, _, _, _ = utils.get_l2_lb(order, config.PALLET_DIMS)
st.sidebar.write(f"Martello's {lower_bound} lower bound: {lb}")
# Solutions section
st.header("Solution")
# Select parameters
st.subheader("Parameters")
solution_type = st.selectbox(
    "Select the algorithm you'd like to test",
    ("Baseline", "Maxrects", "Column generation"),
    index=1,
)
tlim = st.slider("Time limits", 0, 100, value=10, step=5)
max_iters = st.slider("Maximum re-iterations", 0, 5, value=1, step=1)
superitems_horizontal = st.radio("Add horizontal superitems", ("Yes", "No"))
# Compute solution
if solution_type == "Baseline" or solution_type == "Maxrects":
    bin_pool = main.main(
        order,
        procedure="bl" if solution_type == "Baseline" else "mr",
        max_iters=max_iters,
        tlim=tlim,
        superitems_horizontal=True if superitems_horizontal == "Yes" else False,
    )
elif solution_type == "Column generation":
    # Column generation exposes extra knobs; collect them before solving
    cg_use_height_groups = st.radio(
        "Call column generation by height groups", ("Yes", "No"), index=1
    )
    cg_mr_warm_start = st.radio(
        "Use maxrects as a warm-start for column generation", ("Yes", "No"), index=1
    )
    cg_max_iters = st.slider("Column generation maximum iterations", 0, 100, value=20, step=5)
    cg_max_stag_iters = st.slider(
        "Column generation early stopping iterations", 0, 100, value=3, step=1
    )
    cg_sp_mr = st.radio(
        "Use maxrects for the pricing subproblem in column generation", ("Yes", "No"), index=1
    )
    cg_sp_np_type = st.selectbox(
        "Select the approach to use in the subproblem no-placement for column generation",
        ("MIP", "CP"),
        index=0,
    )
    cg_sp_p_type = st.selectbox(
        "Select the approach to use in the subproblem placement for column generation",
        ("Maxrects", "MIP", "CP"),
        index=0,
    )
    bin_pool = main.main(
        order,
        procedure="cg",
        max_iters=max_iters,
        tlim=tlim,
        superitems_horizontal=True if superitems_horizontal == "Yes" else False,
        cg_use_height_groups=True if cg_use_height_groups == "Yes" else False,
        cg_mr_warm_start=True if cg_mr_warm_start == "Yes" else False,
        cg_max_iters=cg_max_iters,
        cg_max_stag_iters=cg_max_stag_iters,
        cg_sp_mr=True if cg_sp_mr == "Yes" else False,
        cg_sp_np_type=cg_sp_np_type.lower(),
        cg_sp_p_type="mr" if cg_sp_p_type == "Maxrects" else cg_sp_p_type.lower(),
    )
# Show original layer pool (before compacting)
st.subheader("Original layer pool")
st.dataframe(bin_pool.get_original_layer_pool().to_dataframe())
# Show original bin pool (before compacting)
st.subheader("Original bin pool")
original_bin_pool = bin_pool.get_original_bin_pool()
# NOTE(review): the loop variable `bin` shadows the builtin bin(); `ax` is
# unused — bin.plot() presumably draws on the current matplotlib figure,
# which st.pyplot(plt.gcf()) then renders.
for i, bin in enumerate(original_bin_pool):
    st.write(f"Bin #{i + 1}")
    st.dataframe(bin.layer_pool.describe())
    ax = bin.plot()
    st.pyplot(plt.gcf())
# Show compact bin pool
st.subheader("Compact bin pool")
for i, bin in enumerate(bin_pool.compact_bins):
    st.write(f"Bin #{i + 1}")
    ax = bin.plot()
    st.pyplot(plt.gcf())
# Success message
st.success("Bin packing procedure successfully completed")
| [
"altair.Chart",
"main.main",
"dataset.ProductDataset",
"utils.get_l0_lb",
"streamlit.header",
"numpy.arange",
"streamlit.sidebar.write",
"matplotlib.pyplot.style.use",
"streamlit.set_page_config",
"pandas.DataFrame",
"streamlit.altair_chart",
"matplotlib.pyplot.gcf",
"streamlit.write",
"st... | [((179, 203), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (192, 203), True, 'from matplotlib import pyplot as plt\n'), ((784, 831), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""3D Bin Packing"""'}), "(page_title='3D Bin Packing')\n", (802, 831), True, 'import streamlit as st\n'), ((839, 866), 'streamlit.header', 'st.header', (['"""3D Bin Packing"""'], {}), "('3D Bin Packing')\n", (848, 866), True, 'import streamlit as st\n'), ((887, 907), 'streamlit.header', 'st.header', (['"""Dataset"""'], {}), "('Dataset')\n", (896, 907), True, 'import streamlit as st\n'), ((926, 1243), 'dataset.ProductDataset', 'dataset.ProductDataset', (['"""data/products.pkl"""', 'config.NUM_PRODUCTS', 'config.MIN_PRODUCT_WIDTH', 'config.MAX_PRODUCT_WIDTH', 'config.MIN_PRODUCT_DEPTH', 'config.MAX_PRODUCT_DEPTH', 'config.MIN_PRODUCT_HEIGHT', 'config.MAX_PRODUCT_HEIGHT', 'config.MIN_PRODUCT_WEIGHT', 'config.MAX_PRODUCT_WEIGHT'], {'force_overload': '(False)'}), "('data/products.pkl', config.NUM_PRODUCTS, config.\n MIN_PRODUCT_WIDTH, config.MAX_PRODUCT_WIDTH, config.MIN_PRODUCT_DEPTH,\n config.MAX_PRODUCT_DEPTH, config.MIN_PRODUCT_HEIGHT, config.\n MAX_PRODUCT_HEIGHT, config.MIN_PRODUCT_WEIGHT, config.\n MAX_PRODUCT_WEIGHT, force_overload=False)\n", (948, 1243), False, 'import config, dataset, main, utils\n'), ((1468, 1524), 'streamlit.altair_chart', 'st.altair_chart', (['dw_ratio_plot'], {'use_container_width': '(True)'}), '(dw_ratio_plot, use_container_width=True)\n', (1483, 1524), True, 'import streamlit as st\n'), ((1723, 1779), 'streamlit.altair_chart', 'st.altair_chart', (['hw_ratio_plot'], {'use_container_width': '(True)'}), '(hw_ratio_plot, use_container_width=True)\n', (1738, 1779), True, 'import streamlit as st\n'), ((1918, 1972), 'streamlit.altair_chart', 'st.altair_chart', (['volume_plot'], {'use_container_width': '(True)'}), '(volume_plot, use_container_width=True)\n', (1933, 1972), True, 'import 
streamlit as st\n'), ((2105, 2159), 'streamlit.altair_chart', 'st.altair_chart', (['weight_plot'], {'use_container_width': '(True)'}), '(weight_plot, use_container_width=True)\n', (2120, 2159), True, 'import streamlit as st\n'), ((2178, 2196), 'streamlit.header', 'st.header', (['"""Order"""'], {}), "('Order')\n", (2187, 2196), True, 'import streamlit as st\n'), ((2266, 2322), 'streamlit.slider', 'st.slider', (['"""Ordered products"""', '(0)', '(1000)'], {'value': '(10)', 'step': '(5)'}), "('Ordered products', 0, 1000, value=10, step=5)\n", (2275, 2322), True, 'import streamlit as st\n'), ((2404, 2423), 'streamlit.dataframe', 'st.dataframe', (['order'], {}), '(order)\n', (2416, 2423), True, 'import streamlit as st\n'), ((2524, 2642), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['f"""Lower bounds for the selected {ordered_products}-products order"""', "('L0', 'L1', 'L2')"], {}), "(\n f'Lower bounds for the selected {ordered_products}-products order', (\n 'L0', 'L1', 'L2'))\n", (2544, 2642), True, 'import streamlit as st\n'), ((2889, 2952), 'streamlit.sidebar.write', 'st.sidebar.write', (['f"""Martello\'s {lower_bound} lower bound: {lb}"""'], {}), '(f"Martello\'s {lower_bound} lower bound: {lb}")\n', (2905, 2952), True, 'import streamlit as st\n'), ((2975, 2996), 'streamlit.header', 'st.header', (['"""Solution"""'], {}), "('Solution')\n", (2984, 2996), True, 'import streamlit as st\n'), ((3018, 3044), 'streamlit.subheader', 'st.subheader', (['"""Parameters"""'], {}), "('Parameters')\n", (3030, 3044), True, 'import streamlit as st\n'), ((3061, 3176), 'streamlit.selectbox', 'st.selectbox', (['"""Select the algorithm you\'d like to test"""', "('Baseline', 'Maxrects', 'Column generation')"], {'index': '(1)'}), '("Select the algorithm you\'d like to test", (\'Baseline\',\n \'Maxrects\', \'Column generation\'), index=1)\n', (3073, 3176), True, 'import streamlit as st\n'), ((3195, 3245), 'streamlit.slider', 'st.slider', (['"""Time limits"""', '(0)', '(100)'], 
{'value': '(10)', 'step': '(5)'}), "('Time limits', 0, 100, value=10, step=5)\n", (3204, 3245), True, 'import streamlit as st\n'), ((3258, 3315), 'streamlit.slider', 'st.slider', (['"""Maximum re-iterations"""', '(0)', '(5)'], {'value': '(1)', 'step': '(1)'}), "('Maximum re-iterations', 0, 5, value=1, step=1)\n", (3267, 3315), True, 'import streamlit as st\n'), ((3340, 3392), 'streamlit.radio', 'st.radio', (['"""Add horizontal superitems"""', "('Yes', 'No')"], {}), "('Add horizontal superitems', ('Yes', 'No'))\n", (3348, 3392), True, 'import streamlit as st\n'), ((5353, 5388), 'streamlit.subheader', 'st.subheader', (['"""Original layer pool"""'], {}), "('Original layer pool')\n", (5365, 5388), True, 'import streamlit as st\n'), ((5499, 5532), 'streamlit.subheader', 'st.subheader', (['"""Original bin pool"""'], {}), "('Original bin pool')\n", (5511, 5532), True, 'import streamlit as st\n'), ((5774, 5806), 'streamlit.subheader', 'st.subheader', (['"""Compact bin pool"""'], {}), "('Compact bin pool')\n", (5786, 5806), True, 'import streamlit as st\n'), ((5949, 6007), 'streamlit.success', 'st.success', (['"""Bin packing procedure successfully completed"""'], {}), "('Bin packing procedure successfully completed')\n", (5959, 6007), True, 'import streamlit as st\n'), ((527, 578), 'pandas.DataFrame', 'pd.DataFrame', (["{name: bin_edges[:-1], 'Count': hist}"], {}), "({name: bin_edges[:-1], 'Count': hist})\n", (539, 578), True, 'import pandas as pd\n'), ((2672, 2714), 'utils.get_l0_lb', 'utils.get_l0_lb', (['order', 'config.PALLET_DIMS'], {}), '(order, config.PALLET_DIMS)\n', (2687, 2714), False, 'import config, dataset, main, utils\n'), ((3491, 3678), 'main.main', 'main.main', (['order'], {'procedure': "('bl' if solution_type == 'Baseline' else 'mr')", 'max_iters': 'max_iters', 'tlim': 'tlim', 'superitems_horizontal': "(True if superitems_horizontal == 'Yes' else False)"}), "(order, procedure='bl' if solution_type == 'Baseline' else 'mr',\n max_iters=max_iters, tlim=tlim, 
superitems_horizontal=True if \n superitems_horizontal == 'Yes' else False)\n", (3500, 3678), False, 'import config, dataset, main, utils\n'), ((5634, 5659), 'streamlit.write', 'st.write', (['f"""Bin #{i + 1}"""'], {}), "(f'Bin #{i + 1}')\n", (5642, 5659), True, 'import streamlit as st\n'), ((5859, 5884), 'streamlit.write', 'st.write', (['f"""Bin #{i + 1}"""'], {}), "(f'Bin #{i + 1}')\n", (5867, 5884), True, 'import streamlit as st\n'), ((2759, 2801), 'utils.get_l1_lb', 'utils.get_l1_lb', (['order', 'config.PALLET_DIMS'], {}), '(order, config.PALLET_DIMS)\n', (2774, 2801), False, 'import config, dataset, main, utils\n'), ((3787, 3862), 'streamlit.radio', 'st.radio', (['"""Call column generation by height groups"""', "('Yes', 'No')"], {'index': '(1)'}), "('Call column generation by height groups', ('Yes', 'No'), index=1)\n", (3795, 3862), True, 'import streamlit as st\n'), ((3900, 3991), 'streamlit.radio', 'st.radio', (['"""Use maxrects as a warm-start for column generation"""', "('Yes', 'No')"], {'index': '(1)'}), "('Use maxrects as a warm-start for column generation', ('Yes', 'No'\n ), index=1)\n", (3908, 3991), True, 'import streamlit as st\n'), ((4020, 4095), 'streamlit.slider', 'st.slider', (['"""Column generation maximum iterations"""', '(0)', '(100)'], {'value': '(20)', 'step': '(5)'}), "('Column generation maximum iterations', 0, 100, value=20, step=5)\n", (4029, 4095), True, 'import streamlit as st\n'), ((4120, 4205), 'streamlit.slider', 'st.slider', (['"""Column generation early stopping iterations"""', '(0)', '(100)'], {'value': '(3)', 'step': '(1)'}), "('Column generation early stopping iterations', 0, 100, value=3,\n step=1)\n", (4129, 4205), True, 'import streamlit as st\n'), ((4231, 4332), 'streamlit.radio', 'st.radio', (['"""Use maxrects for the pricing subproblem in column generation"""', "('Yes', 'No')"], {'index': '(1)'}), "('Use maxrects for the pricing subproblem in column generation', (\n 'Yes', 'No'), index=1)\n", (4239, 4332), True, 'import 
streamlit as st\n'), ((4362, 4491), 'streamlit.selectbox', 'st.selectbox', (['"""Select the approach to use in the subproblem no-placement for column generation"""', "('MIP', 'CP')"], {'index': '(0)'}), "(\n 'Select the approach to use in the subproblem no-placement for column generation'\n , ('MIP', 'CP'), index=0)\n", (4374, 4491), True, 'import streamlit as st\n'), ((4532, 4670), 'streamlit.selectbox', 'st.selectbox', (['"""Select the approach to use in the subproblem placement for column generation"""', "('Maxrects', 'MIP', 'CP')"], {'index': '(0)'}), "(\n 'Select the approach to use in the subproblem placement for column generation'\n , ('Maxrects', 'MIP', 'CP'), index=0)\n", (4544, 4670), True, 'import streamlit as st\n'), ((5738, 5747), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5745, 5747), True, 'from matplotlib import pyplot as plt\n'), ((5919, 5928), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5926, 5928), True, 'from matplotlib import pyplot as plt\n'), ((450, 487), 'numpy.arange', 'np.arange', (['bin_min', 'bin_max', 'bin_step'], {}), '(bin_min, bin_max, bin_step)\n', (459, 487), True, 'import numpy as np\n'), ((2846, 2888), 'utils.get_l2_lb', 'utils.get_l2_lb', (['order', 'config.PALLET_DIMS'], {}), '(order, config.PALLET_DIMS)\n', (2861, 2888), False, 'import config, dataset, main, utils\n'), ((688, 737), 'altair.Bin', 'alt.Bin', ([], {'extent': '[bin_min, bin_max]', 'step': 'bin_step'}), '(extent=[bin_min, bin_max], step=bin_step)\n', (695, 737), True, 'import altair as alt\n'), ((600, 615), 'altair.Chart', 'alt.Chart', (['data'], {}), '(data)\n', (609, 615), True, 'import altair as alt\n')] |
import numpy as np
from itertools import compress
from autodp.rl_core.env.sim_env import SimEnv
class BatchSimEnv(object):
    """Wrapper that creates and controls a list of simulation environments
    for batch processing."""

    def __init__(self, image_batch=(), label_batch=()):
        """Create one SimEnv per (image, label) pair.

        Immutable tuple defaults replace the original mutable-list defaults
        (the classic shared-mutable-default pitfall); behavior is unchanged.

        :param image_batch: iterable of images
        :param label_batch: iterable of labels, parallel to image_batch
        """
        self._envs = [SimEnv(i, l) for (i, l) in zip(image_batch, label_batch)]

    def step(self, action_batch, qout=None):
        """Advance every environment one step.

        :param action_batch: per-environment actions, parallel to the
            internal environment list
        :param qout: optional per-environment values forwarded to each env
        """
        if qout is not None:
            for (idx, env) in enumerate(self._envs):
                env.step(action_batch[idx], qout[idx])
        else:
            for (idx, env) in enumerate(self._envs):
                env.step(action_batch[idx])

    def add(self, image_batch, label_batch):
        """Append additional environments built from the given batches."""
        self._envs += [SimEnv(i, l) for (i, l) in zip(image_batch, label_batch)]

    def update_done(self, dones):
        """Drop environments flagged as done.

        :param dones: boolean flags, True where the environment is finished
        """
        self._envs = list(compress(self._envs, np.logical_not(dones)))

    def get_paths(self):
        """Return the list of paths, one per remaining environment."""
        return [env.get_path for env in self._envs]

    def get_path(self, idx):
        """Return the path of the environment at position idx."""
        return self._envs[idx].get_path

    def get_labels(self):
        """Return the list of true labels, one per environment."""
        return [env.get_label for env in self._envs]
| [
"numpy.logical_not",
"autodp.rl_core.env.sim_env.SimEnv"
] | [((305, 317), 'autodp.rl_core.env.sim_env.SimEnv', 'SimEnv', (['i', 'l'], {}), '(i, l)\n', (311, 317), False, 'from autodp.rl_core.env.sim_env import SimEnv\n'), ((813, 825), 'autodp.rl_core.env.sim_env.SimEnv', 'SimEnv', (['i', 'l'], {}), '(i, l)\n', (819, 825), False, 'from autodp.rl_core.env.sim_env import SimEnv\n'), ((987, 1008), 'numpy.logical_not', 'np.logical_not', (['dones'], {}), '(dones)\n', (1001, 1008), True, 'import numpy as np\n')] |
from pyica import fastica
from pyica import sources
import numpy as np
class TestICA():
    """Integration tests for pyica.fastica on a known 2-source mixture."""
    def setup(self):
        # Two synthetic sources: a sinusoid and a periodic ramp in [-1, 1]
        self.signals = np.vstack([np.sin([x/20.0 for x in range(1,1001)]),(1.0 + np.mod(range(1000),200) - 100.0)/100.0])
        # Fixed 2x2 mixing matrix
        self.mixing = np.array([[0.291, 0.6557], [-0.5439, 0.5572]])
        self.X = np.dot(self.mixing,self.signals)
        self.A,self.W,self.S = fastica.fastica(self.X,2,maxIterations=10000)
    def test_W_orthogonality(self):
        # The unmixing matrix returned by FastICA should be orthogonal
        assert np.allclose(np.dot(self.W.T,self.W),np.eye(2),atol=1.0e-06),"python: W^TW not within 1e-06 of I"
    def test_S_recovery(self):
        from scipy.linalg import det
        # Recovered sources should correlate with the originals up to
        # sign/permutation, so |det| of the cross-correlation block is ~1
        assert np.allclose(1.0,np.abs(det(np.corrcoef(self.S,self.signals)[0:2,2:])),atol=1.0e-03),"python: |det(rho(ShatT,S))| not within 1e-03 of unity"
class TestSources():
    """Tests for the pyica.sources helpers."""
    def setup(self):
        self.S = sources.unitsources()
    def test_S_size(self):
        # unitsources is expected to return exactly three test sources
        assert self.S.shape[0] == 3,"sources: incorrect number of test sources generated"
| [
"numpy.eye",
"numpy.corrcoef",
"numpy.array",
"numpy.dot",
"pyica.fastica.fastica",
"pyica.sources.unitsources"
] | [((256, 302), 'numpy.array', 'np.array', (['[[0.291, 0.6557], [-0.5439, 0.5572]]'], {}), '([[0.291, 0.6557], [-0.5439, 0.5572]])\n', (264, 302), True, 'import numpy as np\n'), ((320, 353), 'numpy.dot', 'np.dot', (['self.mixing', 'self.signals'], {}), '(self.mixing, self.signals)\n', (326, 353), True, 'import numpy as np\n'), ((384, 431), 'pyica.fastica.fastica', 'fastica.fastica', (['self.X', '(2)'], {'maxIterations': '(10000)'}), '(self.X, 2, maxIterations=10000)\n', (399, 431), False, 'from pyica import fastica\n'), ((865, 886), 'pyica.sources.unitsources', 'sources.unitsources', ([], {}), '()\n', (884, 886), False, 'from pyica import sources\n'), ((494, 518), 'numpy.dot', 'np.dot', (['self.W.T', 'self.W'], {}), '(self.W.T, self.W)\n', (500, 518), True, 'import numpy as np\n'), ((518, 527), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (524, 527), True, 'import numpy as np\n'), ((690, 723), 'numpy.corrcoef', 'np.corrcoef', (['self.S', 'self.signals'], {}), '(self.S, self.signals)\n', (701, 723), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import ffmpeg
from joblib import Parallel, delayed
import warnings
import cv2
from tqdm import tqdm
# UPSAMPLE DATA
def upsample(df, frac=2):
up_df = df.query("distance>=20").copy()
up_df = up_df.sample(frac=frac, replace=True)
up_df = pd.concat([df.query("distance<20"), up_df], axis=0)
return up_df
# CLEAN DATA
def clean_data(df):
    """Return a copy of df keeping only rows with strictly positive distance."""
    return df.query("distance>0").copy()
def load_video(filepath, fps=1):
    """Decode a video file into a numpy array at the requested frame rate.

    :param filepath: path to the video file
    :param fps: desired number of frames per second
    :return: uint8 array of shape (frames, height, width, 3), RGB order
    """
    # Locate the first video stream to learn the frame dimensions
    probe = ffmpeg.probe(filepath)
    stream = next(
        (s for s in probe["streams"] if s["codec_type"] == "video"), None
    )
    width = int(stream["width"])
    height = int(stream["height"])
    # Resample to the requested fps and dump raw RGB frames to stdout
    decoded, _ = (
        ffmpeg.input(filepath)
        .filter("fps", fps=fps, round="up")
        .output("pipe:", format="rawvideo", pix_fmt="rgb24")
        .run(capture_stdout=True, capture_stderr=True)
    )
    return np.frombuffer(decoded, np.uint8).reshape([-1, height, width, 3])
def video2image(df, image_dir, debug=False):
    """Extract one PNG per annotated timestamp from each video referenced in df.

    :param df: DataFrame with columns video_id, video_path, time
               (assumed from usage — TODO confirm schema against caller)
    :param image_dir: output directory for the PNG frames
    :param debug: when True, only process the first 50 rows of df
    :return: list of [video_id, width, height] entries, one per video
    """
    if debug:
        df = df.iloc[:50]
    def convert(df, video_id):
        # Decode one video and write out every annotated frame
        video_df = df.query("video_id==@video_id")
        video = load_video(video_df.video_path.iloc[0], fps=1) # video path is same for all images within a video
        height, width = video.shape[1:-1]
        for time in video_df.time.unique():
            # assumes frame index == timestamp in seconds at fps=1 — TODO confirm
            image = video[time]
            check = cv2.imwrite(f'{image_dir}/{video_id}-{time:03d}.png', image)
            if not check:
                warnings.warn(f'{video_id} writing failed')
        return [video_id, width, height]
    # NOTE(review): the threading backend is presumably chosen because the
    # heavy work (ffmpeg decode, cv2 PNG encode) happens in native code —
    # confirm it actually parallelizes in practice.
    info = Parallel(n_jobs=-1, backend='threading', verbose=0)(delayed(convert)(df, video_id)\
                                                         for video_id in tqdm(df.video_id.unique(),
                                                                              desc='video2image '))
    return info
| [
"cv2.imwrite",
"ffmpeg.input",
"joblib.delayed",
"joblib.Parallel",
"ffmpeg.probe",
"warnings.warn",
"numpy.frombuffer"
] | [((962, 984), 'ffmpeg.input', 'ffmpeg.input', (['filepath'], {}), '(filepath)\n', (974, 984), False, 'import ffmpeg\n'), ((707, 725), 'ffmpeg.probe', 'ffmpeg.probe', (['path'], {}), '(path)\n', (719, 725), False, 'import ffmpeg\n'), ((1895, 1946), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)', 'backend': '"""threading"""', 'verbose': '(0)'}), "(n_jobs=-1, backend='threading', verbose=0)\n", (1903, 1946), False, 'from joblib import Parallel, delayed\n'), ((1205, 1233), 'numpy.frombuffer', 'np.frombuffer', (['out', 'np.uint8'], {}), '(out, np.uint8)\n', (1218, 1233), True, 'import numpy as np\n'), ((1696, 1756), 'cv2.imwrite', 'cv2.imwrite', (['f"""{image_dir}/{video_id}-{time:03d}.png"""', 'image'], {}), "(f'{image_dir}/{video_id}-{time:03d}.png', image)\n", (1707, 1756), False, 'import cv2\n'), ((1799, 1842), 'warnings.warn', 'warnings.warn', (['f"""{video_id} writing failed"""'], {}), "(f'{video_id} writing failed')\n", (1812, 1842), False, 'import warnings\n'), ((1947, 1963), 'joblib.delayed', 'delayed', (['convert'], {}), '(convert)\n', (1954, 1963), False, 'from joblib import Parallel, delayed\n')] |
##################################################### Import system libraries ######################################################
import matplotlib as mpl
# Reset matplotlib to factory defaults, then layer the project's rc file on
# top so every figure produced by this script shares the same styling.
mpl.rcdefaults()
mpl.rcParams.update(mpl.rc_params_from_file('meine-matplotlibrc'))
import matplotlib.pyplot as plt
import numpy as np
import scipy.constants as const
import uncertainties.unumpy as unp
from uncertainties import ufloat
from uncertainties.unumpy import (
nominal_values as noms,
std_devs as stds,
)
################################################ Finish importing system libraries #################################################
################################################ Adding subfolder to system's path #################################################
import os, sys, inspect
# realpath() will make your script run, even if you symlink it :)
# Prepend this script's own directory to sys.path so sibling modules import.
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0]))
if cmd_folder not in sys.path:
    sys.path.insert(0, cmd_folder)
# Also expose the "python_custom_scripts" subfolder (curve_fit, table, ...).
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"python_custom_scripts")))
if cmd_subfolder not in sys.path:
    sys.path.insert(0, cmd_subfolder)
############################################# Finish adding subfolder to system's path #############################################
##################################################### Import custom libraries ######################################################
from curve_fit import ucurve_fit
from table import (
make_table,
make_full_table,
make_composed_table,
make_SI,
write,
)
from regression import (
reg_linear,
reg_quadratic,
reg_cubic
)
from error_calculation import(
MeanError
)
################################################ Finish importing custom libraries #################################################
import math
from scipy.interpolate import UnivariateSpline
# Planck constant
h = 4.135667516e-15 # eV second
# vacuum speed of light
c = 299792458 # metre per second
# lattice-plane spacing of the analyser crystal (Bragg diffraction)
d = 201.4e-12 # metre
# elementary charge
e = 1.6e-19 # coulomb
# Rydberg energy
r = 13.6 # eV
# Sommerfeld fine-structure constant (dimensionless)
s_k = 7.29e-3
# Raw measurement: scattering angle 2*theta and count rate for the Bragg check.
zwei_theta, impulsrate = np.genfromtxt('messdaten/1_Messung_werte.txt', unpack=True)
write('build/Tabelle_messung_1.tex', make_table([zwei_theta,impulsrate],[1, 0])) # every value with an uncertainty occupies two columns
write('build/Tabelle_messung_1_texformat.tex', make_full_table(
    'Messdaten Bragg Bedingung.',
    'table:A2',
    'build/Tabelle_messung_1.tex',
    [], # caution: these numbers denote the resulting column indices
        # that should become multicolumns
    [
    r'$\theta \:/\: \si{\degree}$',
    r'$Zaehlrate$']))
# Cu-anode emission spectrum: detector records 2*theta, convert to theta.
theta, Z = np.loadtxt("messdaten/Bremsberg_werte.txt", unpack=True)
theta = theta/2
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
# plt.xlabel(r'$t \:/\: \si{\milli\second}$')
#plt.title("Emissionsspektrum einer Cu-Anode bei 35 kV")
plt.grid()
plt.xticks()
plt.yticks()
# Mark the characteristic copper lines and the bremsstrahlung continuum.
plt.annotate(r'$K_\alpha$', xy=(46.5/2, 6499))
plt.annotate(r'$K_\beta$', xy=(41.5/2, 2000))
plt.annotate(r'Bremsberg', xy=(20/2, 750))
plt.plot(theta, Z,'b-', label='Interpolation')
plt.legend(loc='best')
plt.savefig("build/cu-emission.pdf")
plt.close()
# print("hallo")
# #np.arcsin(0.5)
# print(np.arcsin(1))
# print(np.sin(90))
# import math
# print("try")
# print(math.sin(90))
# print(math.cos(math.radians(1)))
#### Limit-angle determination: shortest wavelength / maximum photon energy ####
print("Ergebniss")
#lamb1 = math.sin(math.radians(5.4)) * 2* 201.4*10**(-12)
# Bragg condition lambda_min = 2 d sin(theta_min); theta_min = 5.4 deg is the
# onset angle read off the recorded spectrum. E_max = h c / lambda_min.
lamb_min = 2*d*np.sin(np.deg2rad(5.4))
E_max = h*c/lamb_min
write('build/lambda_min.tex', make_SI(lamb_min*1e12, r'\pico\meter', figures=2)) # type in number of significant figures
write('build/E_max.tex', make_SI(E_max*1e-3, r'\kilo\electronvolt', figures=2)) # type in number of significant figures
####Halbwertsbreite####
def halbwertsbreite(x, y):
    """Determine the full width at half maximum (FWHM) of a spectral peak.

    Fits an interpolating spline to ``y - max(y)/2`` so that its roots are
    the half-maximum angles, converts both angles to photon energies via
    the Bragg condition (module-level constants ``d``, ``h``, ``c``),
    prints the results, and draws data, interpolation and both half-maximum
    angles into the current matplotlib figure (the caller is responsible
    for ``savefig``/``close``).

    Parameters
    ----------
    x : array-like -- crystal angles in degrees.
    y : array-like -- count rates belonging to ``x``.

    Returns
    -------
    tuple ``(r1, r2, DE)`` -- the two half-maximum angles in degrees and
    the energy resolution ``E1 - E2`` in eV.  (Previously the computed
    values were only printed and then discarded, which forced the script
    below to duplicate this code inline; returning them is backward
    compatible since callers ignoring the return value are unaffected.)
    """
    spline = UnivariateSpline(x, y-np.max(y)/2, s=0)
    r1, r2 = spline.roots() # find the roots; assumes the peak yields exactly two
    # Bragg: lambda = 2 d sin(theta); photon energy E = h c / lambda.
    lambda1 = 2*d*np.sin(np.deg2rad(r1))
    lambda2 = 2*d*np.sin(np.deg2rad(r2))
    E1 = h*c/lambda1
    E2 = h*c/lambda2
    DE = E1 - E2
    print ('Halbwertswinkel: {0:.5e} deg, {1:.5e} deg'.format(r1, r2))
    print ('Halbwertsbreite: {0:.5e}'.format(np.abs(r1-r2)))
    print (u'Energieaufloesung: {0:.5e} eV'.format(DE))
    xnew = np.linspace(min(x), max(x))
    ynew = spline(xnew)
    plt.plot(x, y, 'rx', label='Messdaten')
    plt.plot(xnew, ynew+np.max(y)/2,'b-', label='Interpolation')
    plt.axvline(r1)
    plt.axvline(r2)
    plt.grid()
    plt.legend()
    plt.xlabel("doppelter Kristallwinkel in Grad")
    plt.ylabel(u"Zählrate")
    return r1, r2, DE
###############################################################
spline = UnivariateSpline(theta[84:90], Z[84:90]-np.max(Z[84:90])/2, s=0)
r1, r2 = spline.roots() # find the roots
lambda1 = 2*d*np.sin(np.deg2rad(r1))
lambda2 = 2*d*np.sin(np.deg2rad(r2))
E1 = h*c/lambda1
E2 = h*c/lambda2
DE = E1 - E2
print ('Halbwertswinkel: {0:.5e} deg, {1:.5e} deg'.format(r1, r2))
print ('Halbwertsbreite: {0:.5e}'.format(np.abs(r1-r2)))
print (u'Energieaufloesung: {0:.5e} eV'.format(DE))
xnew = np.linspace(min(theta[84:90]), max(theta[84:90]))
ynew = spline(xnew)
plt.plot(theta[84:90], Z[84:90], 'rx', label='Messdaten')
plt.plot(xnew, ynew+np.max(Z[84:90])/2,'b-', label='Interpolation')
plt.axvline(r1)
plt.axvline(r2)
plt.grid()
plt.legend(loc='best')
# plt.xlabel("doppelter Kristallwinkel in Grad")
# plt.ylabel(u"Zählrate")
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
write('build/Halbwertswinkel_beta_1.tex', make_SI(r1, r'\degree', figures=2))
write('build/Halbwertswinkel_beta_2.tex', make_SI(r2, r'\degree', figures=2))
write('build/Halbwertsbreite_beta.tex', make_SI(np.abs(r1-r2), r'\degree', figures=2))
write('build/Energieaufloesung_beta.tex', make_SI(DE*1e-3, r'\kilo\electronvolt', figures=2))
plt.savefig("build/halbwertsbreiten_beta.pdf")
plt.close()
#halbwertsbreite(theta[96:101], Z[96:101])
# FWHM of the K_alpha line (same inline copy of halbwertsbreite(), applied
# to theta[96:101]); also writes E1 as the copper absorption energy.
spline = UnivariateSpline(theta[96:101], Z[96:101]-np.max(Z[96:101])/2, s=0)
r1, r2 = spline.roots() # find the roots
lambda1 = 2*d*np.sin(np.deg2rad(r1))
lambda2 = 2*d*np.sin(np.deg2rad(r2))
E1 = h*c/lambda1
E2 = h*c/lambda2
DE = E1 - E2
print ('Halbwertswinkel: {0:.5e} deg, {1:.5e} deg'.format(r1, r2))
print ('Halbwertsbreite: {0:.5e}'.format(np.abs(r1-r2)))
print (u'Energieaufloesung: {0:.5e} eV'.format(DE))
xnew = np.linspace(min(theta[96:101]), max(theta[96:101]))
ynew = spline(xnew)
plt.plot(theta[96:101], Z[96:101], 'rx', label='Messdaten')
plt.plot(xnew, ynew+np.max(Z[96:101])/2,'b-', label='Interpolation')
plt.axvline(r1)
plt.axvline(r2)
plt.grid()
plt.legend(loc='best')
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
write('build/Halbwertswinkel_alpha_1.tex', make_SI(r1, r'\degree', figures=2))
write('build/Halbwertswinkel_alpha_2.tex', make_SI(r2, r'\degree', figures=2))
write('build/Halbwertsbreite_alpha.tex', make_SI(np.abs(r1-r2), r'\degree', figures=2))
write('build/Energieaufloesung_alpha.tex', make_SI(DE*1e-3, r'\kilo\electronvolt', figures=2))
write('build/Absorptionsenergie_Kupfer.tex', make_SI(E1*1e-3, r' ', figures=2))
plt.savefig("build/halbwertsbreiten_alpha.pdf")
plt.close()
##################### Screening constants (Abschirmkonstante) for copper
# Line positions of K_alpha / K_beta read off the spectrum (2*theta values).
theta_alpha = 47.2/2
theta_beta = 42.8/2
write('build/theta_alpha.tex', make_SI(theta_alpha, r'\degree', figures=2))
write('build/theta_beta.tex', make_SI(theta_beta, r'\degree', figures=2))
lambda_alpha = 2*d*np.sin(np.deg2rad(theta_alpha))
lambda_beta = 2*d*np.sin(np.deg2rad(theta_beta))
E_alpha = h*c/lambda_alpha
E_beta = h*c/lambda_beta
# Moseley-type estimates for Cu (Z = 29): sigma_1 from the K_beta energy,
# sigma_2 from the K_alpha energy using the sigma_1 just obtained.
sigma_1 = 29 - np.sqrt(E_beta/r)
sigma_2 = 29 -2* np.sqrt((r*((29-sigma_1)**2) - E_alpha)/r)
write('build/sigma_1.tex', make_SI(sigma_1, r' ', figures=2))
write('build/sigma_2.tex', make_SI(sigma_2, r' ', figures=2))
## Literature values (E_beta = 8903 eV, E_alpha = 8046 eV)
sigma_1_lit = 29 - np.sqrt(8903/r)
# NOTE(review): the next line uses sigma_1, not sigma_1_lit -- looks like a
# copy-paste bug; confirm before relying on sigma_2_lit.
sigma_2_lit = 29 -2* np.sqrt((r*((29-sigma_1)**2) - 8046)/r)
write('build/sigma_1_lit.tex', make_SI(sigma_1_lit, r' ', figures=2))
write('build/sigma_2_lit.tex', make_SI(sigma_2_lit, r' ', figures=2))
#write('build/Energiedifferenz.tex', make_SI(6268-1919, r'\electronvolt', figures=2)) # read off
#######################
# Absorption spectra: one angle scan per absorber material, each plotted
# against theta (= recorded angle / 2) and saved as a PDF.
## Germanium
plt.clf()  # BUGFIX: was `plt.clf` (bare attribute access, no call) -- the figure was never cleared
theta_ger, Z_ger = np.genfromtxt('messdaten/Germanium.txt', unpack=True)
theta_ger = theta_ger/2
# plt.plot(theta_ger, Z_ger)
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
plt.grid()
# plt.xticks()
# plt.yticks()
plt.plot(theta_ger, Z_ger,'b-', label='Messdaten')
plt.legend(loc='best')
plt.savefig("build/Germanium.pdf")
plt.close()
## Zinc
theta_zink, Z_zink = np.genfromtxt('messdaten/Zink.txt', unpack=True)
theta_zink = theta_zink/2
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
plt.grid()
plt.xticks()
plt.yticks()
plt.plot(theta_zink, Z_zink,'b-', label='Messdaten')
plt.legend(loc='best')
plt.savefig("build/Zink.pdf")
plt.close()
## Zirconium
theta_zir, Z_zir = np.genfromtxt('messdaten/Zirkonium.txt', unpack=True)
theta_zir = theta_zir/2
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
plt.grid()
plt.xticks()
plt.yticks()
plt.plot(theta_zir, Z_zir,'b-', label='Messdaten')
plt.legend(loc='best')
plt.savefig("build/Zirkonium.pdf")
plt.close()
## Gold
theta_gold, Z_gold = np.genfromtxt('messdaten/Gold.txt', unpack=True)
theta_gold = theta_gold/2
plt.xlabel(r'$\theta \:/\: \si{\degree}$')
plt.ylabel(r'$Impulsrate \:/\: \si{\kilo\gram\meter\per\second\tothe{2}}$')
plt.grid()
plt.xticks()
plt.yticks()
plt.plot(theta_gold, Z_gold,'b-', label='Messdaten')
plt.legend(loc='best')
plt.savefig("build/Gold.pdf")
plt.close()
#### Energiebestimmung
def Grade(x_1, y_1, x_2, y_2):
    """Return the x position where the straight line through (x_1, y_1)
    and (x_2, y_2) reaches the mean of the two y values (edge midpoint)."""
    slope = (y_2 - y_1) / (x_2 - x_1)
    intercept = y_1 - slope * x_1
    y_mid = 0.5 * (y_1 + y_2)
    return (y_mid - intercept) / slope
## Germanium: edge angle from the midpoint of a straight line through two
## points bracketing the absorption edge, then Bragg -> wavelength -> energy.
theta_ger_x = Grade(theta_ger[32], Z_ger[32], theta_ger[35], Z_ger[35])
lambda_ger = 2*d*np.sin(np.deg2rad(theta_ger_x))
E_ger = h*c/lambda_ger
write('build/Absorptionsenergie_Germanium.tex', make_SI(E_ger*1e-3, r'\kilo\electronvolt', figures=2))
write('build/Absorptionsenergie_Germanium_ohne.tex', make_SI(E_ger*1e-3, r' ', figures=2))
## Zinc
theta_zink_x = Grade(theta_zink[30], Z_zink[30], theta_zink[35], Z_zink[35])
lambda_zink = 2*d*np.sin(np.deg2rad(theta_zink_x))
E_zink = h*c/lambda_zink
write('build/Absorptionsenergie_Zink.tex', make_SI(E_zink*1e-3, r'\kilo\electronvolt', figures=2))
write('build/Absorptionsenergie_Zink_ohne.tex', make_SI(E_zink*1e-3, r' ', figures=2))
## Zirconium
theta_zir_x = Grade(theta_zir[23], Z_zir[23], theta_zir[27], Z_zir[27])
lambda_zir = 2*d*np.sin(np.deg2rad(theta_zir_x))
E_zir = h*c/lambda_zir
write('build/Absorptionsenergie_Zirkonium.tex', make_SI(E_zir*1e-3, r'\kilo\electronvolt', figures=2))
write('build/Absorptionsenergie_Zirkonium_ohne.tex', make_SI(E_zir*1e-3, r' ', figures=2))
#### Screening constants from the K-edge energies: Moseley's law with the
#### relativistic correction term (s_k**2 / 4) * Z**4.
sigma_ger = 32 - np.sqrt((E_ger/r) -((s_k**2)/4)*32**4)
sigma_zink = 30 - np.sqrt((E_zink/r) -((s_k**2)/4)*30**4)
sigma_zir = 40 - np.sqrt((E_zir/r) -((s_k**2)/4)*40**4)
write('build/Abschirmkonstante_Germanium.tex', make_SI(sigma_ger, r' ', figures=2))
write('build/Abschirmkonstante_Zink.tex', make_SI(sigma_zink, r' ', figures=2))
write('build/Abschirmkonstante_Zirkonium.tex', make_SI(sigma_zir, r' ', figures=2))
# Moseley diagram: sqrt(E_K) versus atomic number Z should be linear; the
# fitted slope m yields the Rydberg constant via E_K = R * (3/4) * (Z - sigma)^2.
E_k = (E_zink, E_ger, E_zir)
Z = (30,32,40) # Zn, Ge, Zr  (NOTE: rebinds the earlier measurement array Z)
E_k_wurzel = np.sqrt(E_k)
params = ucurve_fit(reg_linear, Z, E_k_wurzel)
m,b = params
write('build/hcRydbergonstante.tex', make_SI(4/3*m**2, r'\electronvolt', figures=1))
write('build/Rydbergonstante.tex', make_SI(4/3*m**2/(h*c), r'\per\meter', figures=1))
plt.clf()  # BUGFIX: was `plt.clf` without parentheses -- a no-op attribute access
t_plot = np.linspace(25,45, 100)
plt.plot(t_plot , reg_linear(t_plot, *noms(params)), 'b-', label='Fit')
plt.plot(Z, E_k_wurzel, 'rx', label='Messdaten')
plt.xlabel(r'Kernladungszahl $Z$')
plt.ylabel(r'$\sqrt{E_\textrm{k} \:/\: \si{\kilo\electronvolt}}$')
plt.legend(loc='best')
plt.savefig("build/Moseley_Diagramm.pdf")
plt.close()  # BUGFIX: was `plt.close` without parentheses -- figure was never closed
################################ FREQUENTLY USED CODE ################################
#
########## IMPORT ##########
# t, U, U_err = np.genfromtxt('data.txt', unpack=True)
# t *= 1e-3
########## ERRORS ##########
# R_unc = ufloat(R[0],R[2])
# U = 1e3 * unp.uarray(U, U_err)
# Rx_mean = np.mean(Rx) # Mittelwert und syst. Fehler
# Rx_mean_err = MeanError(noms(Rx)) # Fehler des Mittelwertes
#
## Relative Fehler zum späteren Vergleich in der Diskussion
# RelFehler_G = (G_mess - G_lit) / G_lit
# RelFehler_B = (B_mess - B_lit) / B_lit
# write('build/RelFehler_G.tex', make_SI(RelFehler_G*100, r'\percent', figures=1))
# write('build/RelFehler_B.tex', make_SI(RelFehler_B*100, r'\percent', figures=1))
########## CURVE FIT ##########
# def f(t, a, b, c, d):
# return a * np.sin(b * t + c) + d
#
# params = ucurve_fit(f, t, U, p0=[1, 1e3, 0, 0]) # p0 bezeichnet die Startwerte der zu fittenden Parameter
# params = ucurve_fit(reg_linear, x, y) # linearer Fit
# params = ucurve_fit(reg_quadratic, x, y) # quadratischer Fit
# params = ucurve_fit(reg_cubic, x, y) # kubischer Fit
# a, b = params
# write('build/parameter_a.tex', make_SI(a * 1e-3, r'\kilo\volt', figures=1)) # type in Anz. signifikanter Stellen
# write('build/parameter_b.tex', make_SI(b * 1e-3, r'\kilo\hertz', figures=2)) # type in Anz. signifikanter Stellen
########## PLOTTING ##########
# plt.clf # clear actual plot before generating a new one
#
## automatically choosing limits with existing array T1
# t_plot = np.linspace(np.amin(T1), np.amax(T1), 100)
# plt.xlim(t_plot[0]-1/np.size(T1)*(t_plot[-1]-t_plot[0]), t_plot[-1]+1/np.size(T1)*(t_plot[-1]-t_plot[0]))
#
## hard coded limits
# t_plot = np.linspace(-0.5, 2 * np.pi + 0.5, 1000) * 1e-3
#
## standard plotting
# plt.plot(t_plot * 1e3, f(t_plot, *noms(params)) * 1e-3, 'b-', label='Fit')
# plt.plot(t * 1e3, U * 1e3, 'rx', label='Messdaten')
## plt.errorbar(B * 1e3, noms(y) * 1e5, fmt='rx', yerr=stds(y) * 1e5, label='Messdaten') # mit Fehlerbalken
## plt.xscale('log') # logarithmische x-Achse
# plt.xlim(t_plot[0] * 1e3, t_plot[-1] * 1e3)
# plt.xlabel(r'$t \:/\: \si{\milli\second}$')
# plt.ylabel(r'$U \:/\: \si{\kilo\volt}$')
# plt.legend(loc='best')
# plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
# plt.savefig('build/aufgabenteil_a_plot.pdf')
########## WRITING TABLES ##########
### IF THERE IS ONLY ONE COLUMN IN A TABLE (workaround):
## a=np.array([Wert_d[0]])
## b=np.array([Rx_mean])
## c=np.array([Rx_mean_err])
## d=np.array([Lx_mean*1e3])
## e=np.array([Lx_mean_err*1e3])
#
# write('build/Tabelle_b.tex', make_table([a,b,c,d,e],[0, 1, 0, 1, 1])) # Jeder fehlerbehaftete Wert bekommt zwei Spalten
# write('build/Tabelle_b_texformat.tex', make_full_table(
# 'Messdaten Kapazitätsmessbrücke.',
# 'table:A2',
# 'build/Tabelle_b.tex',
# [1,2,3,4,5], # Hier aufpassen: diese Zahlen bezeichnen diejenigen resultierenden Spaltennummern,
# # die Multicolumns sein sollen
# ['Wert',
# r'$C_2 \:/\: \si{\nano\farad}$',
# r'$R_2 \:/\: \si{\ohm}$',
# r'$R_3 / R_4$', '$R_x \:/\: \si{\ohm}$',
# r'$C_x \:/\: \si{\nano\farad}$']))
#
## Aufsplitten von Tabellen, falls sie zu lang sind
# t1, t2 = np.array_split(t * 1e3, 2)
# U1, U2 = np.array_split(U * 1e-3, 2)
# write('build/loesung-table.tex', make_table([t1, U1, t2, U2], [3, None, 3, None])) # type in Nachkommastellen
#
## Verschmelzen von Tabellen (nur Rohdaten, Anzahl der Zeilen muss gleich sein)
# write('build/Tabelle_b_composed.tex', make_composed_table(['build/Tabelle_b_teil1.tex','build/Tabelle_b_teil2.tex']))
########## ARRAY FUNCTIONS ##########
# np.arange(2,10) # Erzeugt aufwärts zählendes Array von 2 bis 10
# np.zeros(15) # Erzeugt Array mit 15 Nullen
# np.ones(15) # Erzeugt Array mit 15 Einsen
#
# np.amin(array) # Liefert den kleinsten Wert innerhalb eines Arrays
# np.argmin(array) # Gibt mir den Index des Minimums eines Arrays zurück
# np.amax(array) # Liefert den größten Wert innerhalb eines Arrays
# np.argmax(array) # Gibt mir den Index des Maximums eines Arrays zurück
#
# a1,a2 = np.array_split(array, 2) # Array in zwei Hälften teilen
# np.size(array) # Anzahl der Elemente eines Arrays ermitteln
########## ARRAY INDEXING ##########
# y[n - 1::n] # liefert aus einem Array jeden n-ten Wert als Array
########## DIFFERENT STUFF ##########
# R = const.physical_constants["molar gas constant"] # Array of value, unit, error
| [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"sys.path.insert",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.annotate",
"matplotlib.rc_params_from_file",
"table.make_SI",
"table.make_full_table",
"numpy.genfromtxt",
"curve_fit.ucurve_fit",
"matplotlib.rcdefaults",
"matplotlib.pyplot.xlabel",
"m... | [((158, 174), 'matplotlib.rcdefaults', 'mpl.rcdefaults', ([], {}), '()\n', (172, 174), True, 'import matplotlib as mpl\n'), ((2307, 2366), 'numpy.genfromtxt', 'np.genfromtxt', (['"""messdaten/1_Messung_werte.txt"""'], {'unpack': '(True)'}), "('messdaten/1_Messung_werte.txt', unpack=True)\n", (2320, 2366), True, 'import numpy as np\n'), ((2894, 2950), 'numpy.loadtxt', 'np.loadtxt', (['"""messdaten/Bremsberg_werte.txt"""'], {'unpack': '(True)'}), "('messdaten/Bremsberg_werte.txt', unpack=True)\n", (2904, 2950), True, 'import numpy as np\n'), ((2970, 3016), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$"""'], {}), "('$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$')\n", (2980, 3016), True, 'import matplotlib.pyplot as plt\n'), ((3013, 3101), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$"""'], {}), "(\n '$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$')\n", (3023, 3101), True, 'import matplotlib.pyplot as plt\n'), ((3192, 3202), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (3200, 3202), True, 'import matplotlib.pyplot as plt\n'), ((3203, 3215), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (3213, 3215), True, 'import matplotlib.pyplot as plt\n'), ((3216, 3228), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (3226, 3228), True, 'import matplotlib.pyplot as plt\n'), ((3229, 3277), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""$K_\\\\alpha$"""'], {'xy': '(46.5 / 2, 6499)'}), "('$K_\\\\alpha$', xy=(46.5 / 2, 6499))\n", (3241, 3277), True, 'import matplotlib.pyplot as plt\n'), ((3276, 3323), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""$K_\\\\beta$"""'], {'xy': '(41.5 / 2, 2000)'}), "('$K_\\\\beta$', xy=(41.5 / 2, 2000))\n", (3288, 3323), True, 'import matplotlib.pyplot as plt\n'), ((3322, 3365), 'matplotlib.pyplot.annotate', 'plt.annotate', 
(['"""Bremsberg"""'], {'xy': '(20 / 2, 750)'}), "('Bremsberg', xy=(20 / 2, 750))\n", (3334, 3365), True, 'import matplotlib.pyplot as plt\n'), ((3365, 3412), 'matplotlib.pyplot.plot', 'plt.plot', (['theta', 'Z', '"""b-"""'], {'label': '"""Interpolation"""'}), "(theta, Z, 'b-', label='Interpolation')\n", (3373, 3412), True, 'import matplotlib.pyplot as plt\n'), ((3412, 3434), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3422, 3434), True, 'import matplotlib.pyplot as plt\n'), ((3437, 3473), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""build/cu-emission.pdf"""'], {}), "('build/cu-emission.pdf')\n", (3448, 3473), True, 'import matplotlib.pyplot as plt\n'), ((3474, 3485), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3483, 3485), True, 'import matplotlib.pyplot as plt\n'), ((5434, 5491), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[84:90]', 'Z[84:90]', '"""rx"""'], {'label': '"""Messdaten"""'}), "(theta[84:90], Z[84:90], 'rx', label='Messdaten')\n", (5442, 5491), True, 'import matplotlib.pyplot as plt\n'), ((5560, 5575), 'matplotlib.pyplot.axvline', 'plt.axvline', (['r1'], {}), '(r1)\n', (5571, 5575), True, 'import matplotlib.pyplot as plt\n'), ((5576, 5591), 'matplotlib.pyplot.axvline', 'plt.axvline', (['r2'], {}), '(r2)\n', (5587, 5591), True, 'import matplotlib.pyplot as plt\n'), ((5593, 5603), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5601, 5603), True, 'import matplotlib.pyplot as plt\n'), ((5604, 5626), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (5614, 5626), True, 'import matplotlib.pyplot as plt\n'), ((5702, 5748), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$"""'], {}), "('$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$')\n", (5712, 5748), True, 'import matplotlib.pyplot as plt\n'), ((5745, 5833), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Impulsrate \\\\:/\\\\: 
\\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$"""'], {}), "(\n '$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$')\n", (5755, 5833), True, 'import matplotlib.pyplot as plt\n'), ((6160, 6206), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""build/halbwertsbreiten_beta.pdf"""'], {}), "('build/halbwertsbreiten_beta.pdf')\n", (6171, 6206), True, 'import matplotlib.pyplot as plt\n'), ((6207, 6218), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6216, 6218), True, 'import matplotlib.pyplot as plt\n'), ((6760, 6819), 'matplotlib.pyplot.plot', 'plt.plot', (['theta[96:101]', 'Z[96:101]', '"""rx"""'], {'label': '"""Messdaten"""'}), "(theta[96:101], Z[96:101], 'rx', label='Messdaten')\n", (6768, 6819), True, 'import matplotlib.pyplot as plt\n'), ((6889, 6904), 'matplotlib.pyplot.axvline', 'plt.axvline', (['r1'], {}), '(r1)\n', (6900, 6904), True, 'import matplotlib.pyplot as plt\n'), ((6905, 6920), 'matplotlib.pyplot.axvline', 'plt.axvline', (['r2'], {}), '(r2)\n', (6916, 6920), True, 'import matplotlib.pyplot as plt\n'), ((6922, 6932), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6930, 6932), True, 'import matplotlib.pyplot as plt\n'), ((6933, 6955), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (6943, 6955), True, 'import matplotlib.pyplot as plt\n'), ((6956, 7002), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$"""'], {}), "('$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$')\n", (6966, 7002), True, 'import matplotlib.pyplot as plt\n'), ((6999, 7087), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$"""'], {}), "(\n '$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$')\n", (7009, 7087), True, 'import matplotlib.pyplot as plt\n'), ((7497, 7544), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""build/halbwertsbreiten_alpha.pdf"""'], {}), "('build/halbwertsbreiten_alpha.pdf')\n", (7508, 7544), True, 'import matplotlib.pyplot as plt\n'), ((7545, 7556), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7554, 7556), True, 'import matplotlib.pyplot as plt\n'), ((8617, 8670), 'numpy.genfromtxt', 'np.genfromtxt', (['"""messdaten/Germanium.txt"""'], {'unpack': '(True)'}), "('messdaten/Germanium.txt', unpack=True)\n", (8630, 8670), True, 'import numpy as np\n'), ((8725, 8771), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$"""'], {}), "('$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$')\n", (8735, 8771), True, 'import matplotlib.pyplot as plt\n'), ((8768, 8856), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$"""'], {}), "(\n '$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$')\n", (8778, 8856), True, 'import matplotlib.pyplot as plt\n'), ((8845, 8855), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8853, 8855), True, 'import matplotlib.pyplot as plt\n'), ((8887, 8938), 'matplotlib.pyplot.plot', 'plt.plot', (['theta_ger', 'Z_ger', '"""b-"""'], {'label': '"""Messdaten"""'}), "(theta_ger, Z_ger, 'b-', label='Messdaten')\n", (8895, 8938), True, 'import matplotlib.pyplot as plt\n'), ((8938, 8960), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (8948, 8960), True, 'import matplotlib.pyplot as plt\n'), ((8961, 8995), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""build/Germanium.pdf"""'], {}), "('build/Germanium.pdf')\n", (8972, 8995), True, 'import matplotlib.pyplot as plt\n'), ((8996, 9007), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9005, 9007), True, 'import matplotlib.pyplot as plt\n'), ((9039, 9087), 'numpy.genfromtxt', 'np.genfromtxt', (['"""messdaten/Zink.txt"""'], {'unpack': '(True)'}), "('messdaten/Zink.txt', 
unpack=True)\n", (9052, 9087), True, 'import numpy as np\n'), ((9115, 9161), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$"""'], {}), "('$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$')\n", (9125, 9161), True, 'import matplotlib.pyplot as plt\n'), ((9158, 9246), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$"""'], {}), "(\n '$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$')\n", (9168, 9246), True, 'import matplotlib.pyplot as plt\n'), ((9235, 9245), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9243, 9245), True, 'import matplotlib.pyplot as plt\n'), ((9246, 9258), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (9256, 9258), True, 'import matplotlib.pyplot as plt\n'), ((9259, 9271), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (9269, 9271), True, 'import matplotlib.pyplot as plt\n'), ((9273, 9326), 'matplotlib.pyplot.plot', 'plt.plot', (['theta_zink', 'Z_zink', '"""b-"""'], {'label': '"""Messdaten"""'}), "(theta_zink, Z_zink, 'b-', label='Messdaten')\n", (9281, 9326), True, 'import matplotlib.pyplot as plt\n'), ((9326, 9348), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (9336, 9348), True, 'import matplotlib.pyplot as plt\n'), ((9349, 9378), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""build/Zink.pdf"""'], {}), "('build/Zink.pdf')\n", (9360, 9378), True, 'import matplotlib.pyplot as plt\n'), ((9379, 9390), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9388, 9390), True, 'import matplotlib.pyplot as plt\n'), ((9424, 9477), 'numpy.genfromtxt', 'np.genfromtxt', (['"""messdaten/Zirkonium.txt"""'], {'unpack': '(True)'}), "('messdaten/Zirkonium.txt', unpack=True)\n", (9437, 9477), True, 'import numpy as np\n'), ((9503, 9549), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta \\\\:/\\\\: 
\\\\si{\\\\degree}$"""'], {}), "('$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$')\n", (9513, 9549), True, 'import matplotlib.pyplot as plt\n'), ((9546, 9634), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$"""'], {}), "(\n '$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$')\n", (9556, 9634), True, 'import matplotlib.pyplot as plt\n'), ((9623, 9633), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9631, 9633), True, 'import matplotlib.pyplot as plt\n'), ((9634, 9646), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (9644, 9646), True, 'import matplotlib.pyplot as plt\n'), ((9647, 9659), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (9657, 9659), True, 'import matplotlib.pyplot as plt\n'), ((9661, 9712), 'matplotlib.pyplot.plot', 'plt.plot', (['theta_zir', 'Z_zir', '"""b-"""'], {'label': '"""Messdaten"""'}), "(theta_zir, Z_zir, 'b-', label='Messdaten')\n", (9669, 9712), True, 'import matplotlib.pyplot as plt\n'), ((9712, 9734), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (9722, 9734), True, 'import matplotlib.pyplot as plt\n'), ((9735, 9769), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""build/Zirkonium.pdf"""'], {}), "('build/Zirkonium.pdf')\n", (9746, 9769), True, 'import matplotlib.pyplot as plt\n'), ((9770, 9781), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9779, 9781), True, 'import matplotlib.pyplot as plt\n'), ((9812, 9860), 'numpy.genfromtxt', 'np.genfromtxt', (['"""messdaten/Gold.txt"""'], {'unpack': '(True)'}), "('messdaten/Gold.txt', unpack=True)\n", (9825, 9860), True, 'import numpy as np\n'), ((9888, 9934), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$"""'], {}), "('$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$')\n", (9898, 9934), True, 'import matplotlib.pyplot as plt\n'), ((9931, 10019), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$"""'], {}), "(\n '$Impulsrate \\\\:/\\\\: \\\\si{\\\\kilo\\\\gram\\\\meter\\\\per\\\\second\\\\tothe{2}}$')\n", (9941, 10019), True, 'import matplotlib.pyplot as plt\n'), ((10008, 10018), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (10016, 10018), True, 'import matplotlib.pyplot as plt\n'), ((10019, 10031), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (10029, 10031), True, 'import matplotlib.pyplot as plt\n'), ((10032, 10044), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (10042, 10044), True, 'import matplotlib.pyplot as plt\n'), ((10046, 10099), 'matplotlib.pyplot.plot', 'plt.plot', (['theta_gold', 'Z_gold', '"""b-"""'], {'label': '"""Messdaten"""'}), "(theta_gold, Z_gold, 'b-', label='Messdaten')\n", (10054, 10099), True, 'import matplotlib.pyplot as plt\n'), ((10099, 10121), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (10109, 10121), True, 'import matplotlib.pyplot as plt\n'), ((10122, 10151), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""build/Gold.pdf"""'], {}), "('build/Gold.pdf')\n", (10133, 10151), True, 'import matplotlib.pyplot as plt\n'), ((10152, 10163), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (10161, 10163), True, 'import matplotlib.pyplot as plt\n'), ((11920, 11932), 'numpy.sqrt', 'np.sqrt', (['E_k'], {}), '(E_k)\n', (11927, 11932), True, 'import numpy as np\n'), ((11942, 11979), 'curve_fit.ucurve_fit', 'ucurve_fit', (['reg_linear', 'Z', 'E_k_wurzel'], {}), '(reg_linear, Z, E_k_wurzel)\n', (11952, 11979), False, 'from curve_fit import ucurve_fit\n'), ((12182, 12206), 'numpy.linspace', 'np.linspace', (['(25)', '(45)', '(100)'], {}), '(25, 45, 100)\n', (12193, 12206), True, 'import numpy as np\n'), ((12278, 12326), 'matplotlib.pyplot.plot', 'plt.plot', (['Z', 'E_k_wurzel', '"""rx"""'], {'label': 
'"""Messdaten"""'}), "(Z, E_k_wurzel, 'rx', label='Messdaten')\n", (12286, 12326), True, 'import matplotlib.pyplot as plt\n'), ((12327, 12361), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Kernladungszahl $Z$"""'], {}), "('Kernladungszahl $Z$')\n", (12337, 12361), True, 'import matplotlib.pyplot as plt\n'), ((12363, 12435), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sqrt{E_\\\\textrm{k} \\\\:/\\\\: \\\\si{\\\\kilo\\\\electronvolt}}$"""'], {}), "('$\\\\sqrt{E_\\\\textrm{k} \\\\:/\\\\: \\\\si{\\\\kilo\\\\electronvolt}}$')\n", (12373, 12435), True, 'import matplotlib.pyplot as plt\n'), ((12430, 12452), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (12440, 12452), True, 'import matplotlib.pyplot as plt\n'), ((12453, 12494), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""build/Moseley_Diagramm.pdf"""'], {}), "('build/Moseley_Diagramm.pdf')\n", (12464, 12494), True, 'import matplotlib.pyplot as plt\n'), ((195, 240), 'matplotlib.rc_params_from_file', 'mpl.rc_params_from_file', (['"""meine-matplotlibrc"""'], {}), "('meine-matplotlibrc')\n", (218, 240), True, 'import matplotlib as mpl\n'), ((980, 1010), 'sys.path.insert', 'sys.path.insert', (['(0)', 'cmd_folder'], {}), '(0, cmd_folder)\n', (995, 1010), False, 'import os, sys, inspect\n'), ((1259, 1292), 'sys.path.insert', 'sys.path.insert', (['(0)', 'cmd_subfolder'], {}), '(0, cmd_subfolder)\n', (1274, 1292), False, 'import os, sys, inspect\n'), ((2405, 2449), 'table.make_table', 'make_table', (['[zwei_theta, impulsrate]', '[1, 0]'], {}), '([zwei_theta, impulsrate], [1, 0])\n', (2415, 2449), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((2550, 2703), 'table.make_full_table', 'make_full_table', (['"""Messdaten Bragg Bedingung."""', '"""table:A2"""', '"""build/Tabelle_messung_1.tex"""', '[]', "['$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$', '$Zaehlrate$']"], {}), "('Messdaten Bragg Bedingung.', 'table:A2',\n 
'build/Tabelle_messung_1.tex', [], ['$\\\\theta \\\\:/\\\\: \\\\si{\\\\degree}$',\n '$Zaehlrate$'])\n", (2565, 2703), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((3849, 3912), 'table.make_SI', 'make_SI', (['(lamb_min * 1000000000000.0)', '"""\\\\pico\\\\meter"""'], {'figures': '(2)'}), "(lamb_min * 1000000000000.0, '\\\\pico\\\\meter', figures=2)\n", (3856, 3912), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((3968, 4025), 'table.make_SI', 'make_SI', (['(E_max * 0.001)', '"""\\\\kilo\\\\electronvolt"""'], {'figures': '(2)'}), "(E_max * 0.001, '\\\\kilo\\\\electronvolt', figures=2)\n", (3975, 4025), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((4618, 4657), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""rx"""'], {'label': '"""Messdaten"""'}), "(x, y, 'rx', label='Messdaten')\n", (4626, 4657), True, 'import matplotlib.pyplot as plt\n'), ((4727, 4742), 'matplotlib.pyplot.axvline', 'plt.axvline', (['r1'], {}), '(r1)\n', (4738, 4742), True, 'import matplotlib.pyplot as plt\n'), ((4747, 4762), 'matplotlib.pyplot.axvline', 'plt.axvline', (['r2'], {}), '(r2)\n', (4758, 4762), True, 'import matplotlib.pyplot as plt\n'), ((4768, 4778), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4776, 4778), True, 'import matplotlib.pyplot as plt\n'), ((4783, 4795), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4793, 4795), True, 'import matplotlib.pyplot as plt\n'), ((4800, 4846), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""doppelter Kristallwinkel in Grad"""'], {}), "('doppelter Kristallwinkel in Grad')\n", (4810, 4846), True, 'import matplotlib.pyplot as plt\n'), ((4851, 4874), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['u"""Zählrate"""'], {}), "(u'Zählrate')\n", (4861, 4874), True, 'import matplotlib.pyplot as plt\n'), ((5865, 5899), 'table.make_SI', 'make_SI', (['r1', '"""\\\\degree"""'], 
{'figures': '(2)'}), "(r1, '\\\\degree', figures=2)\n", (5872, 5899), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((5943, 5977), 'table.make_SI', 'make_SI', (['r2', '"""\\\\degree"""'], {'figures': '(2)'}), "(r2, '\\\\degree', figures=2)\n", (5950, 5977), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((6108, 6162), 'table.make_SI', 'make_SI', (['(DE * 0.001)', '"""\\\\kilo\\\\electronvolt"""'], {'figures': '(2)'}), "(DE * 0.001, '\\\\kilo\\\\electronvolt', figures=2)\n", (6115, 6162), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((7119, 7153), 'table.make_SI', 'make_SI', (['r1', '"""\\\\degree"""'], {'figures': '(2)'}), "(r1, '\\\\degree', figures=2)\n", (7126, 7153), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((7198, 7232), 'table.make_SI', 'make_SI', (['r2', '"""\\\\degree"""'], {'figures': '(2)'}), "(r2, '\\\\degree', figures=2)\n", (7205, 7232), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((7365, 7419), 'table.make_SI', 'make_SI', (['(DE * 0.001)', '"""\\\\kilo\\\\electronvolt"""'], {'figures': '(2)'}), "(DE * 0.001, '\\\\kilo\\\\electronvolt', figures=2)\n", (7372, 7419), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((7462, 7497), 'table.make_SI', 'make_SI', (['(E1 * 0.001)', '""" """'], {'figures': '(2)'}), "(E1 * 0.001, ' ', figures=2)\n", (7469, 7497), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((7670, 7713), 'table.make_SI', 'make_SI', (['theta_alpha', '"""\\\\degree"""'], {'figures': '(2)'}), "(theta_alpha, '\\\\degree', figures=2)\n", (7677, 7713), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((7745, 7787), 'table.make_SI', 'make_SI', 
(['theta_beta', '"""\\\\degree"""'], {'figures': '(2)'}), "(theta_beta, '\\\\degree', figures=2)\n", (7752, 7787), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((7958, 7977), 'numpy.sqrt', 'np.sqrt', (['(E_beta / r)'], {}), '(E_beta / r)\n', (7965, 7977), True, 'import numpy as np\n'), ((8064, 8096), 'table.make_SI', 'make_SI', (['sigma_1', '""" """'], {'figures': '(2)'}), "(sigma_1, ' ', figures=2)\n", (8071, 8096), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((8126, 8158), 'table.make_SI', 'make_SI', (['sigma_2', '""" """'], {'figures': '(2)'}), "(sigma_2, ' ', figures=2)\n", (8133, 8158), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((8198, 8215), 'numpy.sqrt', 'np.sqrt', (['(8903 / r)'], {}), '(8903 / r)\n', (8205, 8215), True, 'import numpy as np\n'), ((8307, 8343), 'table.make_SI', 'make_SI', (['sigma_1_lit', '""" """'], {'figures': '(2)'}), "(sigma_1_lit, ' ', figures=2)\n", (8314, 8343), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((8377, 8413), 'table.make_SI', 'make_SI', (['sigma_2_lit', '""" """'], {'figures': '(2)'}), "(sigma_2_lit, ' ', figures=2)\n", (8384, 8413), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((10525, 10582), 'table.make_SI', 'make_SI', (['(E_ger * 0.001)', '"""\\\\kilo\\\\electronvolt"""'], {'figures': '(2)'}), "(E_ger * 0.001, '\\\\kilo\\\\electronvolt', figures=2)\n", (10532, 10582), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((10633, 10671), 'table.make_SI', 'make_SI', (['(E_ger * 0.001)', '""" """'], {'figures': '(2)'}), "(E_ger * 0.001, ' ', figures=2)\n", (10640, 10671), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((10875, 10933), 'table.make_SI', 'make_SI', (['(E_zink * 
0.001)', '"""\\\\kilo\\\\electronvolt"""'], {'figures': '(2)'}), "(E_zink * 0.001, '\\\\kilo\\\\electronvolt', figures=2)\n", (10882, 10933), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((10979, 11018), 'table.make_SI', 'make_SI', (['(E_zink * 0.001)', '""" """'], {'figures': '(2)'}), "(E_zink * 0.001, ' ', figures=2)\n", (10986, 11018), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((11223, 11280), 'table.make_SI', 'make_SI', (['(E_zir * 0.001)', '"""\\\\kilo\\\\electronvolt"""'], {'figures': '(2)'}), "(E_zir * 0.001, '\\\\kilo\\\\electronvolt', figures=2)\n", (11230, 11280), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((11331, 11369), 'table.make_SI', 'make_SI', (['(E_zir * 0.001)', '""" """'], {'figures': '(2)'}), "(E_zir * 0.001, ' ', figures=2)\n", (11338, 11369), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((11426, 11469), 'numpy.sqrt', 'np.sqrt', (['(E_ger / r - s_k ** 2 / 4 * 32 ** 4)'], {}), '(E_ger / r - s_k ** 2 / 4 * 32 ** 4)\n', (11433, 11469), True, 'import numpy as np\n'), ((11483, 11527), 'numpy.sqrt', 'np.sqrt', (['(E_zink / r - s_k ** 2 / 4 * 30 ** 4)'], {}), '(E_zink / r - s_k ** 2 / 4 * 30 ** 4)\n', (11490, 11527), True, 'import numpy as np\n'), ((11540, 11583), 'numpy.sqrt', 'np.sqrt', (['(E_zir / r - s_k ** 2 / 4 * 40 ** 4)'], {}), '(E_zir / r - s_k ** 2 / 4 * 40 ** 4)\n', (11547, 11583), True, 'import numpy as np\n'), ((11627, 11661), 'table.make_SI', 'make_SI', (['sigma_ger', '""" """'], {'figures': '(2)'}), "(sigma_ger, ' ', figures=2)\n", (11634, 11661), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((11706, 11741), 'table.make_SI', 'make_SI', (['sigma_zink', '""" """'], {'figures': '(2)'}), "(sigma_zink, ' ', figures=2)\n", (11713, 11741), False, 'from table import make_table, 
make_full_table, make_composed_table, make_SI, write\n'), ((11791, 11825), 'table.make_SI', 'make_SI', (['sigma_zir', '""" """'], {'figures': '(2)'}), "(sigma_zir, ' ', figures=2)\n", (11798, 11825), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((12030, 12082), 'table.make_SI', 'make_SI', (['(4 / 3 * m ** 2)', '"""\\\\electronvolt"""'], {'figures': '(1)'}), "(4 / 3 * m ** 2, '\\\\electronvolt', figures=1)\n", (12037, 12082), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((12113, 12173), 'table.make_SI', 'make_SI', (['(4 / 3 * m ** 2 / (h * c))', '"""\\\\per\\\\meter"""'], {'figures': '(1)'}), "(4 / 3 * m ** 2 / (h * c), '\\\\per\\\\meter', figures=1)\n", (12120, 12173), False, 'from table import make_table, make_full_table, make_composed_table, make_SI, write\n'), ((3781, 3796), 'numpy.deg2rad', 'np.deg2rad', (['(5.4)'], {}), '(5.4)\n', (3791, 3796), True, 'import numpy as np\n'), ((5079, 5093), 'numpy.deg2rad', 'np.deg2rad', (['r1'], {}), '(r1)\n', (5089, 5093), True, 'import numpy as np\n'), ((5116, 5130), 'numpy.deg2rad', 'np.deg2rad', (['r2'], {}), '(r2)\n', (5126, 5130), True, 'import numpy as np\n'), ((5287, 5302), 'numpy.abs', 'np.abs', (['(r1 - r2)'], {}), '(r1 - r2)\n', (5293, 5302), True, 'import numpy as np\n'), ((6027, 6042), 'numpy.abs', 'np.abs', (['(r1 - r2)'], {}), '(r1 - r2)\n', (6033, 6042), True, 'import numpy as np\n'), ((6403, 6417), 'numpy.deg2rad', 'np.deg2rad', (['r1'], {}), '(r1)\n', (6413, 6417), True, 'import numpy as np\n'), ((6440, 6454), 'numpy.deg2rad', 'np.deg2rad', (['r2'], {}), '(r2)\n', (6450, 6454), True, 'import numpy as np\n'), ((6611, 6626), 'numpy.abs', 'np.abs', (['(r1 - r2)'], {}), '(r1 - r2)\n', (6617, 6626), True, 'import numpy as np\n'), ((7283, 7298), 'numpy.abs', 'np.abs', (['(r1 - r2)'], {}), '(r1 - r2)\n', (7289, 7298), True, 'import numpy as np\n'), ((7816, 7839), 'numpy.deg2rad', 'np.deg2rad', (['theta_alpha'], {}), 
'(theta_alpha)\n', (7826, 7839), True, 'import numpy as np\n'), ((7866, 7888), 'numpy.deg2rad', 'np.deg2rad', (['theta_beta'], {}), '(theta_beta)\n', (7876, 7888), True, 'import numpy as np\n'), ((7993, 8041), 'numpy.sqrt', 'np.sqrt', (['((r * (29 - sigma_1) ** 2 - E_alpha) / r)'], {}), '((r * (29 - sigma_1) ** 2 - E_alpha) / r)\n', (8000, 8041), True, 'import numpy as np\n'), ((8235, 8280), 'numpy.sqrt', 'np.sqrt', (['((r * (29 - sigma_1) ** 2 - 8046) / r)'], {}), '((r * (29 - sigma_1) ** 2 - 8046) / r)\n', (8242, 8280), True, 'import numpy as np\n'), ((10429, 10452), 'numpy.deg2rad', 'np.deg2rad', (['theta_ger_x'], {}), '(theta_ger_x)\n', (10439, 10452), True, 'import numpy as np\n'), ((10781, 10805), 'numpy.deg2rad', 'np.deg2rad', (['theta_zink_x'], {}), '(theta_zink_x)\n', (10791, 10805), True, 'import numpy as np\n'), ((11127, 11150), 'numpy.deg2rad', 'np.deg2rad', (['theta_zir_x'], {}), '(theta_zir_x)\n', (11137, 11150), True, 'import numpy as np\n'), ((4245, 4259), 'numpy.deg2rad', 'np.deg2rad', (['r1'], {}), '(r1)\n', (4255, 4259), True, 'import numpy as np\n'), ((4286, 4300), 'numpy.deg2rad', 'np.deg2rad', (['r2'], {}), '(r2)\n', (4296, 4300), True, 'import numpy as np\n'), ((4477, 4492), 'numpy.abs', 'np.abs', (['(r1 - r2)'], {}), '(r1 - r2)\n', (4483, 4492), True, 'import numpy as np\n'), ((4991, 5007), 'numpy.max', 'np.max', (['Z[84:90]'], {}), '(Z[84:90])\n', (4997, 5007), True, 'import numpy as np\n'), ((5512, 5528), 'numpy.max', 'np.max', (['Z[84:90]'], {}), '(Z[84:90])\n', (5518, 5528), True, 'import numpy as np\n'), ((6314, 6331), 'numpy.max', 'np.max', (['Z[96:101]'], {}), '(Z[96:101])\n', (6320, 6331), True, 'import numpy as np\n'), ((6840, 6857), 'numpy.max', 'np.max', (['Z[96:101]'], {}), '(Z[96:101])\n', (6846, 6857), True, 'import numpy as np\n'), ((12244, 12256), 'uncertainties.unumpy.nominal_values', 'noms', (['params'], {}), '(params)\n', (12248, 12256), True, 'from uncertainties.unumpy import nominal_values as noms, std_devs as stds\n'), 
((4156, 4165), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (4162, 4165), True, 'import numpy as np\n'), ((4682, 4691), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (4688, 4691), True, 'import numpy as np\n'), ((914, 936), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (934, 936), False, 'import os, sys, inspect\n'), ((1165, 1187), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1185, 1187), False, 'import os, sys, inspect\n')] |
import tensorflow as tf
tf.enable_eager_execution()
import numpy as np
import pandas as pd
import os
# import argparse
def read_tf(tfrecord_path):
    """Read the first example from a TFRecord file and return image and label.

    Decodes the raw float32 buffers of the first serialized example and
    reshapes them to the fixed slice geometry used by this dataset:
    256 x 256 x 3 for the image, and the first channel only (with a
    trailing singleton axis) for the label.

    :param tfrecord_path: path to the .tfrecords file
    :return: tuple (image, label) as numpy arrays of shapes
             (256, 256, 3) and (256, 256, 1)
    """
    raw_image_dataset = tf.data.TFRecordDataset(tfrecord_path)

    # Feature spec describing how each serialized example is laid out.
    image_feature_description = {
        'data_vol': tf.io.FixedLenFeature([], tf.string),
        'label_vol': tf.io.FixedLenFeature([], tf.string),
    }

    def _parse_image_function(example_proto):
        # Parse the input tf.Example proto using the dictionary above.
        return tf.io.parse_single_example(example_proto, image_feature_description)

    parsed_image_dataset = raw_image_dataset.map(_parse_image_function)
    for parser in parsed_image_dataset:
        # tf.io.decode_raw is the non-deprecated spelling of tf.decode_raw,
        # consistent with the tf.io.* calls above.
        data_vol = tf.io.decode_raw(parser['data_vol'], tf.float32)
        label_vol = tf.io.decode_raw(parser['label_vol'], tf.float32)
        image_raw1 = data_vol.numpy().reshape((256, 256, 3))
        # Keep only the first label channel, restoring a trailing axis.
        image_raw2 = np.expand_dims(label_vol.numpy().reshape((256, 256, 3))[..., 0], -1)
        # Only the first record is needed; return immediately.
        return image_raw1, image_raw2
def tf_to_numpy(tf_path='../../input/'):
    """Convert every tensor listed in the dataset csv files to numpy arrays.

    For each split (ct/mr, train/val) the record names are read from
    ``<tf_path>/<split>_list.csv`` and each decoded image/mask pair is saved
    under ``<tf_path>/PnpAda_release_data/<split>/{img,mask}/``.
    Records whose image file already exists are skipped.

    :param tf_path: directory containing the *_list.csv files and the
                    PnpAda_release_data folder with the tfrecord files
    :return: None
    """
    for data_name in ["ct_train", "ct_val", "mr_train", "mr_val"]:
        df_train = pd.read_csv(os.path.join(tf_path, '{}_list.csv'.format(data_name)))
        ids_train = df_train['img']
        folder_tosave = os.path.join(tf_path, 'PnpAda_release_data/{}/'.format(data_name))
        # Create the output folders in one step instead of chained mkdir calls.
        os.makedirs(os.path.join(folder_tosave, 'img'), exist_ok=True)
        os.makedirs(os.path.join(folder_tosave, 'mask'), exist_ok=True)
        for i, id in enumerate(ids_train):
            if i % 100 == 0:
                print(id)
            if not os.path.exists(os.path.join(folder_tosave, 'img', id)):
                # Build the tfrecord path from tf_path instead of a
                # hard-coded '../../input/' so the parameter is honoured
                # (identical result for the default value of tf_path).
                img_path = os.path.join(
                    tf_path, 'PnpAda_release_data/train_n_val/{}_tfs/{}'.format(data_name, id))
                img, mask = read_tf(img_path)
                np.save(os.path.join(folder_tosave, 'img', id), img)
                np.save(os.path.join(folder_tosave, 'mask', id), mask)
        print('**************** {} finished ****************'.format(data_name))
def _inspect_slice(tfrecord_path):
    """Load one tfrecord slice, print basic statistics and show centre crops."""
    from matplotlib import pyplot as plt

    img, mask = read_tf(tfrecord_path)
    print(img.shape, mask.shape)
    print(np.mean(img), np.std(img))
    print(img.min(), img.max())
    # Rescale the image to the 0-255 integer range for display.
    img2 = (img - img.min()) * 255 / (img.max() - img.min())
    img2 = np.array(img2, dtype=int)
    print(img2.min(), img2.max())
    # Show the central 224x224 crop of the image and of the first mask channel.
    plt.imshow(img2[128 - 112:128 + 112, 128 - 112:128 + 112], cmap='gray')
    plt.show()
    plt.imshow(mask[128 - 112:128 + 112, 128 - 112:128 + 112, 0], cmap='gray')
    plt.show()


if __name__ == '__main__':
    # tf_to_numpy()
    # print("################ all the processes finished ################")
    # The two inspection sections below were previously duplicated inline;
    # they are factored into _inspect_slice with identical output.
    _inspect_slice('../../input/PnpAda_release_data/train_n_val/ct_train_tfs/ct_train_slice{}.tfrecords'.format(0))
    _inspect_slice('../../input/PnpAda_release_data/train_n_val/ct_val_tfs/ct_val_slice{}.tfrecords'.format(1))
| [
"tensorflow.data.TFRecordDataset",
"matplotlib.pyplot.imshow",
"numpy.mean",
"os.path.exists",
"tensorflow.io.parse_single_example",
"os.path.join",
"tensorflow.enable_eager_execution",
"tensorflow.decode_raw",
"numpy.array",
"tensorflow.io.FixedLenFeature",
"os.mkdir",
"numpy.std",
"matplot... | [((24, 51), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (49, 51), True, 'import tensorflow as tf\n'), ((299, 337), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['tfrecord_path'], {}), '(tfrecord_path)\n', (322, 337), True, 'import tensorflow as tf\n'), ((3044, 3069), 'numpy.array', 'np.array', (['img2'], {'dtype': 'int'}), '(img2, dtype=int)\n', (3052, 3069), True, 'import numpy as np\n'), ((3149, 3220), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img2[128 - 112:128 + 112, 128 - 112:128 + 112]'], {'cmap': '"""gray"""'}), "(img2[128 - 112:128 + 112, 128 - 112:128 + 112], cmap='gray')\n", (3159, 3220), True, 'from matplotlib import pyplot as plt\n'), ((3216, 3226), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3224, 3226), True, 'from matplotlib import pyplot as plt\n'), ((3231, 3305), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask[128 - 112:128 + 112, 128 - 112:128 + 112, 0]'], {'cmap': '"""gray"""'}), "(mask[128 - 112:128 + 112, 128 - 112:128 + 112, 0], cmap='gray')\n", (3241, 3305), True, 'from matplotlib import pyplot as plt\n'), ((3300, 3310), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3308, 3310), True, 'from matplotlib import pyplot as plt\n'), ((3603, 3628), 'numpy.array', 'np.array', (['img2'], {'dtype': 'int'}), '(img2, dtype=int)\n', (3611, 3628), True, 'import numpy as np\n'), ((3667, 3738), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img2[128 - 112:128 + 112, 128 - 112:128 + 112]'], {'cmap': '"""gray"""'}), "(img2[128 - 112:128 + 112, 128 - 112:128 + 112], cmap='gray')\n", (3677, 3738), True, 'from matplotlib import pyplot as plt\n'), ((3743, 3753), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3751, 3753), True, 'from matplotlib import pyplot as plt\n'), ((3758, 3832), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask[128 - 112:128 + 112, 128 - 112:128 + 112, 0]'], {'cmap': '"""gray"""'}), "(mask[128 - 112:128 + 112, 128 - 112:128 + 112, 0], 
cmap='gray')\n", (3768, 3832), True, 'from matplotlib import pyplot as plt\n'), ((3837, 3847), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3845, 3847), True, 'from matplotlib import pyplot as plt\n'), ((443, 479), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (464, 479), True, 'import tensorflow as tf\n'), ((502, 538), 'tensorflow.io.FixedLenFeature', 'tf.io.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (523, 538), True, 'import tensorflow as tf\n'), ((679, 747), 'tensorflow.io.parse_single_example', 'tf.io.parse_single_example', (['example_proto', 'image_feature_description'], {}), '(example_proto, image_feature_description)\n', (705, 747), True, 'import tensorflow as tf\n'), ((881, 926), 'tensorflow.decode_raw', 'tf.decode_raw', (["parser['data_vol']", 'tf.float32'], {}), "(parser['data_vol'], tf.float32)\n", (894, 926), True, 'import tensorflow as tf\n'), ((948, 994), 'tensorflow.decode_raw', 'tf.decode_raw', (["parser['label_vol']", 'tf.float32'], {}), "(parser['label_vol'], tf.float32)\n", (961, 994), True, 'import tensorflow as tf\n'), ((2913, 2925), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (2920, 2925), True, 'import numpy as np\n'), ((2927, 2938), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (2933, 2938), True, 'import numpy as np\n'), ((3472, 3484), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (3479, 3484), True, 'import numpy as np\n'), ((3486, 3497), 'numpy.std', 'np.std', (['img'], {}), '(img)\n', (3492, 3497), True, 'import numpy as np\n'), ((1745, 1774), 'os.path.exists', 'os.path.exists', (['folder_tosave'], {}), '(folder_tosave)\n', (1759, 1774), False, 'import os\n'), ((1788, 1811), 'os.mkdir', 'os.mkdir', (['folder_tosave'], {}), '(folder_tosave)\n', (1796, 1811), False, 'import os\n'), ((1846, 1880), 'os.path.join', 'os.path.join', (['folder_tosave', '"""img"""'], {}), "(folder_tosave, 'img')\n", (1858, 1880), False, 'import os\n'), 
((1908, 1943), 'os.path.join', 'os.path.join', (['folder_tosave', '"""img/"""'], {}), "(folder_tosave, 'img/')\n", (1920, 1943), False, 'import os\n'), ((1979, 2014), 'os.path.join', 'os.path.join', (['folder_tosave', '"""mask"""'], {}), "(folder_tosave, 'mask')\n", (1991, 2014), False, 'import os\n'), ((2042, 2078), 'os.path.join', 'os.path.join', (['folder_tosave', '"""mask/"""'], {}), "(folder_tosave, 'mask/')\n", (2054, 2078), False, 'import os\n'), ((2212, 2250), 'os.path.join', 'os.path.join', (['folder_tosave', '"""img"""', 'id'], {}), "(folder_tosave, 'img', id)\n", (2224, 2250), False, 'import os\n'), ((2428, 2466), 'os.path.join', 'os.path.join', (['folder_tosave', '"""img"""', 'id'], {}), "(folder_tosave, 'img', id)\n", (2440, 2466), False, 'import os\n'), ((2497, 2536), 'os.path.join', 'os.path.join', (['folder_tosave', '"""mask"""', 'id'], {}), "(folder_tosave, 'mask', id)\n", (2509, 2536), False, 'import os\n')] |
import numpy as np
import matplotlib.pyplot as plt
from custom_poling.core.target import Target
from custom_poling.core.custom_crystal import CustomCrystal
# Script: design a custom-poled crystal whose phase-matching function (PMF)
# approximates a Gaussian target, then compare the achieved PMF and amplitude
# against the target.  All figures are saved to the working directory.
# Crystal properties
domain_width = 10.0e-6
number_domains = 1000
L = number_domains * domain_width  # total crystal length
k0 = np.pi / domain_width  # wavevector phase-matched by the periodic poling
# Numerical integration parameters: k grid centred on k0
k_range = 100/L
dk = k_range/401
k_array = np.arange(k0-k_range/2,k0+k_range/2,dk)
# Create a custom crystal object
custom_crystal_gauss = CustomCrystal(domain_width,number_domains)
domain_middles_gauss = custom_crystal_gauss.domain_middles
# Define and plot a Gaussian target function
std = 10/L  # spectral width of the Gaussian target
height = 0.00025  # peak amplitude of the target PMF
# Gaussian envelope times a linear phase exp(i*L/2*k) centring the response.
target_pmf_gauss = lambda k:1j*height*np.exp(-(k-k0)**2/(2*std**2))*np.exp(1j * L/2 * k)
target_gauss = Target(target_pmf_gauss,k_array)
target_gauss.plot_pmf(show=False, save_as="target_pmf_gauss.pdf")
# Compute and plot the target amplitude
target_amplitude_gauss = target_gauss.compute_amplitude(k0,domain_middles_gauss)
target_gauss.plot_amplitude(show=False, save_as="target_amplitude_gauss.pdf")
# Compute and plot the custom domains
custom_domains_gauss = custom_crystal_gauss.compute_domains(target_amplitude_gauss,k0)
custom_crystal_gauss.plot_domains(show=False, save_as="custom_domains_gauss.pdf")
# Compute and plot the PMF for the customized crystal
custom_crystal_gauss.compute_pmf(k_array)
custom_crystal_gauss.plot_pmf(show=False, save_as="custom_crystal_gauss.pdf")
# Compare the output PMF to the target PMF.
# Start a fresh figure so earlier plot_* calls cannot leak into this plot,
# and close it afterwards so the amplitude comparison below starts clean
# (previously both comparisons shared one figure, so compare_amplitudes.pdf
# also contained the PMF curves).
plt.figure()
plt.plot(k_array,np.abs(target_gauss.pmf),label='Target PMF')
plt.plot(k_array,np.abs(custom_crystal_gauss.pmf),label='Custom PMF')
plt.legend()
plt.savefig("compare_PMF.pdf")
plt.close()
print("Saved figure as: compare_PMF.pdf")
# Compute amplitudes
custom_amplitude,z_list= custom_crystal_gauss.compute_amplitude(k0,num_internal_points=1)
# Compare the output amplitudes to the target amplitudes
plt.figure()
plt.plot(z_list,np.abs(custom_amplitude),label='Custom amplitude')
plt.plot(custom_crystal_gauss.domain_middles,np.abs(target_gauss.amplitude),label='Target amplitude')
plt.legend()
plt.savefig("compare_amplitudes.pdf")
plt.close()
print("Saved figure as: compare_amplitudes.pdf") | [
"numpy.abs",
"matplotlib.pyplot.savefig",
"numpy.arange",
"numpy.exp",
"custom_poling.core.target.Target",
"custom_poling.core.custom_crystal.CustomCrystal",
"matplotlib.pyplot.legend"
] | [((363, 412), 'numpy.arange', 'np.arange', (['(k0 - k_range / 2)', '(k0 + k_range / 2)', 'dk'], {}), '(k0 - k_range / 2, k0 + k_range / 2, dk)\n', (372, 412), True, 'import numpy as np\n'), ((460, 503), 'custom_poling.core.custom_crystal.CustomCrystal', 'CustomCrystal', (['domain_width', 'number_domains'], {}), '(domain_width, number_domains)\n', (473, 503), False, 'from custom_poling.core.custom_crystal import CustomCrystal\n'), ((740, 773), 'custom_poling.core.target.Target', 'Target', (['target_pmf_gauss', 'k_array'], {}), '(target_pmf_gauss, k_array)\n', (746, 773), False, 'from custom_poling.core.target import Target\n'), ((1598, 1610), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1608, 1610), True, 'import matplotlib.pyplot as plt\n'), ((1611, 1641), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""compare_PMF.pdf"""'], {}), "('compare_PMF.pdf')\n", (1622, 1641), True, 'import matplotlib.pyplot as plt\n'), ((2023, 2035), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2033, 2035), True, 'import matplotlib.pyplot as plt\n'), ((2036, 2073), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""compare_amplitudes.pdf"""'], {}), "('compare_amplitudes.pdf')\n", (2047, 2073), True, 'import matplotlib.pyplot as plt\n'), ((1483, 1507), 'numpy.abs', 'np.abs', (['target_gauss.pmf'], {}), '(target_gauss.pmf)\n', (1489, 1507), True, 'import numpy as np\n'), ((1545, 1577), 'numpy.abs', 'np.abs', (['custom_crystal_gauss.pmf'], {}), '(custom_crystal_gauss.pmf)\n', (1551, 1577), True, 'import numpy as np\n'), ((1870, 1894), 'numpy.abs', 'np.abs', (['custom_amplitude'], {}), '(custom_amplitude)\n', (1876, 1894), True, 'import numpy as np\n'), ((1966, 1996), 'numpy.abs', 'np.abs', (['target_gauss.amplitude'], {}), '(target_gauss.amplitude)\n', (1972, 1996), True, 'import numpy as np\n'), ((704, 728), 'numpy.exp', 'np.exp', (['(1.0j * L / 2 * k)'], {}), '(1.0j * L / 2 * k)\n', (710, 728), True, 'import numpy as np\n'), ((674, 713), 'numpy.exp', 
'np.exp', (['(-(k - k0) ** 2 / (2 * std ** 2))'], {}), '(-(k - k0) ** 2 / (2 * std ** 2))\n', (680, 713), True, 'import numpy as np\n')] |
# implementation based on DeepLTL https://github.com/reactive-systems/deepltl
from argparse import ArgumentParser
import subprocess
import os.path as path
import sys
import json
import random
import tensorflow as tf
import numpy as np
from dlsgs.utils import ltl_parser
def argparser():
    """Build the command-line argument parser for training/testing runs."""
    ap = ArgumentParser()

    # Meta options: run identification, directories, dataset selection.
    ap.add_argument('--run-name', default='default', help='name of this run, to better find produced data later')
    ap.add_argument('--job-dir', default='runs', help='general job directory to save produced data into')
    ap.add_argument('--data-dir', default='datasets', help='directory of datasets')
    ap.add_argument('--ds-name', default=None, help='Name of the dataset to use')

    # --train / --test are two mutually exclusive views of the same flag.
    mode = ap.add_mutually_exclusive_group()
    mode.add_argument('--train', dest='test', action='store_false', default=False, help='Run in training mode, do not perform testing; default')
    mode.add_argument('--test', dest='test', action='store_true', default=False, help='Run in testing mode, do not train')

    ap.add_argument('--binary-path', default=None, help='Path to binaries, current: aalta')
    ap.add_argument('--no-auto', action='store_true', help="Do not get parameters from params.txt when testing")
    ap.add_argument('--eval-name', default='test', help="Name of log and test files")
    ap.add_argument('--no-save', action='store_true')
    ap.add_argument('--save-only', type=str, default='last', help='save which checkpoints: all, best, last')
    ap.add_argument('--params-file', type=str, help='load parameters from specified file')
    ap.add_argument('--seed', type=int, help='Global seed for python, numpy, tensorflow. If not specified, generate new one')

    # Typical hyperparameters.
    ap.add_argument('--batch-size', type=int, default=100)
    ap.add_argument('--epochs', type=int, default=3)
    ap.add_argument('--initial-epoch', type=int, default=0, help='used to track the epoch number correctly when resuming training')
    ap.add_argument('--samples', type=int, default=None)
    ap.add_argument('--alpha', type=float, default=1)
    ap.add_argument('--beam-size', type=int, default=2)
    return ap
EXCLUDE_AUTO_ARGS = ['job_dir', 'run_name', 'data_dir', 'binary_path', 'test', 'force_load', 'eval_name', 'load_from', 'load_parts']
def load_params(params_dict, path, exclude_auto=True):
    """Read a JSON parameter file and merge it over *params_dict*.

    Keys listed in EXCLUDE_AUTO_ARGS are skipped when *exclude_auto* is set,
    and any key that was also supplied on the command line (sys.argv) is
    dropped so explicit CLI flags win over the stored parameters.

    Returns a new merged dict; *params_dict* itself is not modified.
    """
    with tf.io.gfile.GFile(path, 'r') as f:
        loaded = json.loads(f.read())
    if exclude_auto:
        for key in EXCLUDE_AUTO_ARGS:
            loaded.pop(key, None)
    dropped = []
    for key in list(loaded):
        flag = '--' + key.replace('_', '-')
        # Command-line flags take precedence over the stored value.
        if any(arg.startswith(flag) for arg in sys.argv[1:]):
            del loaded[key]
            dropped.append(flag)
    suffix = (', dropped ' + str(dropped) + ' as specified on command line') if dropped else ''
    print('Loaded parameters from', path, suffix)
    merged = params_dict.copy()
    merged.update(loaded)
    return merged
def setup(**kwargs):
    """Prepare the run environment from parsed CLI arguments (as kwargs).

    In order: optionally reload parameters from the run's params.json (or an
    explicit params file), configure GPU memory growth, fetch external
    binaries, and seed python/numpy/tensorflow. Returns the (possibly
    updated) kwargs dict. Exits the process if more than one GPU is visible.
    """
    # When testing, reload the parameters saved at training time
    # (<job_dir>/<run_name>/params.json) unless --no-auto was given.
    if kwargs['test'] and not kwargs['no_auto']:
        if kwargs['params_file'] is not None:
            raise NotImplementedError()
        load_path = path.join(kwargs['job_dir'], kwargs['run_name'], 'params.json')
        kwargs = load_params(kwargs, load_path, exclude_auto=True)
    elif kwargs['params_file'] is not None:
        kwargs = load_params(kwargs, kwargs['params_file'], exclude_auto=False)
    binary_path = kwargs['binary_path']
    # GPU configuration: this code only supports a single GPU; enable
    # memory growth so TF does not grab all GPU memory upfront.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    print('GPUs', gpus)
    if len(gpus) > 1:
        print("More than one GPU specified, I'm scared!")
        sys.exit(1)
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    # Copy external binaries into ./bin; the filename list is currently
    # empty (aalta disabled), so this loop is a no-op.
    filenames = [] #['aalta']
    if binary_path is not None:
        for filename in filenames:
            try:
                tf.io.gfile.makedirs('bin')
                tf.io.gfile.copy(path.join(binary_path, filename), path.join('bin', filename))
            except tf.errors.AlreadyExistsError:
                pass
    # Seeding: generate a fresh seed if none was given, then seed all three
    # RNG sources so runs are reproducible from the recorded seed.
    if kwargs['seed'] is None:
        random.seed()
        kwargs['seed'] = random.randint(0, 2**32 - 1)
        print('Seed not provided, generated new one:', kwargs['seed'])
    random.seed(kwargs['seed'])
    np.random.seed(kwargs['seed'])
    tf.random.set_seed(kwargs['seed'])
    return kwargs
def log_params(job_dir, run_name, _skip=None, **kwargs):
    """Write the run parameters to <job_dir>/<run_name>/params.json.

    The saved dict is kwargs plus job_dir/run_name, minus any keys listed
    in _skip.
    """
    skip = [] if _skip is None else _skip
    logdir = path.join(job_dir, run_name)
    tf.io.gfile.makedirs(logdir)
    params = dict(kwargs, job_dir=job_dir, run_name=run_name)
    for key in skip:
        params.pop(key, None)
    with tf.io.gfile.GFile(path.join(logdir, 'params.json'), 'w') as f:
        f.write(json.dumps(params, indent=4) + '\n')
def checkpoint_path(job_dir, run_name, **kwargs):
    """Return the checkpoint directory <job_dir>/<run_name>/checkpoints.

    Extra keyword arguments are accepted and ignored so the full parsed
    argument dict can be splatted in.
    """
    run_dir = path.join(job_dir, run_name)
    return path.join(run_dir, 'checkpoints')
def checkpoint_callback(job_dir, run_name, save_weights_only=True, save_only='all', **kwargs):
    """Build a Keras ``ModelCheckpoint`` callback for this run.

    save_only : 'all' (one checkpoint per epoch, tagged with epoch and
    val_loss), 'best' (best validation loss only) or 'last' (most recent
    only). Raises ValueError for anything else — previously an unknown
    value fell through all branches and crashed with UnboundLocalError
    on ``filepath``.
    """
    if save_only == 'all':
        filepath = str(path.join(checkpoint_path(job_dir, run_name), 'cp_')) + 'ep{epoch:02d}_vl{val_loss:.3f}' # save per epoch
    elif save_only == 'best':
        filepath = str(path.join(checkpoint_path(job_dir, run_name), 'best')) # save best only
    elif save_only == 'last':
        filepath = str(path.join(checkpoint_path(job_dir, run_name), 'last')) # save last only
    else:
        raise ValueError("save_only must be one of 'all', 'best' or 'last', got %r" % (save_only,))
    # save_best_only only makes sense for the 'best' mode
    return tf.keras.callbacks.ModelCheckpoint(filepath, save_weights_only=save_weights_only, save_best_only=save_only=='best')
def get_log_dir(job_dir, run_name, **kwargs):
    """Return the log directory for the given run as a plain string."""
    log_dir = path.join(job_dir, run_name)
    return str(log_dir)
def tensorboard_callback(job_dir, run_name, **kwargs):
    """Return a TensorBoard callback logging to ``<job_dir>/<run_name>``."""
    log_dir = str(path.join(job_dir, run_name))
    return tf.keras.callbacks.TensorBoard(log_dir)
def last_checkpoint(job_dir, run_name, load_from=None, **kwargs):
    """Return the most recent checkpoint path for the run (None if absent).

    If ``load_from`` is given, it overrides ``run_name`` — used to warm-start
    from a different run's checkpoints.
    """
    if load_from is not None:
        run_name = load_from
    return tf.train.latest_checkpoint(checkpoint_path(job_dir, run_name))
# Evaluation hyper-parameters that get recorded alongside evaluation results
EVAL_PARAMS = ['ds_name', 'batch_size', 'beam_size', 'alpha', 'samples']
def test_and_analyze_ltl(pred_fn, dataset, in_vocab=None, out_vocab=None, plot_name='test_results', log_name=None, **kwargs):
    """Stream model predictions to an external trace checker and save plots.

    For each batch in ``dataset``, decodes the input formula, the model's
    predicted trace and the gold trace, and pipes the triple (one line each)
    into a ``dlsgs.utils.trace_check`` subprocess, which validates traces and
    writes ``tmp_test_results.{png,svg}``; these are then moved into the
    run directory.  Requires kwargs 'job_dir', 'run_name' and 'tree_pe'.
    """
    plotdir = path.join(kwargs['job_dir'], kwargs['run_name'])
    tf.io.gfile.makedirs(plotdir)
    proc_args = ['-f', '-', '-t', '-', '-r', '-', '--per-size', '--save-analysis', 'tmp_test_results', '--validator', 'aalta', '--timeout', '60', '--log-level', '4']
    if log_name is not None:
        proc_args.extend(['-l', path.join(plotdir, log_name + '.log')])
    # Large bufsize so writes to the checker's stdin do not block per line
    proc = subprocess.Popen(['python3', '-m', 'dlsgs.utils.trace_check'] + proc_args,
            stdin=subprocess.PIPE, stdout=None, stderr=None, universal_newlines=True, bufsize=10000000)
    try:
        for x in dataset:
            if kwargs['tree_pe']:
                # Dataset yields (data, positional_encoding, label) triples
                data, pe, label = x
                pred = pred_fn([data, pe])
            else:
                data, label = x
                pred = pred_fn(data)
            if len(pred.shape) == 1:
                # Single example: add a batch dimension so the loop below works
                pred = np.expand_dims(pred, axis=0)
                data = tf.expand_dims(data, axis=0)
                label = tf.expand_dims(label, axis=0)
            for i in range(pred.shape[0]):
                label_decoded = out_vocab.decode(list(label[i, :]))
                if not label_decoded:
                    label_decoded = ''
                formula_decoded = in_vocab.decode(list(data[i, :]))
                # '%' appears to be a padding/marker token stripped before checking
                formula_decoded = formula_decoded.replace('%', '')
                step_in = formula_decoded + '\n' + out_vocab.decode(list(pred[i, :])) + '\n' + label_decoded + '\n'
                sys.stdout.flush()
                proc.stdin.write(step_in)
                proc.stdin.flush()
    except BrokenPipeError:
        sys.exit('Pipe to trace checker broke. output:' + proc.communicate()[0])
    sys.stdout.flush()
    # Close stdin and wait for the checker to finish writing its result files
    proc.communicate()
    tf.io.gfile.copy('tmp_test_results.png', path.join(plotdir, plot_name + '.png'), overwrite=True)
    tf.io.gfile.remove('tmp_test_results.png')
    tf.io.gfile.copy('tmp_test_results.svg', path.join(plotdir, plot_name + '.svg'), overwrite=True)
    tf.io.gfile.remove('tmp_test_results.svg')
def get_ass(lst):
    """Parse a flat (variable, value) token list into an assignment dict.

    ``['a', 'True', 'b', '0']`` -> ``({'a': True, 'b': False}, 'a=True b=False')``.
    The single token ``['1']`` is the trivially-true assignment.
    Raises ValueError on odd length, repeated variables, or values other
    than True/False/1/0.
    """
    if len(lst) == 1 and lst[0] == '1':
        return {True : True}, 'True'
    if len(lst) % 2 != 0:
        raise ValueError('length of assignments not even')
    assignment = {}
    for idx in range(0, len(lst), 2):
        name, raw = lst[idx], lst[idx + 1]
        if name in assignment:
            raise ValueError('Double assignment of same variable')
        if raw in ('True', '1'):
            assignment[name] = True
        elif raw in ('False', '0'):
            assignment[name] = False
        else:
            raise ValueError('assignment var not True or False')
    pretty = ' '.join(f'{name}={val}' for name, val in assignment.items())
    return assignment, pretty
def test_and_analyze_sat(pred_model, dataset, in_vocab, out_vocab, log_name, **kwargs):
    """Evaluate predicted SAT assignments syntactically and semantically.

    For each example, decodes the formula and the predicted/gold assignments,
    counts exact (syntactic) matches, and otherwise substitutes the predicted
    assignment into the formula via sympy to check semantic correctness.
    Results are written to ``<run_dir>/<log_name>.log`` and a summary is
    printed; the EVAL_PARAMS subset of kwargs is saved as JSON.
    Requires kwargs 'job_dir', 'run_name', 'pos_enc'.
    """
    #from jma.data.sat_generator import spot_to_pyaiger, is_model
    import sympy.logic as syl
    logdir = path.join(kwargs['job_dir'], kwargs['run_name'])
    tf.io.gfile.makedirs(logdir)
    with open(path.join(logdir, log_name + '.log'), 'w') as log_file:
        # Outcome counters over the whole dataset
        res = {'invalid': 0, 'incorrect': 0, 'syn_correct': 0, 'sem_correct': 0}
        for x in dataset:
            if kwargs['pos_enc'] is None:
                data, label_ = x
                decodings = pred_model(data, training=False)
            else:
                # Dataset additionally yields a positional encoding tensor
                data, pe, label_ = x
                decodings = pred_model([data, pe], training=False)
            for i in range(decodings.shape[0]):
                formula = in_vocab.decode(list(data[i, :]), as_list=True)
                pred = out_vocab.decode(list(decodings[i, :]), as_list=True)
                label = out_vocab.decode(list(label_[i, :]), as_list=True)
                formula_obj = ltl_parser.ltl_formula(''.join(formula), 'network-polish')
                formula_str = formula_obj.to_str('spot')
                _, pretty_label_ass = get_ass(label)
                try:
                    ass, pretty_ass = get_ass(pred)
                except ValueError as e:
                    # Prediction is not even a well-formed assignment
                    res['invalid'] += 1
                    msg = f"INVALID ({str(e)})\nFormula: {formula_str}\nPred: {' '.join(pred)}\nLabel: {pretty_label_ass}\n"
                    log_file.write(msg)
                    continue
                if pred == label:
                    # Token-for-token match with the gold assignment
                    res['syn_correct'] += 1
                    msg = f"SYNTACTICALLY CORRECT\nFormula: {formula_str}\nPred: {pretty_ass}\nLabel: {pretty_label_ass}\n"
                    # log_file.write(msg)
                    continue
                # semantic checking
                formula_sympy = formula_obj.to_sympy()
                try:
                    substituted = syl.simplify_logic(formula_sympy.subs(ass))
                    holds = substituted == syl.true
                except KeyError as e:
                    # Prediction assigned a variable the formula does not contain
                    res['incorrect'] += 1
                    msg = f"INCORRECT (var {str(e)} not in formula)\nFormula: {formula_str}\nPred: {pretty_ass}\nLabel: {pretty_label_ass}\n"
                    log_file.write(msg)
                    continue
                if holds:
                    res['sem_correct'] += 1
                    msg = f"SEMANTICALLY CORRECT\nFormula: {formula_str}\nPred: {pretty_ass}\nLabel: {pretty_label_ass}\n"
                    log_file.write(msg)
                else:
                    res['incorrect'] += 1
                    msg = f"INCORRECT\nFormula: {formula_str}\nPred: {pretty_ass}\nLabel: {pretty_label_ass}\nRemaining formula: {substituted}\n"
                    log_file.write(msg)
        total = sum(res.values())
        correct = res['syn_correct'] + res['sem_correct']
        msg = (f"Correct: {correct/total*100:.1f}%, {correct} out of {total}\nSyntactically correct: {res['syn_correct']/total*100:.1f}%\nSemantically correct: {res['sem_correct']/total*100:.1f}%\n"
               f"Incorrect: {res['incorrect']/total*100:.1f}%\nInvalid: {res['invalid']/total*100:.1f}%\n")
        log_file.write(msg)
        print(msg, end='')
    with tf.io.gfile.GFile(path.join(logdir, log_name + '_params.json'), 'w') as f:
        d = { k : v for k, v in kwargs.items() if k in EVAL_PARAMS}
        f.write(json.dumps(d, indent=4) + '\n')
| [
"sys.exit",
"tensorflow.io.gfile.remove",
"tensorflow.io.gfile.GFile",
"argparse.ArgumentParser",
"subprocess.Popen",
"json.dumps",
"numpy.random.seed",
"sys.stdout.flush",
"random.randint",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.expand_dims",
"tensorflow.random.set_seed",
"te... | [((306, 322), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (320, 322), False, 'from argparse import ArgumentParser\n'), ((3607, 3658), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (3651, 3658), True, 'import tensorflow as tf\n'), ((4413, 4440), 'random.seed', 'random.seed', (["kwargs['seed']"], {}), "(kwargs['seed'])\n", (4424, 4440), False, 'import random\n'), ((4445, 4475), 'numpy.random.seed', 'np.random.seed', (["kwargs['seed']"], {}), "(kwargs['seed'])\n", (4459, 4475), True, 'import numpy as np\n'), ((4480, 4514), 'tensorflow.random.set_seed', 'tf.random.set_seed', (["kwargs['seed']"], {}), "(kwargs['seed'])\n", (4498, 4514), True, 'import tensorflow as tf\n'), ((4646, 4674), 'os.path.join', 'path.join', (['job_dir', 'run_name'], {}), '(job_dir, run_name)\n', (4655, 4674), True, 'import os.path as path\n'), ((4679, 4707), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['logdir'], {}), '(logdir)\n', (4699, 4707), True, 'import tensorflow as tf\n'), ((5035, 5078), 'os.path.join', 'path.join', (['job_dir', 'run_name', '"""checkpoints"""'], {}), "(job_dir, run_name, 'checkpoints')\n", (5044, 5078), True, 'import os.path as path\n'), ((5596, 5718), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['filepath'], {'save_weights_only': 'save_weights_only', 'save_best_only': "(save_only == 'best')"}), "(filepath, save_weights_only=\n save_weights_only, save_best_only=save_only == 'best')\n", (5630, 5718), True, 'import tensorflow as tf\n'), ((5920, 5959), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', (['log_dir'], {}), '(log_dir)\n', (5950, 5959), True, 'import tensorflow as tf\n'), ((6377, 6425), 'os.path.join', 'path.join', (["kwargs['job_dir']", "kwargs['run_name']"], {}), "(kwargs['job_dir'], kwargs['run_name'])\n", (6386, 6425), True, 'import os.path as path\n'), ((6430, 
6459), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['plotdir'], {}), '(plotdir)\n', (6450, 6459), True, 'import tensorflow as tf\n'), ((6738, 6913), 'subprocess.Popen', 'subprocess.Popen', (["(['python3', '-m', 'dlsgs.utils.trace_check'] + proc_args)"], {'stdin': 'subprocess.PIPE', 'stdout': 'None', 'stderr': 'None', 'universal_newlines': '(True)', 'bufsize': '(10000000)'}), "(['python3', '-m', 'dlsgs.utils.trace_check'] + proc_args,\n stdin=subprocess.PIPE, stdout=None, stderr=None, universal_newlines=\n True, bufsize=10000000)\n", (6754, 6913), False, 'import subprocess\n'), ((8027, 8045), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8043, 8045), False, 'import sys\n'), ((8174, 8216), 'tensorflow.io.gfile.remove', 'tf.io.gfile.remove', (['"""tmp_test_results.png"""'], {}), "('tmp_test_results.png')\n", (8192, 8216), True, 'import tensorflow as tf\n'), ((8322, 8364), 'tensorflow.io.gfile.remove', 'tf.io.gfile.remove', (['"""tmp_test_results.svg"""'], {}), "('tmp_test_results.svg')\n", (8340, 8364), True, 'import tensorflow as tf\n'), ((9255, 9303), 'os.path.join', 'path.join', (["kwargs['job_dir']", "kwargs['run_name']"], {}), "(kwargs['job_dir'], kwargs['run_name'])\n", (9264, 9303), True, 'import os.path as path\n'), ((9308, 9336), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['logdir'], {}), '(logdir)\n', (9328, 9336), True, 'import tensorflow as tf\n'), ((2447, 2475), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['path', '"""r"""'], {}), "(path, 'r')\n", (2464, 2475), True, 'import tensorflow as tf\n'), ((3284, 3347), 'os.path.join', 'path.join', (["kwargs['job_dir']", "kwargs['run_name']", '"""params.json"""'], {}), "(kwargs['job_dir'], kwargs['run_name'], 'params.json')\n", (3293, 3347), True, 'import os.path as path\n'), ((3771, 3782), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3779, 3782), False, 'import sys\n'), ((3812, 3863), 'tensorflow.config.experimental.set_memory_growth', 
'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (3852, 3863), True, 'import tensorflow as tf\n'), ((4270, 4283), 'random.seed', 'random.seed', ([], {}), '()\n', (4281, 4283), False, 'import random\n'), ((4309, 4339), 'random.randint', 'random.randint', (['(0)', '(2 ** 32 - 1)'], {}), '(0, 2 ** 32 - 1)\n', (4323, 4339), False, 'import random\n'), ((5775, 5803), 'os.path.join', 'path.join', (['job_dir', 'run_name'], {}), '(job_dir, run_name)\n', (5784, 5803), True, 'import os.path as path\n'), ((5879, 5907), 'os.path.join', 'path.join', (['job_dir', 'run_name'], {}), '(job_dir, run_name)\n', (5888, 5907), True, 'import os.path as path\n'), ((8114, 8152), 'os.path.join', 'path.join', (['plotdir', "(plot_name + '.png')"], {}), "(plotdir, plot_name + '.png')\n", (8123, 8152), True, 'import os.path as path\n'), ((8262, 8300), 'os.path.join', 'path.join', (['plotdir', "(plot_name + '.svg')"], {}), "(plotdir, plot_name + '.svg')\n", (8271, 8300), True, 'import os.path as path\n'), ((4879, 4911), 'os.path.join', 'path.join', (['logdir', '"""params.json"""'], {}), "(logdir, 'params.json')\n", (4888, 4911), True, 'import os.path as path\n'), ((9351, 9387), 'os.path.join', 'path.join', (['logdir', "(log_name + '.log')"], {}), "(logdir, log_name + '.log')\n", (9360, 9387), True, 'import os.path as path\n'), ((12390, 12434), 'os.path.join', 'path.join', (['logdir', "(log_name + '_params.json')"], {}), "(logdir, log_name + '_params.json')\n", (12399, 12434), True, 'import os.path as path\n'), ((4014, 4041), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['"""bin"""'], {}), "('bin')\n", (4034, 4041), True, 'import tensorflow as tf\n'), ((4940, 4963), 'json.dumps', 'json.dumps', (['d'], {'indent': '(4)'}), '(d, indent=4)\n', (4950, 4963), False, 'import json\n'), ((6687, 6724), 'os.path.join', 'path.join', (['plotdir', "(log_name + '.log')"], {}), "(plotdir, log_name + '.log')\n", (6696, 6724), True, 'import os.path as path\n'), 
((7228, 7256), 'numpy.expand_dims', 'np.expand_dims', (['pred'], {'axis': '(0)'}), '(pred, axis=0)\n', (7242, 7256), True, 'import numpy as np\n'), ((7280, 7308), 'tensorflow.expand_dims', 'tf.expand_dims', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (7294, 7308), True, 'import tensorflow as tf\n'), ((7333, 7362), 'tensorflow.expand_dims', 'tf.expand_dims', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (7347, 7362), True, 'import tensorflow as tf\n'), ((7818, 7836), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7834, 7836), False, 'import sys\n'), ((12531, 12554), 'json.dumps', 'json.dumps', (['d'], {'indent': '(4)'}), '(d, indent=4)\n', (12541, 12554), False, 'import json\n'), ((4075, 4107), 'os.path.join', 'path.join', (['binary_path', 'filename'], {}), '(binary_path, filename)\n', (4084, 4107), True, 'import os.path as path\n'), ((4109, 4135), 'os.path.join', 'path.join', (['"""bin"""', 'filename'], {}), "('bin', filename)\n", (4118, 4135), True, 'import os.path as path\n')] |
import numpy as np
from pyriemann.estimation import Covariances
from pyriemann.spatialfilters import CSP
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC
from moabb.pipelines.utils import FilterBank
# Grid-search a linear SVM's regularisation strength over 10 log-spaced values
parameters = {"C": np.logspace(-2, 2, 10)}
clf = GridSearchCV(SVC(kernel="linear"), parameters)
# Per-band feature extraction: OAS-regularised covariances followed by CSP
fb = FilterBank(make_pipeline(Covariances(estimator="oas"), CSP(nfilter=4)))
# Full pipeline: filter-bank CSP features -> mutual-information feature
# selection (top 10) -> tuned linear SVM
pipe = make_pipeline(fb, SelectKBest(score_func=mutual_info_classif, k=10), clf)

# this is what will be loaded
PIPELINE = {
    "name": "FBCSP + optSVM",
    "paradigms": ["FilterBankMotorImagery"],
    "pipeline": pipe,
}
| [
"sklearn.feature_selection.SelectKBest",
"pyriemann.estimation.Covariances",
"numpy.logspace",
"pyriemann.spatialfilters.CSP",
"sklearn.svm.SVC"
] | [((363, 385), 'numpy.logspace', 'np.logspace', (['(-2)', '(2)', '(10)'], {}), '(-2, 2, 10)\n', (374, 385), True, 'import numpy as np\n'), ((406, 426), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (409, 426), False, 'from sklearn.svm import SVC\n'), ((542, 591), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'score_func': 'mutual_info_classif', 'k': '(10)'}), '(score_func=mutual_info_classif, k=10)\n', (553, 591), False, 'from sklearn.feature_selection import SelectKBest, mutual_info_classif\n'), ((470, 498), 'pyriemann.estimation.Covariances', 'Covariances', ([], {'estimator': '"""oas"""'}), "(estimator='oas')\n", (481, 498), False, 'from pyriemann.estimation import Covariances\n'), ((500, 514), 'pyriemann.spatialfilters.CSP', 'CSP', ([], {'nfilter': '(4)'}), '(nfilter=4)\n', (503, 514), False, 'from pyriemann.spatialfilters import CSP\n')] |
import numpy as np
import pandas as pd
import rpy2
import rpy2.robjects as ro
# ro.r('library(devtools)') ## to use source_url
# ro.r('library(tidyverse)')
############################## rpy2 functions ##############################
# def pull(r):
# return r2p(ro.globalenv[r])
# def push(py,rname=None):
# import inspect
# def retrieve_name(var):
# for fi in reversed(inspect.stack()):
# names = [var_name for var_name, var_val in fi.frame.f_locals.items() if var_val is var]
# if len(names) > 0:
# return names[0]
# if rname==None: rname = retrieve_name(py)
# ro.globalenv[rname]=p2r(py)
# def p2r(A):
# from rpy2.robjects.vectors import FloatVector
# from rpy2.robjects.vectors import StrVector as s2r_temp
# def a2r_temp(a):
# if type(a) in {float,int,bool}: a=[a]
# a=list(a)
# rtn=FloatVector(a)
# return rtn
# def m2r_temp(A):
# A=np.matrix(A)
# Acopy=A.T.copy()
# nrow=Acopy.shape[0]
# Acopy.shape=(np.prod(Acopy.shape),1)
# rtn=ro.r.matrix(a2r_temp(list(Acopy)),ncol=nrow)
# return rtn
# from rpy2.robjects import pandas2ri
# from rpy2.robjects.conversion import localconverter
# def pd2r_temp(A):
# with localconverter(ro.default_converter + pandas2ri.converter):
# rtn = ro.conversion.py2rpy(A)
# return rtn
# if type(A)==type(pd.DataFrame(np.zeros([2,2]))):
# rtn=pd2r_temp(A)
# elif type(A)==type(np.matrix(np.zeros([2,2]))):
# rtn=m2r_temp(A)
# elif type(A)==type(np.zeros([2,2])):
# if len(A.shape)==1:
# rtn=a2r_temp(A)
# else:
# rtn=m2r_temp(A)
# elif type(A)==str:
# rtn=s2r_temp(A)
# elif type(pd.DataFrame(np.matrix(A)).iloc[0,0])==str:
# rtn=s2r_temp(pd.DataFrame(np.matrix(A)).T.iloc[:,0])
# else:
# rtn=a2r_temp(A)
# return rtn
# def r2p(A):
# from rpy2.robjects import pandas2ri
# from rpy2.robjects.conversion import localconverter
# def r2a_temp(a):
# return list(a)
# def r2m_temp(A):
# return np.matrix(A)
# def r2pd_temp(A):
# with localconverter(ro.default_converter + pandas2ri.converter):
# rtn = ro.conversion.rpy2py(A)
# return rtn
# ro.globalenv['temp']=A
# if ro.r('is.null(dim(temp))')[0]==False: ## in the cases of matrix or dataframe
# if ro.r('is.data.frame(temp)')[0]:
# rtn=r2pd_temp(A)
# elif ro.r('is.matrix(temp)')[0]:
# rtn=r2m_temp(A)
# else:
# print('I don\`t know which type of this data in R.')
# else:
# rtn=r2a_temp(A)
# ro.r('rm("temp")')
# return rtn
def cbind(*Mat):
    """Column-bind two or more matrix-like inputs (R-style ``cbind``).

    With a single argument, prints a warning and returns it unchanged;
    otherwise folds ``cbindtemp`` pairwise over the arguments left to right.
    """
    if len(Mat) == 1:
        print("You must enter two or more input objects.")
        return Mat[0]
    result = cbindtemp(Mat[0], Mat[1])
    for extra in Mat[2:]:
        result = cbindtemp(result, extra)
    return result
def rbind(*Mat):
    """Row-bind two or more matrix-like inputs (R-style ``rbind``).

    With a single argument, prints a warning and returns it unchanged;
    otherwise folds ``rbindtemp`` pairwise over the arguments left to right.
    """
    if len(Mat) == 1:
        print("You must enter two or more input objects.")
        return Mat[0]
    result = rbindtemp(Mat[0], Mat[1])
    for extra in Mat[2:]:
        result = rbindtemp(result, extra)
    return result
def cbindtemp(A,B):
    """Column-bind two inputs, coercing scalars/vectors so shapes line up.

    Classifies each operand as scalar / row vector / column vector / matrix
    and broadcasts or transposes as needed before ``np.hstack``.
    NOTE(review): ``a2c`` is not defined in the visible file — presumably a
    Series-to-column converter defined elsewhere; verify before use.
    """
    typ=['matrix','matrix']
    if isinstance(A, pd.core.series.Series):
        A=a2c(A)
    if isinstance(B, pd.core.series.Series):
        B=a2c(B)
    A=np.asmatrix(A)
    B=np.asmatrix(B)
    # handle row vectors
    if A.shape[0]==1: typ[0]='rowvec'
    if B.shape[0]==1: typ[1]='rowvec'
    # handle column vectors
    if A.shape[1]==1: typ[0]='colvec'
    if B.shape[1]==1: typ[1]='colvec'
    # handle scalars (a 1x1 matrix overrides the vector classifications)
    if A.shape==(1,1): typ[0]='scala'
    if B.shape==(1,1): typ[1]='scala'
    # Dispatch table over the (type-of-A, type-of-B) pair: broadcast scalars
    # to the partner's height, transpose row vectors so columns align.
    if typ==['scala','scala']: A=np.array(A); B=np.array(B)
    if typ==['scala','rowvec']: A=np.array(A);
    if typ==['scala','colvec']: A=np.full(B.shape,A[0,0]);
    if typ==['scala','matrix']: A=np.full((B.shape[0],1),A[0,0]);
    if typ==['rowvec','scala']: B=np.array(B)
    #if typ==['rowvec','rowvec']:
    if typ==['rowvec','colvec']: A=A.T
    if typ==['rowvec','matrix']: A=A.T
    if typ==['colvec','scala']: B=np.full(A.shape,B[0,0])
    if typ==['colvec','rowvec']: B=B.T
    #if typ==['colvec','colvec']:
    #if typ==['colvec','matrix']:
    if typ==['matrix','scala']: B=np.full((A.shape[0],1),B[0,0])
    if typ==['matrix','rowvec']: B=B.T
    #if typ==['matrix','colvec']:
    #if typ==['matrix','matrix']:
    return np.hstack([A,B])
def rbindtemp(A,B):
    """Row-bind two inputs, coercing scalars/vectors so shapes line up.

    Mirror image of ``cbindtemp``: classifies each operand as scalar /
    row vector / column vector / matrix, broadcasts scalars to the
    partner's width and transposes column vectors before ``np.vstack``.
    """
    typ=['matrix','matrix']
    A=np.asmatrix(A)
    B=np.asmatrix(B)
    # handle row vectors
    if A.shape[0]==1: typ[0]='rowvec'
    if B.shape[0]==1: typ[1]='rowvec'
    # handle column vectors
    if A.shape[1]==1: typ[0]='colvec'
    if B.shape[1]==1: typ[1]='colvec'
    # handle scalars (a 1x1 matrix overrides the vector classifications)
    if A.shape==(1,1): typ[0]='scala'
    if B.shape==(1,1): typ[1]='scala'
    if typ==['scala','scala']: A=np.array(A); B=np.array(B)
    if typ==['scala','rowvec']: A=np.full(B.shape,A[0,0]);
    if typ==['scala','colvec']: A=np.array(A);
    if typ==['scala','matrix']: A=np.full((1,B.shape[1]),A[0,0]);
    if typ==['rowvec','scala']: B=np.full((1,A.shape[1]),B[0,0]);
    #if typ==['rowvec','rowvec']:
    if typ==['rowvec','colvec']: B=B.T
    #if typ==['rowvec','matrix']:
    #if typ==['colvec','scala']:
    if typ==['colvec','rowvec']: A=A.T
    #if typ==['colvec','colvec']:
    if typ==['colvec','matrix']: A=A.T
    if typ==['matrix','scala']: B=np.full((1,A.shape[1]),B[0,0])
    #if typ==['matrix','rowvec']:
    if typ==['matrix','colvec']: B=B.T
    #if typ==['matrix','matrix']:
    return np.vstack([A,B])
def ids(pddata):
    """Print a numbered listing of the dataframe's column names via R's stringr.

    NOTE(review): relies on ``push`` and ``r2p``, which are only present in
    the commented-out rpy2 helper block above — confirm they are defined
    elsewhere, otherwise this raises NameError.
    """
    push(pddata.columns,"vname")
    print(r2p(ro.r("str_c(str_c('(',str_c(1:length(vname)-1),') ',vname),collapse='\n')"))[0])
def l2distance(X):
    """Return the n x n matrix of pairwise *squared* Euclidean distances.

    X is an (n, p) array-like of n points in p dimensions. Uses a fully
    vectorised broadcast; falls back to a row-by-row loop if the (n, n, p)
    intermediate does not fit in memory.
    """
    X = np.array(X)
    n = len(X)
    dists = np.array(np.zeros([n, n]))
    try:
        diffs = X[:, np.newaxis, :] - X[np.newaxis, :, :]
        dists = np.sum(diffs ** 2, axis=-1)
    except MemoryError:
        for row in np.arange(0, n):
            dists[row, :] = np.sum((X[row, :] - X[:, :]) ** 2, axis=1)
    return dists
| [
"numpy.hstack",
"numpy.asmatrix",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.vstack",
"numpy.full",
"rpy2.robjects.r",
"numpy.arange"
] | [((3685, 3699), 'numpy.asmatrix', 'np.asmatrix', (['A'], {}), '(A)\n', (3696, 3699), True, 'import numpy as np\n'), ((3706, 3720), 'numpy.asmatrix', 'np.asmatrix', (['B'], {}), '(B)\n', (3717, 3720), True, 'import numpy as np\n'), ((4804, 4821), 'numpy.hstack', 'np.hstack', (['[A, B]'], {}), '([A, B])\n', (4813, 4821), True, 'import numpy as np\n'), ((4885, 4899), 'numpy.asmatrix', 'np.asmatrix', (['A'], {}), '(A)\n', (4896, 4899), True, 'import numpy as np\n'), ((4906, 4920), 'numpy.asmatrix', 'np.asmatrix', (['B'], {}), '(B)\n', (4917, 4920), True, 'import numpy as np\n'), ((6000, 6017), 'numpy.vstack', 'np.vstack', (['[A, B]'], {}), '([A, B])\n', (6009, 6017), True, 'import numpy as np\n'), ((6205, 6216), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (6213, 6216), True, 'import numpy as np\n'), ((4071, 4082), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (4079, 4082), True, 'import numpy as np\n'), ((4086, 4097), 'numpy.array', 'np.array', (['B'], {}), '(B)\n', (4094, 4097), True, 'import numpy as np\n'), ((4132, 4143), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (4140, 4143), True, 'import numpy as np\n'), ((4180, 4205), 'numpy.full', 'np.full', (['B.shape', 'A[0, 0]'], {}), '(B.shape, A[0, 0])\n', (4187, 4205), True, 'import numpy as np\n'), ((4240, 4273), 'numpy.full', 'np.full', (['(B.shape[0], 1)', 'A[0, 0]'], {}), '((B.shape[0], 1), A[0, 0])\n', (4247, 4273), True, 'import numpy as np\n'), ((4308, 4319), 'numpy.array', 'np.array', (['B'], {}), '(B)\n', (4316, 4319), True, 'import numpy as np\n'), ((4476, 4501), 'numpy.full', 'np.full', (['A.shape', 'B[0, 0]'], {}), '(A.shape, B[0, 0])\n', (4483, 4501), True, 'import numpy as np\n'), ((4649, 4682), 'numpy.full', 'np.full', (['(A.shape[0], 1)', 'B[0, 0]'], {}), '((A.shape[0], 1), B[0, 0])\n', (4656, 4682), True, 'import numpy as np\n'), ((5271, 5282), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (5279, 5282), True, 'import numpy as np\n'), ((5286, 5297), 'numpy.array', 'np.array', (['B'], 
{}), '(B)\n', (5294, 5297), True, 'import numpy as np\n'), ((5332, 5357), 'numpy.full', 'np.full', (['B.shape', 'A[0, 0]'], {}), '(B.shape, A[0, 0])\n', (5339, 5357), True, 'import numpy as np\n'), ((5392, 5403), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (5400, 5403), True, 'import numpy as np\n'), ((5439, 5472), 'numpy.full', 'np.full', (['(1, B.shape[1])', 'A[0, 0]'], {}), '((1, B.shape[1]), A[0, 0])\n', (5446, 5472), True, 'import numpy as np\n'), ((5507, 5540), 'numpy.full', 'np.full', (['(1, A.shape[1])', 'B[0, 0]'], {}), '((1, A.shape[1]), B[0, 0])\n', (5514, 5540), True, 'import numpy as np\n'), ((5845, 5878), 'numpy.full', 'np.full', (['(1, A.shape[1])', 'B[0, 0]'], {}), '((1, A.shape[1]), B[0, 0])\n', (5852, 5878), True, 'import numpy as np\n'), ((6247, 6263), 'numpy.zeros', 'np.zeros', (['[n, n]'], {}), '([n, n])\n', (6255, 6263), True, 'import numpy as np\n'), ((6286, 6351), 'numpy.sum', 'np.sum', (['((X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2)'], {'axis': '(-1)'}), '((X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2, axis=-1)\n', (6292, 6351), True, 'import numpy as np\n'), ((3083, 3105), 'numpy.arange', 'np.arange', (['(2)', 'lenofMat'], {}), '(2, lenofMat)\n', (3092, 3105), True, 'import numpy as np\n'), ((3423, 3445), 'numpy.arange', 'np.arange', (['(2)', 'lenofMat'], {}), '(2, lenofMat)\n', (3432, 3445), True, 'import numpy as np\n'), ((6384, 6399), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (6393, 6399), True, 'import numpy as np\n'), ((6082, 6160), 'rpy2.robjects.r', 'ro.r', (['"""str_c(str_c(\'(\',str_c(1:length(vname)-1),\') \',vname),collapse=\'\n\')"""'], {}), '("""str_c(str_c(\'(\',str_c(1:length(vname)-1),\') \',vname),collapse=\'\n\')""")\n', (6086, 6160), True, 'import rpy2.robjects as ro\n'), ((6421, 6461), 'numpy.sum', 'np.sum', (['((X[i, :] - X[:, :]) ** 2)'], {'axis': '(1)'}), '((X[i, :] - X[:, :]) ** 2, axis=1)\n', (6427, 6461), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
from typing import Any, Callable, Dict, Optional
import logging
import numpy as np
logger = logging.getLogger(__name__)
_DEFAULT_NAME = 'metric'
_METRIC_FUNCTION_NONE = "Found 'None' instead of metric function"
_METRIC_FUNCTION_NOT_CALLABLE = "Object passed as metric function not callable"
_SAMPLE_PARAMS_NOT_DICT = "Sample parameters must be a dictionary"
class FunctionContainer:
    """A helper class for metrics.

    Parameters
    ----------
    func : Callable
        The metric function
    name : str
        The name of the metric. If ``None`` then the ``__name__``
        property of the ``func`` is used, or if that is not available
        a default is used.
    sample_params : dict[str,array_like]
        Sample parameters, which are to be sliced up along with
        ``y_true`` and ``y_pred``
    """

    def __init__(self,
                 func: Callable,
                 name: Optional[str],
                 sample_params: Optional[Dict[str, Any]]):
        """Validate and store the metric function, name and sample params."""
        if func is None:
            raise ValueError(_METRIC_FUNCTION_NONE)
        if not callable(func):
            raise ValueError(_METRIC_FUNCTION_NOT_CALLABLE)
        self._func = func

        # Resolve the display name: an explicit name wins, then the
        # function's __name__, finally the library-wide default.
        if name is not None:
            self._name = name
        elif hasattr(func, '__name__'):
            self._name = func.__name__
        else:
            logger.warning("Supplied 'func' had no __name__ attribute")
            self._name = _DEFAULT_NAME

        if sample_params is None:
            self._sample_params = dict()
        else:
            if not isinstance(sample_params, dict):
                raise ValueError(_SAMPLE_PARAMS_NOT_DICT)
            # Coerce any sample_params to being ndarrays for easy masking
            self._sample_params = {
                key: np.asarray(val)
                for key, val in sample_params.items()
                if val is not None
            }

    @property
    def func_(self) -> Callable:
        """Return the contained metric function."""
        return self._func

    @property
    def name_(self) -> str:
        """Return the name of the metric."""
        return self._name

    @property
    def sample_params_(self) -> Dict[str, np.ndarray]:
        """Return the dictionary of sample parameters (as ndarray)."""
        return self._sample_params

    def generate_sample_params_for_mask(self,
                                        mask: np.ndarray) -> Dict[str, np.ndarray]:
        """Return the sample parameters selected by the given mask."""
        return {key: val[mask] for key, val in self.sample_params_.items()}

    def evaluate(self, y_true, y_pred, mask: np.ndarray) -> Any:
        """Evaluate the metric for the given mask and input data.

        The mask will be applied to ``y_true``, ``y_pred`` and
        the sample parameters.
        """
        # Following are internal sanity checks
        assert isinstance(y_true, np.ndarray)
        assert isinstance(y_pred, np.ndarray)
        assert len(y_true) == len(y_pred)
        assert len(y_true) == len(mask)

        masked_params = self.generate_sample_params_for_mask(mask)
        return self.func_(y_true[mask], y_pred[mask], **masked_params)

    def evaluate_all(self, y_true, y_pred) -> Any:
        """Evaluate the metric on all data."""
        return self.func_(y_true, y_pred, **self.sample_params_)
| [
"logging.getLogger",
"numpy.asarray"
] | [((194, 221), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (211, 221), False, 'import logging\n'), ((2006, 2019), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (2016, 2019), True, 'import numpy as np\n')] |
import logging
import os
from abc import ABC
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from sklearn.metrics.pairwise import cosine_distances
logger = logging.getLogger()
class EvalDatasetMixin(ABC):
def store_scores_batch(self, q_idx, scores):
# fact idxs are already stored
if self.config.eval_use_logprobs:
scores = self.logits_2_logprobs(scores)
self.partials.at[q_idx, 'scores_batch'] = scores.cpu().numpy()
@staticmethod
def logits_2_logprobs(logits):
probs = logits.clone()
probs[~torch.isnan(logits)] = F.log_softmax(logits[~torch.isnan(logits)], dim=0)
# probs[logits == np.nan] = 0.0
return probs
@staticmethod
def stacked_array(old_array, array):
return np.vstack((old_array, array)) if len(old_array) > 0 else array[None, :]
def process_scores(self):
# process logits in batch -> add best facts to partial expl
# keep logits to rank considered but unselected facts
cols_to_edit = ['partial_expl', 'finished', 'scores_batch', 'scores', 'all_scores']
self.partials[cols_to_edit] = self.partials.apply(
lambda x: self.handle_row(x) if not x.finished else x[cols_to_edit],
axis=1, result_type='expand'
)
def handle_row(self, row):
assert not row.finished
selected_fact = self.select_fact(row)
partial = np.concatenate((row.partial_expl, [selected_fact]))
finished = selected_fact == self.stop_explanation_id
scores_batch = np.array([], dtype=float)
if self.config.average_scores_over_partials:
scores = self.stacked_array(row.scores, row.scores_batch)
else:
scores = row.scores_batch
all_scores = self.stacked_array(row.all_scores, row.scores_batch)
return partial, finished, scores_batch, scores, all_scores
def select_fact(self, row):
if len(row.scores_batch[~np.isnan(row.scores_batch)]) == 0:
return self.stop_explanation_id
# argsort will sort nan as greatest value
argsorted = np.argsort(row.scores_batch)[:len(row.scores_batch[~np.isnan(row.scores_batch)])]
selected_fact_idx, score = argsorted[-1], row.scores_batch[argsorted[-1]]
if selected_fact_idx == self.stop_explanation_id:
if (score < row.scores_batch[argsorted[-2]] + self.config.stop_delta
or len(row.partial_expl) < self.config.min_expl_length):
selected_fact_idx = argsorted[-2]
logger.info('Stop selected but undone. Score: %s - 2nd score: %s - delta: %s - '
'length: %s - min length: %s'
% (score, row.scores_batch[argsorted[-2]], self.config.stop_delta,
len(row.partial_expl), self.config.min_expl_length))
return selected_fact_idx
def is_finished(self):
is_finished = max(self.partials.partial_expl.apply(len)) >= self.config.max_expl_length
if self.config.predict_stop_expl:
is_finished = is_finished or all(self.partials.partial_expl.apply(
lambda part: len(part) > 0 and part[-1] == self.stop_explanation_id
))
return is_finished
def rank_all(self):
# squash scores -> but make sure facts that were considered only in some iterations dont get 0 for
# for skipped iterations (done by np.nanmean())
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
self.partials.scores = self.partials.scores.apply(
lambda scores: np.nanmean(scores, axis=0) if len(scores.shape) > 1 else scores
)
# nan > any real nb
self.partials['fact_idxs_sorted'] = self.partials.apply(
lambda row: self.prepare_fact_idxs(row),
axis=1
)
result = {
self.qa_idx_2_uid[q_idx]: [
self.fact_idx_2_uid[f_idx] for f_idx in f_idxs if f_idx != self.stop_explanation_id
]
for q_idx, f_idxs in enumerate(self.partials.fact_idxs_sorted)
}
self.reset_partials()
return result
def prepare_fact_idxs(self, row):
scores = row.scores.copy()
if self.config.rank_rest == 'random':
scores[np.isnan(scores)] = np.random.rand(np.count_nonzero(np.isnan(scores))) \
- 1.0 + scores[~np.isnan(scores)].min()
elif self.config.rank_rest == 'seq':
scores[np.isnan(scores)] = np.arange(-1, -np.count_nonzero(np.isnan(scores))-1, -1) \
+ scores[~np.isnan(scores)].min()
elif self.config.rank_rest == 'tf_idf':
# distances so positive and lower is better, after - : max is best is 0
tf_idf_dists = np.concatenate((- self.tf_idf_distances(row), [np.nan]))[np.isnan(scores)]
scores[np.isnan(scores)] = tf_idf_dists + scores[~np.isnan(scores)].min()
if not self.config.use_partial_expl_in_ranking:
# still only use non-nan because rank_rest might be none
sorted_all = np.flip(np.argsort(scores)[:len(scores[~np.isnan(scores)])])
elif not self.config.rank_scored_but_unused_2nd:
sorted_all = np.array(row.partial_expl)
else:
sorted_all = np.concatenate((
row.partial_expl,
np.setdiff1d(np.flip(np.argsort(scores)[:len(scores[~np.isnan(scores)])]),
# assume_unique=True because numpy sorts values otherwise
row.partial_expl, assume_unique=True)
if len(row.scores) > 0 else []
))
return sorted_all
def tf_idf_distances(self, row):
    """Cosine distances between (question + selected facts) and all facts.

    Builds a single tf-idf document from the stemmed question tokens plus the
    stemmed tokens of the already-selected explanation facts, then compares it
    against the pre-transformed fact matrix.
    """
    partial_expl = row.partial_expl[np.where(row.partial_expl != self.stop_explanation_id)]
    stemmed_f = ' '.join(self.fact_feats.stemmed.iloc[partial_expl].apply(lambda x: ' '.join(x)))
    stemmed_q = ' '.join(self.qa_feats.stemmed.iloc[row.q_idx])
    # NOTE(review): `stemmed_q + stemmed_f` concatenates without a separator,
    # so the last question token and the first fact token may merge — verify
    # whether a joining space was intended.
    transformed_expl = self.tfidf.vectorizer.transform([stemmed_q + stemmed_f])
    cos_distances = cosine_distances(transformed_expl, self.transformed_facts)
    return cos_distances[0]
def reset_partials(self):
    """Re-initialise the per-question bookkeeping dataframe ``self.partials``.

    One row per question (indexed like ``self.qa_feats``) holding empty
    partial explanations, empty score buffers, and ``finished = False``.
    When ``self.supply_gold`` is set, the gold facts column is carried over.
    """
    if self.supply_gold:
        fresh = self.qa_feats[['gold_facts']].copy().apply(np.array)
    else:
        fresh = pd.DataFrame(index=self.qa_feats.index)
    fresh['q_idx'] = fresh.index.copy()
    n_rows = len(fresh)
    fresh['partial_expl'] = [np.array([], dtype=int)] * n_rows
    # Three float buffers, all starting empty (column order matters for
    # downstream consumers, so keep the original insertion order).
    for column in ('scores_batch', 'scores', 'all_scores'):
        fresh[column] = [np.array([], dtype=float)] * n_rows
    fresh['finished'] = False
    self.partials = fresh
def save_predictions_dataframe(self, output_dir):
    """Persist ``self.partials`` to ``<output_dir>/predictions_df.bin`` via torch.save."""
    target_path = os.path.join(output_dir, 'predictions_df.bin')
    logger.info('Saving predictions dataframe to %s' % target_path)
    with open(target_path, 'wb') as out_file:
        torch.save(self.partials, out_file)
| [
"logging.getLogger",
"pandas.DataFrame",
"sklearn.metrics.pairwise.cosine_distances",
"numpy.where",
"os.path.join",
"warnings.catch_warnings",
"numpy.argsort",
"numpy.array",
"numpy.nanmean",
"numpy.isnan",
"numpy.vstack",
"numpy.concatenate",
"torch.save",
"warnings.simplefilter",
"tor... | [((194, 213), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (211, 213), False, 'import logging\n'), ((1479, 1530), 'numpy.concatenate', 'np.concatenate', (['(row.partial_expl, [selected_fact])'], {}), '((row.partial_expl, [selected_fact]))\n', (1493, 1530), True, 'import numpy as np\n'), ((1615, 1640), 'numpy.array', 'np.array', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (1623, 1640), True, 'import numpy as np\n'), ((6322, 6380), 'sklearn.metrics.pairwise.cosine_distances', 'cosine_distances', (['transformed_expl', 'self.transformed_facts'], {}), '(transformed_expl, self.transformed_facts)\n', (6338, 6380), False, 'from sklearn.metrics.pairwise import cosine_distances\n'), ((7127, 7173), 'os.path.join', 'os.path.join', (['output_dir', '"""predictions_df.bin"""'], {}), "(output_dir, 'predictions_df.bin')\n", (7139, 7173), False, 'import os\n'), ((809, 838), 'numpy.vstack', 'np.vstack', (['(old_array, array)'], {}), '((old_array, array))\n', (818, 838), True, 'import numpy as np\n'), ((2173, 2201), 'numpy.argsort', 'np.argsort', (['row.scores_batch'], {}), '(row.scores_batch)\n', (2183, 2201), True, 'import numpy as np\n'), ((3586, 3611), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (3609, 3611), False, 'import warnings\n'), ((3625, 3681), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (3646, 3681), False, 'import warnings\n'), ((5988, 6042), 'numpy.where', 'np.where', (['(row.partial_expl != self.stop_explanation_id)'], {}), '(row.partial_expl != self.stop_explanation_id)\n', (5996, 6042), True, 'import numpy as np\n'), ((6586, 6625), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.qa_feats.index'}), '(index=self.qa_feats.index)\n', (6598, 6625), True, 'import pandas as pd\n'), ((7295, 7323), 'torch.save', 'torch.save', (['self.partials', 'f'], {}), '(self.partials, f)\n', (7305, 7323), False, 'import 
torch\n'), ((599, 618), 'torch.isnan', 'torch.isnan', (['logits'], {}), '(logits)\n', (610, 618), False, 'import torch\n'), ((4478, 4494), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (4486, 4494), True, 'import numpy as np\n'), ((5460, 5486), 'numpy.array', 'np.array', (['row.partial_expl'], {}), '(row.partial_expl)\n', (5468, 5486), True, 'import numpy as np\n'), ((6713, 6736), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (6721, 6736), True, 'import numpy as np\n'), ((6790, 6815), 'numpy.array', 'np.array', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (6798, 6815), True, 'import numpy as np\n'), ((6863, 6888), 'numpy.array', 'np.array', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (6871, 6888), True, 'import numpy as np\n'), ((6940, 6965), 'numpy.array', 'np.array', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (6948, 6965), True, 'import numpy as np\n'), ((644, 663), 'torch.isnan', 'torch.isnan', (['logits'], {}), '(logits)\n', (655, 663), False, 'import torch\n'), ((4694, 4710), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (4702, 4710), True, 'import numpy as np\n'), ((5325, 5343), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (5335, 5343), True, 'import numpy as np\n'), ((2023, 2049), 'numpy.isnan', 'np.isnan', (['row.scores_batch'], {}), '(row.scores_batch)\n', (2031, 2049), True, 'import numpy as np\n'), ((3776, 3802), 'numpy.nanmean', 'np.nanmean', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (3786, 3802), True, 'import numpy as np\n'), ((5062, 5078), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (5070, 5078), True, 'import numpy as np\n'), ((5099, 5115), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (5107, 5115), True, 'import numpy as np\n'), ((2225, 2251), 'numpy.isnan', 'np.isnan', (['row.scores_batch'], {}), '(row.scores_batch)\n', (2233, 2251), True, 'import numpy as np\n'), ((4530, 4546), 'numpy.isnan', 'np.isnan', 
(['scores'], {}), '(scores)\n', (4538, 4546), True, 'import numpy as np\n'), ((4606, 4622), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (4614, 4622), True, 'import numpy as np\n'), ((4746, 4762), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (4754, 4762), True, 'import numpy as np\n'), ((4822, 4838), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (4830, 4838), True, 'import numpy as np\n'), ((5357, 5373), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (5365, 5373), True, 'import numpy as np\n'), ((5142, 5158), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (5150, 5158), True, 'import numpy as np\n'), ((5614, 5632), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (5624, 5632), True, 'import numpy as np\n'), ((5646, 5662), 'numpy.isnan', 'np.isnan', (['scores'], {}), '(scores)\n', (5654, 5662), True, 'import numpy as np\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: <NAME>, <NAME>, <NAME>
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from pymor.operators.numpy import NumpyMatrixOperator
from pymor.vectorarrays.numpy import NumpyVectorArray
def random_integers(count, seed):
    """Return ``count`` reproducible pseudo-random ints drawn from [0, 3200)."""
    np.random.seed(seed)
    draws = np.random.randint(0, 3200, count)
    return list(draws)
def numpy_matrix_operator_with_arrays_factory(dim_source, dim_range, count_source, count_range, seed):
    """Build a random |NumpyMatrixOperator| plus matching source/range test arrays.

    Returns ``(operator, None, source_array, range_array)``; the second slot
    (the parameter) is unused for plain matrix operators.
    """
    np.random.seed(seed)
    operator = NumpyMatrixOperator(np.random.random((dim_range, dim_source)))
    source_array = NumpyVectorArray(np.random.random((count_source, dim_source)), copy=False)
    range_array = NumpyVectorArray(np.random.random((count_range, dim_range)), copy=False)
    return operator, None, source_array, range_array
# Argument tuples for the NumPy operator factory. Materialised as a list
# (instead of a bare zip iterator): in Python 3 a zip object is exhausted
# after a single pass, which left the SECOND comprehension below iterating
# nothing and produced an empty numpy_matrix_operator_generators list.
numpy_matrix_operator_with_arrays_factory_arguments = \
    list(zip([0, 0, 2, 10],              # dim_source
             [0, 1, 4, 10],              # dim_range
             [3, 3, 3, 3],               # count_source
             [3, 3, 3, 3],               # count_range
             random_integers(4, 44)))    # seed

# `args=args` freezes each tuple at definition time (late-binding-closure fix).
numpy_matrix_operator_with_arrays_generators = \
    [lambda args=args: numpy_matrix_operator_with_arrays_factory(*args)
     for args in numpy_matrix_operator_with_arrays_factory_arguments]

numpy_matrix_operator_generators = \
    [lambda args=args: numpy_matrix_operator_with_arrays_factory(*args)[0:2]
     for args in numpy_matrix_operator_with_arrays_factory_arguments]
def thermalblock_factory(xblocks, yblocks, diameter, seed):
    """Discretize a thermal-block problem and draw random snapshot data.

    Returns ``(operator, mu, U, V, h1_product, l2_product)`` where U/V are
    interpolated snapshots of a fixed generic function at random exponents.
    """
    from pymor.analyticalproblems.thermalblock import ThermalBlockProblem
    from pymor.discretizers.elliptic import discretize_elliptic_cg
    from pymor.functions.basic import GenericFunction
    from pymor.operators.cg import InterpolationOperator
    problem = ThermalBlockProblem((xblocks, yblocks))
    discretization, disc_data = discretize_elliptic_cg(problem, diameter)
    func = GenericFunction(lambda X, mu: X[..., 0]**mu['exp'] + X[..., 1],
                           dim_domain=2, parameter_type={'exp': tuple()})
    interpolator = InterpolationOperator(disc_data['grid'], func)
    U = discretization.operator.source.empty()
    V = discretization.operator.range.empty()
    np.random.seed(seed)
    for exponent in np.random.random(5):
        U.append(interpolator.as_vector(exponent))
    for exponent in np.random.random(6):
        V.append(interpolator.as_vector(exponent))
    mu = next(discretization.parameter_space.sample_randomly(1, seed=seed))
    return (discretization.operator, mu, U, V,
            discretization.h1_product, discretization.l2_product)
def thermalblock_assemble_factory(xblocks, yblocks, diameter, seed):
    """Like thermalblock_factory, but returns the operator pre-assembled at mu."""
    op, mu, U, V, sp, rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    assembled = op.assemble(mu)
    return assembled, None, U, V, sp, rp
def thermalblock_concatenation_factory(xblocks, yblocks, diameter, seed):
    """Thermal-block operator composed (left) with its H1 product operator."""
    from pymor.operators.constructions import Concatenation
    op, mu, U, V, sp, rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    composed = Concatenation(sp, op)
    return composed, mu, U, V, sp, rp
def thermalblock_identity_factory(xblocks, yblocks, diameter, seed):
    """Identity operator on the thermal-block solution space (no parameter)."""
    from pymor.operators.constructions import IdentityOperator
    _, _, U, V, sp, rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    return IdentityOperator(U.space), None, U, V, sp, rp
def thermalblock_vectorarray_factory(transposed, xblocks, yblocks, diameter, seed):
    """Wrap the thermal-block snapshots U as a VectorArrayOperator.

    When ``transposed`` the operator maps the other way, so the test arrays
    and the source/range products are swapped accordingly.
    """
    from pymor.operators.constructions import VectorArrayOperator
    _, _, fac_U, fac_V, fac_sp, fac_rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    op = VectorArrayOperator(fac_U, transposed)
    if transposed:
        U, V = fac_V, NumpyVectorArray(np.random.random((7, op.range.dim)), copy=False)
        sp, rp = fac_rp, NumpyMatrixOperator(np.eye(op.range.dim) * 2)
    else:
        U, V = NumpyVectorArray(np.random.random((7, op.source.dim)), copy=False), fac_V
        sp, rp = NumpyMatrixOperator(np.eye(op.source.dim) * 2), fac_rp
    return op, None, U, V, sp, rp
def thermalblock_vector_factory(xblocks, yblocks, diameter, seed):
    """Wrap the first thermal-block snapshot as a VectorOperator."""
    from pymor.operators.constructions import VectorOperator
    _, _, snapshots, V, _, rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    op = VectorOperator(snapshots.copy(ind=0))
    U = NumpyVectorArray(np.random.random((7, 1)), copy=False)
    sp = NumpyMatrixOperator(np.eye(1) * 2)
    return op, None, U, V, sp, rp
def thermalblock_vectorfunc_factory(product, xblocks, yblocks, diameter, seed):
    """Wrap the first snapshot as a VectorFunctional (w.r.t. the H1 product if ``product``)."""
    from pymor.operators.constructions import VectorFunctional
    _, _, snapshots, fac_V, fac_sp, fac_rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    op = VectorFunctional(snapshots.copy(ind=0), product=fac_sp if product else None)
    new_V = NumpyVectorArray(np.random.random((7, 1)), copy=False)
    new_rp = NumpyMatrixOperator(np.eye(1) * 2)
    return op, None, fac_V, new_V, fac_rp, new_rp
def thermalblock_fixedparam_factory(xblocks, yblocks, diameter, seed):
    """Thermal-block operator with its parameter mu baked in."""
    from pymor.operators.constructions import FixedParameterOperator
    op, mu, U, V, sp, rp = thermalblock_factory(xblocks, yblocks, diameter, seed)
    return FixedParameterOperator(op, mu=mu), None, U, V, sp, rp
# (xblocks, yblocks, diameter, seed) argument sets shared by all factories.
thermalblock_factory_arguments = \
    [(2, 2, 1./2., 333),
     (1, 1, 1./4., 444)]

# Naming convention for the lists below:
#   *_operator_generators                          -> (op, mu) only         [0:2]
#   *_operator_with_arrays_generators              -> (op, mu, U, V)        [0:4]
#   *_operator_with_arrays_and_products_generators -> full 6-tuple
# `args=args` binds each argument tuple at definition time (late-binding fix).
thermalblock_operator_generators = \
    [lambda args=args: thermalblock_factory(*args)[0:2] for args in thermalblock_factory_arguments]

thermalblock_operator_with_arrays_generators = \
    [lambda args=args: thermalblock_factory(*args)[0:4] for args in thermalblock_factory_arguments]

thermalblock_operator_with_arrays_and_products_generators = \
    [lambda args=args: thermalblock_factory(*args) for args in thermalblock_factory_arguments]

thermalblock_assemble_operator_generators = \
    [lambda args=args: thermalblock_assemble_factory(*args)[0:2] for args in thermalblock_factory_arguments]

thermalblock_assemble_operator_with_arrays_generators = \
    [lambda args=args: thermalblock_assemble_factory(*args)[0:4] for args in thermalblock_factory_arguments]

thermalblock_assemble_operator_with_arrays_and_products_generators = \
    [lambda args=args: thermalblock_assemble_factory(*args) for args in thermalblock_factory_arguments]

thermalblock_concatenation_operator_generators = \
    [lambda args=args: thermalblock_concatenation_factory(*args)[0:2] for args in thermalblock_factory_arguments]

thermalblock_concatenation_operator_with_arrays_generators = \
    [lambda args=args: thermalblock_concatenation_factory(*args)[0:4] for args in thermalblock_factory_arguments]

thermalblock_concatenation_operator_with_arrays_and_products_generators = \
    [lambda args=args: thermalblock_concatenation_factory(*args) for args in thermalblock_factory_arguments]

thermalblock_identity_operator_generators = \
    [lambda args=args: thermalblock_identity_factory(*args)[0:2] for args in thermalblock_factory_arguments]

thermalblock_identity_operator_with_arrays_generators = \
    [lambda args=args: thermalblock_identity_factory(*args)[0:4] for args in thermalblock_factory_arguments]

thermalblock_identity_operator_with_arrays_and_products_generators = \
    [lambda args=args: thermalblock_identity_factory(*args) for args in thermalblock_factory_arguments]

# The vectorarray/vectorfunc factories take an extra leading bool
# (transposed / product), so both variants are generated.
thermalblock_vectorarray_operator_generators = \
    [lambda args=args: thermalblock_vectorarray_factory(False, *args)[0:2] for args in thermalblock_factory_arguments] + \
    [lambda args=args: thermalblock_vectorarray_factory(True, *args)[0:2] for args in thermalblock_factory_arguments]

thermalblock_vectorarray_operator_with_arrays_generators = \
    [lambda args=args: thermalblock_vectorarray_factory(False, *args)[0:4] for args in thermalblock_factory_arguments] + \
    [lambda args=args: thermalblock_vectorarray_factory(True, *args)[0:4] for args in thermalblock_factory_arguments]

thermalblock_vectorarray_operator_with_arrays_and_products_generators = \
    [lambda args=args: thermalblock_vectorarray_factory(False, *args) for args in thermalblock_factory_arguments] + \
    [lambda args=args: thermalblock_vectorarray_factory(True, *args) for args in thermalblock_factory_arguments]

thermalblock_vector_operator_generators = \
    [lambda args=args: thermalblock_vector_factory(*args)[0:2] for args in thermalblock_factory_arguments]

thermalblock_vector_operator_with_arrays_generators = \
    [lambda args=args: thermalblock_vector_factory(*args)[0:4] for args in thermalblock_factory_arguments]

thermalblock_vector_operator_with_arrays_and_products_generators = \
    [lambda args=args: thermalblock_vector_factory(*args) for args in thermalblock_factory_arguments]

thermalblock_vectorfunc_operator_generators = \
    [lambda args=args: thermalblock_vectorfunc_factory(False, *args)[0:2] for args in thermalblock_factory_arguments] + \
    [lambda args=args: thermalblock_vectorfunc_factory(True, *args)[0:2] for args in thermalblock_factory_arguments]

thermalblock_vectorfunc_operator_with_arrays_generators = \
    [lambda args=args: thermalblock_vectorfunc_factory(False, *args)[0:4] for args in thermalblock_factory_arguments] + \
    [lambda args=args: thermalblock_vectorfunc_factory(True, *args)[0:4] for args in thermalblock_factory_arguments]

thermalblock_vectorfunc_operator_with_arrays_and_products_generators = \
    [lambda args=args: thermalblock_vectorfunc_factory(False, *args) for args in thermalblock_factory_arguments] + \
    [lambda args=args: thermalblock_vectorfunc_factory(True, *args) for args in thermalblock_factory_arguments]

thermalblock_fixedparam_operator_generators = \
    [lambda args=args: thermalblock_fixedparam_factory(*args)[0:2] for args in thermalblock_factory_arguments]

thermalblock_fixedparam_operator_with_arrays_generators = \
    [lambda args=args: thermalblock_fixedparam_factory(*args)[0:4] for args in thermalblock_factory_arguments]

thermalblock_fixedparam_operator_with_arrays_and_products_generators = \
    [lambda args=args: thermalblock_fixedparam_factory(*args) for args in thermalblock_factory_arguments]
@pytest.fixture(params=thermalblock_operator_with_arrays_and_products_generators +
                       thermalblock_assemble_operator_with_arrays_and_products_generators +
                       thermalblock_concatenation_operator_with_arrays_and_products_generators +
                       thermalblock_identity_operator_with_arrays_and_products_generators +
                       thermalblock_vectorarray_operator_with_arrays_and_products_generators +
                       thermalblock_vector_operator_with_arrays_and_products_generators +
                       thermalblock_vectorfunc_operator_with_arrays_and_products_generators +
                       thermalblock_fixedparam_operator_with_arrays_and_products_generators)
def operator_with_arrays_and_products(request):
    """Parametrised fixture yielding (op, mu, U, V, sp, rp) for every factory."""
    return request.param()
@pytest.fixture(params=numpy_matrix_operator_with_arrays_generators +
                       thermalblock_operator_with_arrays_generators +
                       thermalblock_assemble_operator_with_arrays_generators +
                       thermalblock_concatenation_operator_with_arrays_generators +
                       thermalblock_identity_operator_with_arrays_generators +
                       thermalblock_vectorarray_operator_with_arrays_generators +
                       thermalblock_vector_operator_with_arrays_generators +
                       thermalblock_vectorfunc_operator_with_arrays_generators +
                       thermalblock_fixedparam_operator_with_arrays_generators)
def operator_with_arrays(request):
    """Parametrised fixture yielding (op, mu, U, V) for every factory."""
    return request.param()
@pytest.fixture(params=numpy_matrix_operator_generators +
                       thermalblock_operator_generators +
                       thermalblock_assemble_operator_generators +
                       thermalblock_concatenation_operator_generators +
                       thermalblock_identity_operator_generators +
                       thermalblock_vectorarray_operator_generators +
                       thermalblock_vector_operator_generators +
                       thermalblock_vectorfunc_operator_generators +
                       thermalblock_fixedparam_operator_generators)
def operator(request):
    """Parametrised fixture yielding (op, mu) for every factory."""
    return request.param()
| [
"numpy.eye",
"pymor.operators.constructions.Concatenation",
"numpy.random.random",
"pymor.operators.cg.InterpolationOperator",
"pymor.operators.constructions.FixedParameterOperator",
"pymor.discretizers.elliptic.discretize_elliptic_cg",
"numpy.random.randint",
"numpy.random.seed",
"pytest.fixture",
... | [((9939, 10545), 'pytest.fixture', 'pytest.fixture', ([], {'params': '(thermalblock_operator_with_arrays_and_products_generators +\n thermalblock_assemble_operator_with_arrays_and_products_generators +\n thermalblock_concatenation_operator_with_arrays_and_products_generators +\n thermalblock_identity_operator_with_arrays_and_products_generators +\n thermalblock_vectorarray_operator_with_arrays_and_products_generators +\n thermalblock_vector_operator_with_arrays_and_products_generators +\n thermalblock_vectorfunc_operator_with_arrays_and_products_generators +\n thermalblock_fixedparam_operator_with_arrays_and_products_generators)'}), '(params=\n thermalblock_operator_with_arrays_and_products_generators +\n thermalblock_assemble_operator_with_arrays_and_products_generators +\n thermalblock_concatenation_operator_with_arrays_and_products_generators +\n thermalblock_identity_operator_with_arrays_and_products_generators +\n thermalblock_vectorarray_operator_with_arrays_and_products_generators +\n thermalblock_vector_operator_with_arrays_and_products_generators +\n thermalblock_vectorfunc_operator_with_arrays_and_products_generators +\n thermalblock_fixedparam_operator_with_arrays_and_products_generators)\n', (9953, 10545), False, 'import pytest\n'), ((10752, 11300), 'pytest.fixture', 'pytest.fixture', ([], {'params': '(numpy_matrix_operator_with_arrays_generators +\n thermalblock_operator_with_arrays_generators +\n thermalblock_assemble_operator_with_arrays_generators +\n thermalblock_concatenation_operator_with_arrays_generators +\n thermalblock_identity_operator_with_arrays_generators +\n thermalblock_vectorarray_operator_with_arrays_generators +\n thermalblock_vector_operator_with_arrays_generators +\n thermalblock_vectorfunc_operator_with_arrays_generators +\n thermalblock_fixedparam_operator_with_arrays_generators)'}), '(params=numpy_matrix_operator_with_arrays_generators +\n thermalblock_operator_with_arrays_generators +\n 
thermalblock_assemble_operator_with_arrays_generators +\n thermalblock_concatenation_operator_with_arrays_generators +\n thermalblock_identity_operator_with_arrays_generators +\n thermalblock_vectorarray_operator_with_arrays_generators +\n thermalblock_vector_operator_with_arrays_generators +\n thermalblock_vectorfunc_operator_with_arrays_generators +\n thermalblock_fixedparam_operator_with_arrays_generators)\n', (10766, 11300), False, 'import pytest\n'), ((11518, 11958), 'pytest.fixture', 'pytest.fixture', ([], {'params': '(numpy_matrix_operator_generators + thermalblock_operator_generators +\n thermalblock_assemble_operator_generators +\n thermalblock_concatenation_operator_generators +\n thermalblock_identity_operator_generators +\n thermalblock_vectorarray_operator_generators +\n thermalblock_vector_operator_generators +\n thermalblock_vectorfunc_operator_generators +\n thermalblock_fixedparam_operator_generators)'}), '(params=numpy_matrix_operator_generators +\n thermalblock_operator_generators +\n thermalblock_assemble_operator_generators +\n thermalblock_concatenation_operator_generators +\n thermalblock_identity_operator_generators +\n thermalblock_vectorarray_operator_generators +\n thermalblock_vector_operator_generators +\n thermalblock_vectorfunc_operator_generators +\n thermalblock_fixedparam_operator_generators)\n', (11532, 11958), False, 'import pytest\n'), ((436, 456), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (450, 456), True, 'import numpy as np\n'), ((617, 637), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (631, 637), True, 'import numpy as np\n'), ((1885, 1924), 'pymor.analyticalproblems.thermalblock.ThermalBlockProblem', 'ThermalBlockProblem', (['(xblocks, yblocks)'], {}), '((xblocks, yblocks))\n', (1904, 1924), False, 'from pymor.analyticalproblems.thermalblock import ThermalBlockProblem\n'), ((1941, 1976), 'pymor.discretizers.elliptic.discretize_elliptic_cg', 'discretize_elliptic_cg', (['p', 
'diameter'], {}), '(p, diameter)\n', (1963, 1976), False, 'from pymor.discretizers.elliptic import discretize_elliptic_cg\n'), ((2130, 2170), 'pymor.operators.cg.InterpolationOperator', 'InterpolationOperator', (["d_data['grid']", 'f'], {}), "(d_data['grid'], f)\n", (2151, 2170), False, 'from pymor.operators.cg import InterpolationOperator\n'), ((2242, 2262), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2256, 2262), True, 'import numpy as np\n'), ((2278, 2297), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (2294, 2297), True, 'import numpy as np\n'), ((2351, 2370), 'numpy.random.random', 'np.random.random', (['(6)'], {}), '(6)\n', (2367, 2370), True, 'import numpy as np\n'), ((2947, 2968), 'pymor.operators.constructions.Concatenation', 'Concatenation', (['sp', 'op'], {}), '(sp, op)\n', (2960, 2968), False, 'from pymor.operators.constructions import Concatenation\n'), ((3513, 3547), 'pymor.operators.constructions.VectorArrayOperator', 'VectorArrayOperator', (['U', 'transposed'], {}), '(U, transposed)\n', (3532, 3547), False, 'from pymor.operators.constructions import VectorArrayOperator\n'), ((473, 506), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3200)', 'count'], {}), '(0, 3200, count)\n', (490, 506), True, 'import numpy as np\n'), ((667, 708), 'numpy.random.random', 'np.random.random', (['(dim_range, dim_source)'], {}), '((dim_range, dim_source))\n', (683, 708), True, 'import numpy as np\n'), ((735, 779), 'numpy.random.random', 'np.random.random', (['(count_source, dim_source)'], {}), '((count_source, dim_source))\n', (751, 779), True, 'import numpy as np\n'), ((818, 860), 'numpy.random.random', 'np.random.random', (['(count_range, dim_range)'], {}), '((count_range, dim_range))\n', (834, 860), True, 'import numpy as np\n'), ((3226, 3251), 'pymor.operators.constructions.IdentityOperator', 'IdentityOperator', (['U.space'], {}), '(U.space)\n', (3242, 3251), False, 'from pymor.operators.constructions import 
IdentityOperator\n'), ((4191, 4215), 'numpy.random.random', 'np.random.random', (['(7, 1)'], {}), '((7, 1))\n', (4207, 4215), True, 'import numpy as np\n'), ((4641, 4665), 'numpy.random.random', 'np.random.random', (['(7, 1)'], {}), '((7, 1))\n', (4657, 4665), True, 'import numpy as np\n'), ((5004, 5037), 'pymor.operators.constructions.FixedParameterOperator', 'FixedParameterOperator', (['op'], {'mu': 'mu'}), '(op, mu=mu)\n', (5026, 5037), False, 'from pymor.operators.constructions import FixedParameterOperator\n'), ((3610, 3645), 'numpy.random.random', 'np.random.random', (['(7, op.range.dim)'], {}), '((7, op.range.dim))\n', (3626, 3645), True, 'import numpy as np\n'), ((3773, 3809), 'numpy.random.random', 'np.random.random', (['(7, op.source.dim)'], {}), '((7, op.source.dim))\n', (3789, 3809), True, 'import numpy as np\n'), ((4258, 4267), 'numpy.eye', 'np.eye', (['(1)'], {}), '(1)\n', (4264, 4267), True, 'import numpy as np\n'), ((4720, 4729), 'numpy.eye', 'np.eye', (['(1)'], {}), '(1)\n', (4726, 4729), True, 'import numpy as np\n'), ((3708, 3728), 'numpy.eye', 'np.eye', (['op.range.dim'], {}), '(op.range.dim)\n', (3714, 3728), True, 'import numpy as np\n'), ((3856, 3877), 'numpy.eye', 'np.eye', (['op.source.dim'], {}), '(op.source.dim)\n', (3862, 3877), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 11 23:41:29 2021
"""
from PyOECP import References
from PyOECP import Transform
import matplotlib.pyplot as plt
import numpy as np
''' Example 1 Methanol
This script tries to convert the reflection coefficients from VNAs.
The data files and VNAs are as follows.
VNA
Short: LS11Short.csv
Open: LS11Open.csv
Acetone: LS11Acetone.csv
Water: LS11Water.csv
Methanol: LS11Methanol.csv
'''

T = 25          # sample temperature [deg C] for the transforms and literature model
SPACING = 10    # plot every SPACING-th measured point so markers stay readable


def _load_s11(address):
    """Parse the five measured S11 traces stored under `address`.

    Returns (short, open_, water, acetone, methanol) reflection data;
    column 0 of each parsed array is used as the frequency axis below.
    """
    short = References.Parser(address + 'S11Short.csv')
    open_ = References.Parser(address + 'S11Open.csv')
    water = References.Parser(address + 'S11Water.csv')
    acetone = References.Parser(address + 'S11Acetone.csv')
    methanol = References.Parser(address + 'S11Methanol.csv')
    return short, open_, water, acetone, methanol


def _plot_measured(ax, frequency, epsilon, marker, name):
    """Scatter Re(eps) in red and -Im(eps) in blue for one transform model.

    BUGFIX: the original script labelled the imaginary parts of the Stuchly
    and Komarov spectra "$\epsilon'$"; they are now labelled "$\epsilon''$".
    """
    ax.semilogx(frequency[::SPACING], np.real(epsilon)[::SPACING], marker,
                markerfacecolor='None', markeredgecolor='red',
                markeredgewidth=1.0, markersize=7,
                label="$\epsilon'$ (%s)" % name)
    ax.semilogx(frequency[::SPACING], -np.imag(epsilon)[::SPACING], marker,
                markerfacecolor='None', markeredgecolor='blue',
                markeredgewidth=1.0, markersize=7,
                label="$\epsilon''$ (%s)" % name)


def _plot_literature(ax, frequency):
    """Overlay the Barthel literature spectrum for methanol at temperature T."""
    theoretical = References.Methanol_Barthel(frequency, temperature=T)['epsilon']
    ax.semilogx(frequency, np.real(theoretical), color='red', linewidth=1.0,
                label="$\epsilon'$ (Literature)")
    ax.semilogx(frequency, -np.imag(theoretical), '--', color='blue', linewidth=1.0,
                label="$\epsilon''$ (Literature)")


def _decorate(ax, tag):
    """Axis cosmetics shared by both panels."""
    ax.set_ylabel("$\epsilon$")
    ax.set_ylim([0, 50])
    ax.legend(loc='upper right', ncol=2, fontsize='xx-small', edgecolor='k')
    ax.text(-0.25, 1, tag, transform=ax.transAxes)


fig, (ax1, ax2) = plt.subplots(2, 1)
fig.set_size_inches((5, 8))
fig.set_dpi(300)
plt.rc('font', **{'size': 15})
plt.rcParams['font.family'] = 'serif'

# --- 1.1 Low-frequency data ------------------------------------------------
s11_short, s11_open, s11_water, s11_acetone, s11_methanol = _load_s11('data/low/')
frequency = s11_open[:, 0]

marsland = Transform.Marsland(frequency, s11_methanol, s11_short, s11_open, s11_water, s11_acetone,
                   m2='Open', m3='Water_Kaatze', m4='Acetone_Onimisi', temperature=T,
                   Window=81, concentrations=[None, None, None, None]).Calculate()
stuchly = Transform.Stuchly(frequency, s11_methanol, s11_short, s11_open, s11_water,
                  m1='Short', m2='Open', m3='Water_Kaatze', Window=51).Calculate()
komarov = Transform.Komarov(frequency, s11_methanol, s11_open, s11_water, s11_acetone,
                  'Open', 'Water_Kaatze', 'Acetone_Onimisi',
                  1, 3.8, 2.1 + 0 * 1j, M=50, Window=51).epsilon

_plot_measured(ax1, frequency, marsland, 'o', 'Marsland')
_plot_measured(ax1, frequency, stuchly, 's', 'Stuchly')
_plot_measured(ax1, frequency, komarov, '^', 'Komarov')
_plot_literature(ax1, frequency)
_decorate(ax1, '(a)')

# --- 1.2 High-frequency data -----------------------------------------------
s11_short, s11_open, s11_water, s11_acetone, s11_methanol = _load_s11('data/high/')
frequency = s11_open[:, 0]

marsland = Transform.Marsland(frequency, s11_methanol, s11_short, s11_open, s11_water, s11_acetone,
                   m2='Open', m3='Water_Kaatze', m4='Acetone_Onimisi', temperature=T,
                   Window=101, concentrations=[None, None, None, None]).Calculate()
stuchly = Transform.Stuchly(frequency, s11_methanol, s11_short, s11_open, s11_water,
                  m1='Short', m2='Open', m3='Water_Kaatze', Window=51).Calculate()
# NOTE: the high-frequency probe uses different reference order and geometry
# (acetone before water, radii 0.3 / 0.8) than the low-frequency probe.
komarov = Transform.Komarov(frequency, s11_methanol, s11_open, s11_acetone, s11_water,
                  'Open', 'Acetone_Onimisi', 'Water_Kaatze',
                  0.3, 0.8, 2.1 + 0 * 1j, M=50, Window=51).epsilon

_plot_measured(ax2, frequency, marsland, 'o', 'Marsland')
_plot_measured(ax2, frequency, stuchly, 's', 'Stuchly')
_plot_measured(ax2, frequency, komarov, '^', 'Komarov')
_plot_literature(ax2, frequency)
ax2.set_xlabel("frequency [Hz]")
_decorate(ax2, '(b)')

plt.savefig('Figure3.pdf', dpi=300)
| [
"PyOECP.References.Methanol_Barthel",
"matplotlib.pyplot.savefig",
"PyOECP.Transform.Stuchly",
"numpy.imag",
"PyOECP.References.Parser",
"PyOECP.Transform.Marsland",
"numpy.real",
"PyOECP.Transform.Komarov",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.rc"
] | [((527, 570), 'PyOECP.References.Parser', 'References.Parser', (["(address + 'S11Short.csv')"], {}), "(address + 'S11Short.csv')\n", (544, 570), False, 'from PyOECP import References\n'), ((579, 621), 'PyOECP.References.Parser', 'References.Parser', (["(address + 'S11Open.csv')"], {}), "(address + 'S11Open.csv')\n", (596, 621), False, 'from PyOECP import References\n'), ((630, 673), 'PyOECP.References.Parser', 'References.Parser', (["(address + 'S11Water.csv')"], {}), "(address + 'S11Water.csv')\n", (647, 673), False, 'from PyOECP import References\n'), ((682, 727), 'PyOECP.References.Parser', 'References.Parser', (["(address + 'S11Acetone.csv')"], {}), "(address + 'S11Acetone.csv')\n", (699, 727), False, 'from PyOECP import References\n'), ((735, 781), 'PyOECP.References.Parser', 'References.Parser', (["(address + 'S11Methanol.csv')"], {}), "(address + 'S11Methanol.csv')\n", (752, 781), False, 'from PyOECP import References\n'), ((824, 1014), 'PyOECP.Transform.Marsland', 'Transform.Marsland', (['frequency', 'S11m', 'S11r0', 'S11r1', 'S11r2', 'S11r3'], {'m2': '"""Open"""', 'm3': '"""Water_Kaatze"""', 'm4': '"""Acetone_Onimisi"""', 'temperature': 'T', 'Window': '(81)', 'concentrations': '[None, None, None, None]'}), "(frequency, S11m, S11r0, S11r1, S11r2, S11r3, m2='Open',\n m3='Water_Kaatze', m4='Acetone_Onimisi', temperature=T, Window=81,\n concentrations=[None, None, None, None])\n", (842, 1014), False, 'from PyOECP import Transform\n'), ((1138, 1251), 'PyOECP.Transform.Stuchly', 'Transform.Stuchly', (['frequency', 'S11m', 'S11r0', 'S11r1', 'S11r2'], {'m1': '"""Short"""', 'm2': '"""Open"""', 'm3': '"""Water_Kaatze"""', 'Window': '(51)'}), "(frequency, S11m, S11r0, S11r1, S11r2, m1='Short', m2=\n 'Open', m3='Water_Kaatze', Window=51)\n", (1155, 1251), False, 'from PyOECP import Transform\n'), ((1331, 1474), 'PyOECP.Transform.Komarov', 'Transform.Komarov', (['frequency', 'S11m', 'S11r1', 'S11r2', 'S11r3', '"""Open"""', '"""Water_Kaatze"""', 
'"""Acetone_Onimisi"""', '(1)', '(3.8)', '(2.1 + 0 * 1.0j)'], {'M': '(50)', 'Window': '(51)'}), "(frequency, S11m, S11r1, S11r2, S11r3, 'Open',\n 'Water_Kaatze', 'Acetone_Onimisi', 1, 3.8, 2.1 + 0 * 1.0j, M=50, Window=51)\n", (1348, 1474), False, 'from PyOECP import Transform\n'), ((1561, 1579), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (1573, 1579), True, 'import matplotlib.pyplot as plt\n'), ((1642, 1664), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **font)\n", (1648, 1664), True, 'import matplotlib.pyplot as plt\n'), ((3504, 3547), 'PyOECP.References.Parser', 'References.Parser', (["(address + 'S11Short.csv')"], {}), "(address + 'S11Short.csv')\n", (3521, 3547), False, 'from PyOECP import References\n'), ((3556, 3598), 'PyOECP.References.Parser', 'References.Parser', (["(address + 'S11Open.csv')"], {}), "(address + 'S11Open.csv')\n", (3573, 3598), False, 'from PyOECP import References\n'), ((3607, 3650), 'PyOECP.References.Parser', 'References.Parser', (["(address + 'S11Water.csv')"], {}), "(address + 'S11Water.csv')\n", (3624, 3650), False, 'from PyOECP import References\n'), ((3659, 3704), 'PyOECP.References.Parser', 'References.Parser', (["(address + 'S11Acetone.csv')"], {}), "(address + 'S11Acetone.csv')\n", (3676, 3704), False, 'from PyOECP import References\n'), ((3712, 3758), 'PyOECP.References.Parser', 'References.Parser', (["(address + 'S11Methanol.csv')"], {}), "(address + 'S11Methanol.csv')\n", (3729, 3758), False, 'from PyOECP import References\n'), ((3801, 3992), 'PyOECP.Transform.Marsland', 'Transform.Marsland', (['frequency', 'S11m', 'S11r0', 'S11r1', 'S11r2', 'S11r3'], {'m2': '"""Open"""', 'm3': '"""Water_Kaatze"""', 'm4': '"""Acetone_Onimisi"""', 'temperature': 'T', 'Window': '(101)', 'concentrations': '[None, None, None, None]'}), "(frequency, S11m, S11r0, S11r1, S11r2, S11r3, m2='Open',\n m3='Water_Kaatze', m4='Acetone_Onimisi', temperature=T, Window=101,\n concentrations=[None, None, 
None, None])\n", (3819, 3992), False, 'from PyOECP import Transform\n'), ((4116, 4229), 'PyOECP.Transform.Stuchly', 'Transform.Stuchly', (['frequency', 'S11m', 'S11r0', 'S11r1', 'S11r2'], {'m1': '"""Short"""', 'm2': '"""Open"""', 'm3': '"""Water_Kaatze"""', 'Window': '(51)'}), "(frequency, S11m, S11r0, S11r1, S11r2, m1='Short', m2=\n 'Open', m3='Water_Kaatze', Window=51)\n", (4133, 4229), False, 'from PyOECP import Transform\n'), ((4309, 4458), 'PyOECP.Transform.Komarov', 'Transform.Komarov', (['frequency', 'S11m', 'S11r1', 'S11r3', 'S11r2', '"""Open"""', '"""Acetone_Onimisi"""', '"""Water_Kaatze"""', '(0.3)', '(0.8)', '(2.1 + 0 * 1.0j)'], {'M': '(50)', 'Window': '(51)'}), "(frequency, S11m, S11r1, S11r3, S11r2, 'Open',\n 'Acetone_Onimisi', 'Water_Kaatze', 0.3, 0.8, 2.1 + 0 * 1.0j, M=50,\n Window=51)\n", (4326, 4458), False, 'from PyOECP import Transform\n'), ((6295, 6330), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Figure3.pdf"""'], {'dpi': '(300)'}), "('Figure3.pdf', dpi=300)\n", (6306, 6330), True, 'import matplotlib.pyplot as plt\n'), ((2988, 3041), 'PyOECP.References.Methanol_Barthel', 'References.Methanol_Barthel', (['frequency'], {'temperature': 'T'}), '(frequency, temperature=T)\n', (3015, 3041), False, 'from PyOECP import References\n'), ((3075, 3095), 'numpy.real', 'np.real', (['Theoretical'], {}), '(Theoretical)\n', (3082, 3095), True, 'import numpy as np\n'), ((4538, 4591), 'PyOECP.References.Methanol_Barthel', 'References.Methanol_Barthel', (['frequency'], {'temperature': 'T'}), '(frequency, temperature=T)\n', (4565, 4591), False, 'from PyOECP import References\n'), ((5896, 5916), 'numpy.real', 'np.real', (['Theoretical'], {}), '(Theoretical)\n', (5903, 5916), True, 'import numpy as np\n'), ((1770, 1788), 'numpy.real', 'np.real', (['MarslandE'], {}), '(MarslandE)\n', (1777, 1788), True, 'import numpy as np\n'), ((2186, 2203), 'numpy.real', 'np.real', (['StuchlyE'], {}), '(StuchlyE)\n', (2193, 2203), True, 'import numpy as np\n'), ((2597, 2614), 
'numpy.real', 'np.real', (['KomarovE'], {}), '(KomarovE)\n', (2604, 2614), True, 'import numpy as np\n'), ((3180, 3200), 'numpy.imag', 'np.imag', (['Theoretical'], {}), '(Theoretical)\n', (3187, 3200), True, 'import numpy as np\n'), ((4669, 4687), 'numpy.real', 'np.real', (['MarslandE'], {}), '(MarslandE)\n', (4676, 4687), True, 'import numpy as np\n'), ((5085, 5102), 'numpy.real', 'np.real', (['StuchlyE'], {}), '(StuchlyE)\n', (5092, 5102), True, 'import numpy as np\n'), ((5496, 5513), 'numpy.real', 'np.real', (['KomarovE'], {}), '(KomarovE)\n', (5503, 5513), True, 'import numpy as np\n'), ((6001, 6021), 'numpy.imag', 'np.imag', (['Theoretical'], {}), '(Theoretical)\n', (6008, 6021), True, 'import numpy as np\n'), ((1977, 1995), 'numpy.imag', 'np.imag', (['MarslandE'], {}), '(MarslandE)\n', (1984, 1995), True, 'import numpy as np\n'), ((2391, 2408), 'numpy.imag', 'np.imag', (['StuchlyE'], {}), '(StuchlyE)\n', (2398, 2408), True, 'import numpy as np\n'), ((2802, 2819), 'numpy.imag', 'np.imag', (['KomarovE'], {}), '(KomarovE)\n', (2809, 2819), True, 'import numpy as np\n'), ((4876, 4894), 'numpy.imag', 'np.imag', (['MarslandE'], {}), '(MarslandE)\n', (4883, 4894), True, 'import numpy as np\n'), ((5290, 5307), 'numpy.imag', 'np.imag', (['StuchlyE'], {}), '(StuchlyE)\n', (5297, 5307), True, 'import numpy as np\n'), ((5701, 5718), 'numpy.imag', 'np.imag', (['KomarovE'], {}), '(KomarovE)\n', (5708, 5718), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.