code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import sys
from data_utils import *
from AIWAE_models import *
from sys import exit
import argparse
import time
import bisect
## parameter parser
parser = argparse.ArgumentParser(description="Annealed Importance Weighted Auto-Encoder")
parser.add_argument("--dataset", type = str,
required = True)
parser.add_argument("--hidden_size", type = int, default = 50)
parser.add_argument("--num_samples", type = int,
required = True,
help = """num of samples used in Monte Carlo estimate of
ELBO when using VAE; num of samples used in
importance weighted ELBO when using IWAE.""")
parser.add_argument("--repeat", type = int)
## parse parameters
args = parser.parse_args()
hidden_size = args.hidden_size
num_samples = args.num_samples
repeat = args.repeat
## read data
## Load the pickled train/test images for the requested dataset and wrap
## them in the matching Dataset class (MNIST_Dataset / OMNIGLOT_Dataset
## come from data_utils).
if args.dataset == "MNIST":
    with open("./data/MNIST.pkl", 'rb') as file_handle:
        data = pickle.load(file_handle)
    train_image = data['train_image']
    test_image = data['test_image']
    train_data = MNIST_Dataset(train_image)
    test_data = MNIST_Dataset(test_image)
elif args.dataset == "Omniglot":
    with open("./data/Omniglot.pkl", 'rb') as file_handle:
        data = pickle.load(file_handle)
    train_image = data['train_image']
    test_image = data['test_image']
    train_data = OMNIGLOT_Dataset(train_image)
    test_data = OMNIGLOT_Dataset(test_image)
else:
    # Bug fix: the original `raise("Dataset is wrong!")` raises a bare
    # string, which itself fails with "TypeError: exceptions must derive
    # from BaseException". Raise a proper exception type instead.
    raise ValueError("Dataset is wrong!")
batch_size = 20
train_data_loader = DataLoader(train_data,
                               batch_size = batch_size,
                               shuffle = True)
test_data_loader = DataLoader(test_data, batch_size = batch_size)
## IWAE models
hidden_size = args.hidden_size  # NOTE(review): re-assigns the same value already set above
input_size = train_image.shape[-1]
output_size = train_image.shape[-1]  # NOTE(review): assigned but not used in this script
aiwae = AIWAE(input_size, hidden_size)
aiwae = aiwae.cuda()
## optimizer
optimizer = optim.Adam(aiwae.parameters(), lr = 0.001, eps = 1e-4)
# LR multiplier 10**(-stage/7): one order of magnitude decay every 7 stages.
lambda_lr = lambda epoch : 10**(-epoch/7.0)
scheduler_lr = optim.lr_scheduler.LambdaLR(optimizer, lambda_lr)
# 3280 = sum of 3**i for i in 0..7, the last stage boundary of calc_lr_idx below.
num_epoch = 3280
idx_epoch = 0
def calc_lr_idx(idx_epoch):
    """Return the learning-rate stage index for a given epoch.

    Stage boundaries are the cumulative sums of 3**0 .. 3**7
    (1, 4, 13, 40, 121, 364, 1093, 3280); the result is the number of
    boundaries that are <= idx_epoch.
    """
    thresholds = []
    running_total = 0
    for power in range(8):
        running_total += 3 ** power
        thresholds.append(running_total)
    # bisect.bisect inserts after equal elements, i.e. counts boundaries
    # less than or equal to idx_epoch.
    return bisect.bisect(thresholds, idx_epoch)
# Main training loop: one pass over the data loader per epoch, with
# per-step checkpointing and automatic rollback on NaN loss.
while idx_epoch < num_epoch:
    # Map the epoch index onto a learning-rate stage for the scheduler.
    lr_idx = calc_lr_idx(idx_epoch)
    # NOTE(review): passing a value to scheduler.step() is deprecated in
    # newer torch releases -- confirm against the installed version.
    scheduler_lr.step(lr_idx)
    for idx_step, data in enumerate(train_data_loader):
        data = data.float()
        x = data.cuda()
        ###### train decoder
        # Single-sample ELBO loss vs. multi-sample importance-weighted loss.
        if num_samples == 1:
            loss = aiwae.encoder_loss(x)
        else:
            loss = aiwae.encoder_loss_multiple_samples(x, num_samples)
        loss = torch.mean(loss)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Monitoring only: this ELBO estimate does not feed the update.
        elbo = aiwae.calc_elbo(x, num_samples)
        elbo = torch.mean(elbo)
        print("epoch: {:>3d}, step: {:>5d}, loss: {:.3f}, elbo: {:.3f}, lr: {:.5f}".format(
            idx_epoch, idx_step, loss.item(), elbo.item(), optimizer.param_groups[0]['lr']), flush = True)
        # if idx_step >= 19:
        #     print("time used: {:.2f}".format(time.time() - start_time))
        #     exit()
        # If training diverged (NaN loss), reload the most recent restart
        # checkpoint and continue with the next batch.
        if np.isnan(loss.item()):
            model_state_dict = torch.load("./output/model/IWAE_dataset_{}_num_samples_{}_repeat_{}_restart.pt".format(args.dataset, num_samples, repeat))
            aiwae.load_state_dict(model_state_dict['state_dict'])
            optimizer.load_state_dict(model_state_dict['optimizer_state_dict'])
            print("restart because of nan")
            continue
        # Refresh the restart checkpoint after every healthy step.
        torch.save({'state_dict': aiwae.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'args': args},
                   "./output/model/IWAE_dataset_{}_num_samples_{}_repeat_{}_restart.pt".format(args.dataset, num_samples, repeat))
    # Persist a numbered checkpoint every 10 epochs.
    if (idx_epoch + 1) % 10 == 0:
        torch.save({'state_dict': aiwae.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'args': args},
                   "./output/model/IWAE_dataset_{}_num_samples_{}_epoch_{}_repeat_{}.pt".format(args.dataset, num_samples, idx_epoch, repeat))
    idx_epoch += 1
# Final checkpoint after training completes.
torch.save({'state_dict': aiwae.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'args': args},
           "./output/model/IWAE_dataset_{}_num_samples_{}_epoch_{}_repeat_{}.pt".format(args.dataset, num_samples, idx_epoch, repeat))
exit()
| [
"torch.optim.lr_scheduler.LambdaLR",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.mean",
"pickle.load",
"bisect.bisect",
"sys.exit",
"numpy.cumsum"
] | [((333, 418), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Annealed Importance Weighted Auto-Encoder"""'}), "(description='Annealed Importance Weighted Auto-Encoder'\n )\n", (356, 418), False, 'import argparse\n'), ((1763, 1822), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_data, batch_size=batch_size, shuffle=True)\n', (1773, 1822), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1908, 1952), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'batch_size'}), '(test_data, batch_size=batch_size)\n', (1918, 1952), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2274, 2323), 'torch.optim.lr_scheduler.LambdaLR', 'optim.lr_scheduler.LambdaLR', (['optimizer', 'lambda_lr'], {}), '(optimizer, lambda_lr)\n', (2301, 2323), True, 'import torch.optim as optim\n'), ((4730, 4736), 'sys.exit', 'exit', ([], {}), '()\n', (4734, 4736), False, 'from sys import exit\n'), ((2434, 2450), 'numpy.cumsum', 'np.cumsum', (['count'], {}), '(count)\n', (2443, 2450), True, 'import numpy as np\n'), ((2462, 2493), 'bisect.bisect', 'bisect.bisect', (['count', 'idx_epoch'], {}), '(count, idx_epoch)\n', (2475, 2493), False, 'import bisect\n'), ((1162, 1186), 'pickle.load', 'pickle.load', (['file_handle'], {}), '(file_handle)\n', (1173, 1186), False, 'import pickle\n'), ((2920, 2936), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (2930, 2936), False, 'import torch\n'), ((3080, 3096), 'torch.mean', 'torch.mean', (['elbo'], {}), '(elbo)\n', (3090, 3096), False, 'import torch\n'), ((1476, 1500), 'pickle.load', 'pickle.load', (['file_handle'], {}), '(file_handle)\n', (1487, 1500), False, 'import pickle\n')] |
import pandas as pd
import numpy as np
import datetime as dt
import math
#输入H 文件名
def cal_riskrt(H, source):
    """Compute annualized risk/return statistics for a factor backtest.

    Parameters
    ----------
    H : int
        Holding period in trading days; volatility is annualized with
        sqrt(250 / H).
    source : pandas.DataFrame
        Backtest output whose first six columns are ``Unnamed: 0``,
        ``date``, ``long_rt``, ``short_rt``, ``long_short_rt`` and
        ``benchmark`` (per-period simple returns).

    Returns
    -------
    pandas.DataFrame
        Rows ``long`` / ``short`` / ``long_short`` / ``excess`` with
        columns ``rt`` (annualized return), ``volatility``, ``mdd``
        (maximum drawdown), ``sharpe`` and ``calmar``.

    Notes
    -----
    The original implementation copy-pasted the same metric computation
    four times; it is factored into ``_risk_metrics`` below.  It also
    used the deprecated positional fallback ``series[-1]`` on a
    date-indexed Series, replaced here by ``.iloc[-1]``.
    """
    source = source.iloc[:, 0:6]
    source = source.drop(columns=["Unnamed: 0"])
    source = source.set_index('date').dropna(
        subset=['long_rt', 'short_rt', 'long_short_rt'], how='all')
    df = pd.DataFrame(columns=['rt', 'volatility', 'mdd', 'sharpe', 'calmar'],
                      index=['long', 'short', 'long_short', 'excess'])
    # Per-row return series; "excess" is the long leg minus the benchmark.
    series_map = {
        'long': source['long_rt'],
        'short': source['short_rt'],
        'long_short': source['long_short_rt'],
        'excess': source['long_rt'] - source['benchmark'],
    }
    for name, rt_series in series_map.items():
        df.loc[name] = _risk_metrics(rt_series, H)
    return df


def _risk_metrics(rt_series, H):
    """Return (annual_ret, volatility, mdd, sharpe, calmar) for one
    per-period return series indexed by date strings."""
    # Cumulative wealth curve.
    prod = np.cumprod(rt_series + 1)
    holding_period = (pd.to_datetime(rt_series.index.values[-1])
                      - pd.to_datetime(rt_series.index.values[0]))
    # Annualized (geometric) return over the calendar holding period.
    annual_ret = pow(prod.iloc[-1], 365 / holding_period.days) - 1
    # Annualized volatility of the per-period returns.
    volatility = rt_series.std() * math.sqrt(250 / H)
    sharpe = annual_ret / volatility
    # Maximum drawdown of the cumulative wealth curve.
    running_max = prod.expanding(1).max()
    drawdown = prod / running_max - 1
    mdd = drawdown.min()
    calmar = annual_ret / abs(mdd)
    return annual_ret, volatility, mdd, sharpe, calmar
## Batch evaluation: each entry is (input CSV produced by the drawing
## step, holding period H used for annualization, output CSV name).
## The original repeated the same read/compute/write triple 14 times;
## this table-driven loop is behavior-identical.
FACTOR_RUNS = [
    ("../draw/inv_level_H30.csv", 20, "inv_level.csv"),
    ("../draw/warehouseR90H5.csv", 5, "warehouse.csv"),
    ("../draw/rollrt2H35.csv", 35, "roll_rt.csv"),
    ("../draw/basis2H35.csv", 35, "basis.csv"),
    ("../draw/basis_mom2R120H35.csv", 35, "basis_mom.csv"),
    ("../draw/open_interestR5H30.csv", 30, "open_interest.csv"),
    ("../draw/ts_momR120H30.csv", 30, "ts_mom.csv"),
    ("../draw/cs_momR5H30.csv", 30, "cs_mom.csv"),
    ("../draw/skewR120H30.csv", 30, "skew.csv"),
    ("../draw/liquidityR15H30.csv", 30, "liquidity.csv"),
    ("../draw/covR120H30.csv", 30, "cov.csv"),
    ("../draw/idio_volR200H25.csv", 25, "idio_vol.csv"),
    ("../draw/momR5H40.csv", 40, "inflation.csv"),
    ("../draw/cnyrR3H40.csv", 40, "cny.csv"),
]
for in_file, H, out_file in FACTOR_RUNS:
    rt_df = pd.read_csv(in_file)
    risk_rt = cal_riskrt(H, rt_df)
    risk_rt.to_csv(out_file)
"pandas.read_csv",
"pandas.to_datetime",
"math.sqrt",
"pandas.DataFrame",
"numpy.cumprod"
] | [((3751, 3791), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/inv_level_H30.csv"""'], {}), "('../draw/inv_level_H30.csv')\n", (3762, 3791), True, 'import pandas as pd\n'), ((3860, 3901), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/warehouseR90H5.csv"""'], {}), "('../draw/warehouseR90H5.csv')\n", (3871, 3901), True, 'import pandas as pd\n'), ((3969, 4006), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/rollrt2H35.csv"""'], {}), "('../draw/rollrt2H35.csv')\n", (3980, 4006), True, 'import pandas as pd\n'), ((4073, 4109), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/basis2H35.csv"""'], {}), "('../draw/basis2H35.csv')\n", (4084, 4109), True, 'import pandas as pd\n'), ((4174, 4218), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/basis_mom2R120H35.csv"""'], {}), "('../draw/basis_mom2R120H35.csv')\n", (4185, 4218), True, 'import pandas as pd\n'), ((4287, 4332), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/open_interestR5H30.csv"""'], {}), "('../draw/open_interestR5H30.csv')\n", (4298, 4332), True, 'import pandas as pd\n'), ((4405, 4445), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/ts_momR120H30.csv"""'], {}), "('../draw/ts_momR120H30.csv')\n", (4416, 4445), True, 'import pandas as pd\n'), ((4511, 4549), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/cs_momR5H30.csv"""'], {}), "('../draw/cs_momR5H30.csv')\n", (4522, 4549), True, 'import pandas as pd\n'), ((4615, 4653), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/skewR120H30.csv"""'], {}), "('../draw/skewR120H30.csv')\n", (4626, 4653), True, 'import pandas as pd\n'), ((4717, 4759), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/liquidityR15H30.csv"""'], {}), "('../draw/liquidityR15H30.csv')\n", (4728, 4759), True, 'import pandas as pd\n'), ((4828, 4865), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/covR120H30.csv"""'], {}), "('../draw/covR120H30.csv')\n", (4839, 4865), True, 'import pandas as pd\n'), ((4928, 4970), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/idio_volR200H25.csv"""'], {}), 
"('../draw/idio_volR200H25.csv')\n", (4939, 4970), True, 'import pandas as pd\n'), ((5038, 5073), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/momR5H40.csv"""'], {}), "('../draw/momR5H40.csv')\n", (5049, 5073), True, 'import pandas as pd\n'), ((5142, 5178), 'pandas.read_csv', 'pd.read_csv', (['"""../draw/cnyrR3H40.csv"""'], {}), "('../draw/cnyrR3H40.csv')\n", (5153, 5178), True, 'import pandas as pd\n'), ((314, 437), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['rt', 'volatility', 'mdd', 'sharpe', 'calmar']", 'index': "['long', 'short', 'long_short', 'excess']"}), "(columns=['rt', 'volatility', 'mdd', 'sharpe', 'calmar'], index\n =['long', 'short', 'long_short', 'excess'])\n", (326, 437), True, 'import pandas as pd\n'), ((446, 477), 'pandas.DataFrame', 'pd.DataFrame', (["source['long_rt']"], {}), "(source['long_rt'])\n", (458, 477), True, 'import pandas as pd\n'), ((495, 524), 'numpy.cumprod', 'np.cumprod', (["(rt['long_rt'] + 1)"], {}), "(rt['long_rt'] + 1)\n", (505, 524), True, 'import numpy as np\n'), ((1215, 1247), 'pandas.DataFrame', 'pd.DataFrame', (["source['short_rt']"], {}), "(source['short_rt'])\n", (1227, 1247), True, 'import pandas as pd\n'), ((1299, 1329), 'numpy.cumprod', 'np.cumprod', (["(rt['short_rt'] + 1)"], {}), "(rt['short_rt'] + 1)\n", (1309, 1329), True, 'import numpy as np\n'), ((2037, 2074), 'pandas.DataFrame', 'pd.DataFrame', (["source['long_short_rt']"], {}), "(source['long_short_rt'])\n", (2049, 2074), True, 'import pandas as pd\n'), ((2138, 2173), 'numpy.cumprod', 'np.cumprod', (["(rt['long_short_rt'] + 1)"], {}), "(rt['long_short_rt'] + 1)\n", (2148, 2173), True, 'import numpy as np\n'), ((2912, 2965), 'pandas.DataFrame', 'pd.DataFrame', (["(source['long_rt'] - source['benchmark'])"], {}), "(source['long_rt'] - source['benchmark'])\n", (2924, 2965), True, 'import pandas as pd\n'), ((3010, 3041), 'numpy.cumprod', 'np.cumprod', (["(rt['excess_rt'] + 1)"], {}), "(rt['excess_rt'] + 1)\n", (3020, 3041), True, 'import numpy as 
np\n'), ((546, 581), 'pandas.to_datetime', 'pd.to_datetime', (['rt.index.values[-1]'], {}), '(rt.index.values[-1])\n', (560, 581), True, 'import pandas as pd\n'), ((584, 618), 'pandas.to_datetime', 'pd.to_datetime', (['rt.index.values[0]'], {}), '(rt.index.values[0])\n', (598, 618), True, 'import pandas as pd\n'), ((753, 771), 'math.sqrt', 'math.sqrt', (['(250 / H)'], {}), '(250 / H)\n', (762, 771), False, 'import math\n'), ((1351, 1386), 'pandas.to_datetime', 'pd.to_datetime', (['rt.index.values[-1]'], {}), '(rt.index.values[-1])\n', (1365, 1386), True, 'import pandas as pd\n'), ((1389, 1423), 'pandas.to_datetime', 'pd.to_datetime', (['rt.index.values[0]'], {}), '(rt.index.values[0])\n', (1403, 1423), True, 'import pandas as pd\n'), ((1559, 1577), 'math.sqrt', 'math.sqrt', (['(250 / H)'], {}), '(250 / H)\n', (1568, 1577), False, 'import math\n'), ((2195, 2230), 'pandas.to_datetime', 'pd.to_datetime', (['rt.index.values[-1]'], {}), '(rt.index.values[-1])\n', (2209, 2230), True, 'import pandas as pd\n'), ((2233, 2267), 'pandas.to_datetime', 'pd.to_datetime', (['rt.index.values[0]'], {}), '(rt.index.values[0])\n', (2247, 2267), True, 'import pandas as pd\n'), ((2408, 2426), 'math.sqrt', 'math.sqrt', (['(250 / H)'], {}), '(250 / H)\n', (2417, 2426), False, 'import math\n'), ((3063, 3098), 'pandas.to_datetime', 'pd.to_datetime', (['rt.index.values[-1]'], {}), '(rt.index.values[-1])\n', (3077, 3098), True, 'import pandas as pd\n'), ((3101, 3135), 'pandas.to_datetime', 'pd.to_datetime', (['rt.index.values[0]'], {}), '(rt.index.values[0])\n', (3115, 3135), True, 'import pandas as pd\n'), ((3272, 3290), 'math.sqrt', 'math.sqrt', (['(250 / H)'], {}), '(250 / H)\n', (3281, 3290), False, 'import math\n')] |
import numpy as np
from configparser import SafeConfigParser
from pyfisher.lensInterface import lensNoise
import orphics.theory.gaussianCov as gcov
from orphics.theory.cosmology import Cosmology
import orphics.tools.io as io
# Build a cosmology and its theory spectra (cached via pickling).
cc = Cosmology(lmax=6000,pickling=True)
theory = cc.theory
# Read config
# NOTE(review): SafeConfigParser is deprecated (removed in Python 3.12);
# confirm the runtime version or switch to ConfigParser.
iniFile = "../pyfisher/input/params.ini"
Config = SafeConfigParser()
Config.optionxform=str  # keep option names case-sensitive
Config.read(iniFile)
expName = "ColinACT"
lensName = "ColinLensing"
# Lensing reconstruction noise for the configured experiment; all
# overrides are left at None so values come from the ini file.
ls,Nls,ellbb,dlbb,efficiency = lensNoise(Config,expName,lensName,beamOverride=None,noiseTOverride=None,lkneeTOverride=None,lkneePOverride=None,alphaTOverride=None,alphaPOverride=None)
#planck_file = "input/planck_nlkk.dat"
#lp,nlplanck = np.loadtxt(planck_file,usecols=[0,1],unpack=True)
LF = gcov.LensForecast(theory)
ells = np.arange(2,6000,1)
# Lensing convergence power spectrum C_ell^kk from theory.
clkk = theory.gCl('kk',ells)
# Plot signal vs. reconstruction noise on a log y-axis.
pl = io.Plotter(scaleY='log')
pl.add(ells,clkk)
#pl.add(lp,nlplanck,ls="-.")
pl.add(ls,Nls,ls="-.")
pl._ax.set_ylim(5e-10,1e-5)
pl.done("output/clsn.png")
#LF.loadGenericCls("kk",ells,clkk,lp,nlplanck)
LF.loadGenericCls("kk",ells,clkk,ls,Nls)
# Signal-to-noise over binned multipoles 20..3000 with sky fraction 0.4.
ellBinEdges = np.arange(20,3000,20)
fsky = 0.4
specType = "kk"
sn,errs = LF.sn(ellBinEdges,fsky,specType)
print(sn)
| [
"orphics.theory.gaussianCov.LensForecast",
"orphics.theory.cosmology.Cosmology",
"pyfisher.lensInterface.lensNoise",
"orphics.tools.io.Plotter",
"numpy.arange",
"configparser.SafeConfigParser"
] | [((232, 267), 'orphics.theory.cosmology.Cosmology', 'Cosmology', ([], {'lmax': '(6000)', 'pickling': '(True)'}), '(lmax=6000, pickling=True)\n', (241, 267), False, 'from orphics.theory.cosmology import Cosmology\n'), ((352, 370), 'configparser.SafeConfigParser', 'SafeConfigParser', ([], {}), '()\n', (368, 370), False, 'from configparser import SafeConfigParser\n'), ((495, 663), 'pyfisher.lensInterface.lensNoise', 'lensNoise', (['Config', 'expName', 'lensName'], {'beamOverride': 'None', 'noiseTOverride': 'None', 'lkneeTOverride': 'None', 'lkneePOverride': 'None', 'alphaTOverride': 'None', 'alphaPOverride': 'None'}), '(Config, expName, lensName, beamOverride=None, noiseTOverride=None,\n lkneeTOverride=None, lkneePOverride=None, alphaTOverride=None,\n alphaPOverride=None)\n', (504, 663), False, 'from pyfisher.lensInterface import lensNoise\n'), ((761, 786), 'orphics.theory.gaussianCov.LensForecast', 'gcov.LensForecast', (['theory'], {}), '(theory)\n', (778, 786), True, 'import orphics.theory.gaussianCov as gcov\n'), ((795, 816), 'numpy.arange', 'np.arange', (['(2)', '(6000)', '(1)'], {}), '(2, 6000, 1)\n', (804, 816), True, 'import numpy as np\n'), ((849, 873), 'orphics.tools.io.Plotter', 'io.Plotter', ([], {'scaleY': '"""log"""'}), "(scaleY='log')\n", (859, 873), True, 'import orphics.tools.io as io\n'), ((1104, 1127), 'numpy.arange', 'np.arange', (['(20)', '(3000)', '(20)'], {}), '(20, 3000, 20)\n', (1113, 1127), True, 'import numpy as np\n')] |
import glob
import os
from os.path import join
import numpy as np
# Input datasets and output directory for the grid-search results.
DIR_DATA = join(".", "datasets")
DIR_SAVE = os.path.join(os.environ["HOME"], "Soroosh/results_full")
# All dataset files, excluding held-out *_test.txt files; sorted for a
# deterministic command order.
DATASETS = glob.glob(DIR_DATA + "/*.txt")
DATASETS = [f_name for f_name in DATASETS if "_test.txt" not in f_name]
DATASETS.sort()
# Regularization grid: 1..9 times 1e-3, 1e-2 and 1e-1 (0.001 .. 0.9).
rho = np.hstack([np.round(np.linspace(1, 9, 9) * 1e-3, 3),
                np.round(np.linspace(1, 9, 9) * 1e-2, 2),
                np.round(np.linspace(1, 9, 9) * 1e-1, 1)])
# Cross-validation folds and number of repetitions per configuration.
cv = 5
repeat = 100
def file_writer_1(b_file, command):
    """Append one command line per (dataset, rho) pair to `b_file`.

    The same rho is used for both positions. A pair is skipped when its
    result CSV already exists in DIR_SAVE, so only missing runs are
    scheduled.
    """
    # The method name is the first double-quoted token in the command.
    method = command.split("\"")[1]
    for dataset in DATASETS:
        for rho_1 in rho:
            out_csv = dataset[11:-4] + "_" + method + "_" \
                + str(rho_1) + "_" + str(rho_1) + ".csv"
            if os.path.exists(os.path.join(DIR_SAVE, out_csv)):
                continue
            print(command + "\"{}\" --rho {:0.3f} {:0.3f}".format(
                dataset, rho_1, rho_1), file=b_file)
    return
def file_writer_2(b_file, command):
    """Append one command line per (dataset, rho_1, rho_2) combination
    to `b_file`, skipping combinations whose result CSV already exists
    in DIR_SAVE."""
    # The method name is the first double-quoted token in the command.
    method = command.split("\"")[1]
    for dataset in DATASETS:
        for rho_a in rho:
            for rho_b in rho:
                out_csv = "{}_{}_{}_{}.csv".format(
                    dataset[11:-4], method, rho_a, rho_b)
                if os.path.exists(os.path.join(DIR_SAVE, out_csv)):
                    continue
                print(command + "\"{}\" --rho {:0.3f} {:0.3f}".format(
                    dataset, rho_a, rho_b), file=b_file)
    return
# Emit the full grid of tester.py invocations into a shell script.
with open("./tester.sh", "w") as bash_file:
    # "freg" runs once per dataset, with no rho grid.
    for dataset in DATASETS:
        print("python tester.py --method \"freg\" --cv {} --repeat {} --dataset \"{}\"".format(cv, repeat, dataset),
              file=bash_file)
    # Single-rho methods: the same rho is used for both positions.
    for method in ["wass", "reg", "sparse"]:
        cmd = "python tester.py --method \"{}\" --cv {} --repeat {} --dataset ".format(
            method, cv, repeat)
        file_writer_1(bash_file, cmd)
    # Two-rho methods: full cartesian product of the rho grid.
    for method in ["FR", "KL", "mean"]:
        cmd = "python tester.py --method \"{}\" --cv {} --repeat {} --dataset ".format(
            method, cv, repeat)
        file_writer_2(bash_file, cmd)
| [
"os.path.exists",
"numpy.linspace",
"os.path.join",
"glob.glob"
] | [((78, 99), 'os.path.join', 'join', (['"""."""', '"""datasets"""'], {}), "('.', 'datasets')\n", (82, 99), False, 'from os.path import join\n'), ((111, 167), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""Soroosh/results_full"""'], {}), "(os.environ['HOME'], 'Soroosh/results_full')\n", (123, 167), False, 'import os\n'), ((179, 209), 'glob.glob', 'glob.glob', (["(DIR_DATA + '/*.txt')"], {}), "(DIR_DATA + '/*.txt')\n", (188, 209), False, 'import glob\n'), ((785, 815), 'os.path.join', 'os.path.join', (['DIR_SAVE', 'f_name'], {}), '(DIR_SAVE, f_name)\n', (797, 815), False, 'import os\n'), ((325, 345), 'numpy.linspace', 'np.linspace', (['(1)', '(9)', '(9)'], {}), '(1, 9, 9)\n', (336, 345), True, 'import numpy as np\n'), ((384, 404), 'numpy.linspace', 'np.linspace', (['(1)', '(9)', '(9)'], {}), '(1, 9, 9)\n', (395, 404), True, 'import numpy as np\n'), ((443, 463), 'numpy.linspace', 'np.linspace', (['(1)', '(9)', '(9)'], {}), '(1, 9, 9)\n', (454, 463), True, 'import numpy as np\n'), ((835, 857), 'os.path.exists', 'os.path.exists', (['f_name'], {}), '(f_name)\n', (849, 857), False, 'import os\n'), ((1328, 1358), 'os.path.join', 'os.path.join', (['DIR_SAVE', 'f_name'], {}), '(DIR_SAVE, f_name)\n', (1340, 1358), False, 'import os\n'), ((1382, 1404), 'os.path.exists', 'os.path.exists', (['f_name'], {}), '(f_name)\n', (1396, 1404), False, 'import os\n')] |
from functions import *
import glob
import sys
import os
import numpy as np
from validation_ids import validation_ids
def save_data(file_name, data):
    """Write `data` (a sequence of strings) to `file_name`, one item per
    line, UTF-8 encoded, with no trailing newline.

    Fix: use a context manager so the handle is closed even when the
    write raises (the original relied on an explicit close() that an
    exception would skip).
    """
    with open(file_name, "w+", encoding='utf-8') as res_out:
        res_out.write("\n".join(data))
if __name__ == "__main__":
    # Defaults; overridable by the three positional CLI arguments below.
    num_validation = 10000
    datasets_dir = "./datasets"
    target_dir="./"
    # Seed for reproducibility (the random split below is commented out,
    # but the seed is kept so re-enabling it stays deterministic).
    np.random.seed(1234)
    if len(sys.argv) == 4:
        num_validation = int(sys.argv[1])
        datasets_dir = sys.argv[2]
        target_dir = sys.argv[3]
    else:
        if len(sys.argv) != 1:
            print("Usage: datasets2data.py num_validation_examples datasets_dir target_dir")
            exit(-1)
    # Output file locations for the merged train/validation splits.
    train_src_file = os.path.join(target_dir, "train.src.txt")
    train_tgt_file = os.path.join(target_dir, "train.tgt.txt")
    validation_src_file = os.path.join(target_dir, "validation.src.txt")
    validation_tgt_file = os.path.join(target_dir, "validation.tgt.txt")
    # Raw CSV datasets plus any pre-tokenized src/tgt text pairs.
    csv_datasets = glob.glob("%s/*.csv"%datasets_dir)
    src_datasets = glob.glob("%s/*.src.txt"%datasets_dir)
    tgt_datasets = [f.replace(".src.txt", ".tgt.txt") for f in src_datasets]
    print("== Merging the below lists for CSV/TXT_SRC/TXT_TGT into single train/validation data.==")
    print(csv_datasets)
    print(src_datasets)
    print(tgt_datasets)
    # Working directory for intermediate files derived from the CSVs.
    tmp_dir = os.path.join(target_dir, "tmp")
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)
    src_files = []
    #Transform CSVs into src/tgt pairs of text sentences
    for ds in csv_datasets:
        data_file = ds
        tmppath = os.path.join(tmp_dir, os.path.basename(ds))
        src_file = tmppath.replace(".csv", ".src.csv")
        tgt_file = tmppath.replace(".csv", ".tgt.csv")
        uris_file = tmppath.replace(".csv", ".uri.csv")  # NOTE(review): computed but never used below
        #This is an interim representation, prior to the tokenization. Check out inputSnt2Tkn to tokenization details.
        src_raw_file = tmppath.replace(".csv", ".src.raw.txt")
        src_txt_file = tmppath.replace(".csv", ".src.txt")
        tgt_txt_file = tmppath.replace(".csv", ".tgt.txt")
        # Pipeline: split CSV into source/target, sentence-ize both sides,
        # then tokenize the source into words and characters.
        disjoin_source_target(data_file, src_file, tgt_file)
        source_to_sentences(src_file, src_raw_file)
        target_to_sentences(tgt_file, tgt_txt_file)
        sentence_to_words_and_chars(src_raw_file, src_txt_file)
        print("Produced %s and %s"%(os.path.basename(src_txt_file),os.path.basename(tgt_txt_file)))
        src_files.append(src_txt_file)
    #Note: processing of .src.txt and .tgt.txt is removed from this version.
    print("Merging..")
    print(src_files)
    examples_src=[]
    examples_tgt=[]
    # NOTE(review): these file handles are never closed explicitly;
    # consider a `with` block if this is reworked.
    for src_item in src_files:
        tgt_item = src_item.replace(".src.txt",".tgt.txt")
        examples_src = examples_src + open(src_item, "r+", encoding='utf-8').read().splitlines()
        examples_tgt = examples_tgt + open(tgt_item, "r+", encoding='utf-8').read().splitlines()
    print("Splitting..")
    #validation_idxs = np.random.randint(low=0, high=len(examples_src), size=num_validation).tolist()
    #Instead of random generation, we read the ids from hardcoded list.
    # validation_ids are 1-based; convert to 0-based list indices.
    validation_idxs = [idx-1 for idx in validation_ids]
    validation_src = [examples_src[idx] for idx in validation_idxs]
    validation_tgt = [examples_tgt[idx] for idx in validation_idxs]
    #These are commented out for the final model
    #train_src = [examples_src[idx] for idx in range(len(examples_src)) if idx not in validation_idxs]
    #train_tgt = [examples_tgt[idx] for idx in range(len(examples_src)) if idx not in validation_idxs]
    #We know that model converges and want to use everything for training. Uncomment the above otherwise
    # NOTE: the validation examples are therefore ALSO in the training
    # set in this final-model configuration.
    train_src = examples_src
    train_tgt = examples_tgt
    print("Size %d"%len(train_src))
    save_data(validation_src_file, validation_src)
    save_data(validation_tgt_file, validation_tgt)
    save_data(train_src_file, train_src)
    save_data(train_tgt_file, train_tgt)
| [
"os.path.exists",
"os.makedirs",
"os.path.join",
"numpy.random.seed",
"os.path.basename",
"glob.glob"
] | [((373, 393), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (387, 393), True, 'import numpy as np\n'), ((709, 750), 'os.path.join', 'os.path.join', (['target_dir', '"""train.src.txt"""'], {}), "(target_dir, 'train.src.txt')\n", (721, 750), False, 'import os\n'), ((772, 813), 'os.path.join', 'os.path.join', (['target_dir', '"""train.tgt.txt"""'], {}), "(target_dir, 'train.tgt.txt')\n", (784, 813), False, 'import os\n'), ((840, 886), 'os.path.join', 'os.path.join', (['target_dir', '"""validation.src.txt"""'], {}), "(target_dir, 'validation.src.txt')\n", (852, 886), False, 'import os\n'), ((913, 959), 'os.path.join', 'os.path.join', (['target_dir', '"""validation.tgt.txt"""'], {}), "(target_dir, 'validation.tgt.txt')\n", (925, 959), False, 'import os\n'), ((981, 1017), 'glob.glob', 'glob.glob', (["('%s/*.csv' % datasets_dir)"], {}), "('%s/*.csv' % datasets_dir)\n", (990, 1017), False, 'import glob\n'), ((1036, 1076), 'glob.glob', 'glob.glob', (["('%s/*.src.txt' % datasets_dir)"], {}), "('%s/*.src.txt' % datasets_dir)\n", (1045, 1076), False, 'import glob\n'), ((1341, 1372), 'os.path.join', 'os.path.join', (['target_dir', '"""tmp"""'], {}), "(target_dir, 'tmp')\n", (1353, 1372), False, 'import os\n'), ((1384, 1407), 'os.path.exists', 'os.path.exists', (['tmp_dir'], {}), '(tmp_dir)\n', (1398, 1407), False, 'import os\n'), ((1417, 1437), 'os.makedirs', 'os.makedirs', (['tmp_dir'], {}), '(tmp_dir)\n', (1428, 1437), False, 'import os\n'), ((1609, 1629), 'os.path.basename', 'os.path.basename', (['ds'], {}), '(ds)\n', (1625, 1629), False, 'import os\n'), ((2367, 2397), 'os.path.basename', 'os.path.basename', (['src_txt_file'], {}), '(src_txt_file)\n', (2383, 2397), False, 'import os\n'), ((2398, 2428), 'os.path.basename', 'os.path.basename', (['tgt_txt_file'], {}), '(tgt_txt_file)\n', (2414, 2428), False, 'import os\n')] |
#!/usr/bin/env python
from pathlib import Path
import subprocess
import numpy as np
import pytest
R = Path(__file__).resolve().parents[1]
def test_bsr():
    """Run the Octave-side test suite (Test.m) from the tests directory."""
    pytest.importorskip('oct2py')
    octave_cmd = ['octave-cli', '-q', 'Test.m']
    subprocess.check_call(octave_cmd, cwd=R / 'tests')
def test_wideangle_scatter():
    """Exercise wide_angle_scatter over a sweep of transmit grazing angles."""
    oct2py = pytest.importorskip('oct2py')
    params = {
        'grazRx': 2.0,
        'grazTx': np.arange(0.1, 10, .1),
        'TxPol': 'V',
        'FGHz': 1.5,
        'SeaState': 3,
    }
    with oct2py.Oct2Py() as oc:
        oc.addpath(str(R))
        sigmaCo = oc.wide_angle_scatter(params, 0., 0.).squeeze()
    print(sigmaCo)
print(sigmaCo)
def test_specular():
    """Check specular_reflection across sea states with tabulated wave heights."""
    oct2py = pytest.importorskip('oct2py')
    freqGHz = 10  # per pg. 4 of Report
    # (sea state, sigmaH) pairs from Table 1 of the Report for SS 0,2,3,5,6.
    cases = [(0, 0.01), (2, 0.11), (3, 0.29), (5, 1.03), (6, 1.61)]
    gamma = np.arange(0, 6, .1)
    with oct2py.Oct2Py() as oc:
        for _seastate, sigma_h in cases:
            Rhos = oc.specular_reflection(sigma_h, gamma, freqGHz).squeeze()
            print(Rhos)
print(Rhos)
if __name__ == '__main__':
    # Allow running this test module directly, without invoking pytest.
    pytest.main(['-xrsv', __file__])
| [
"pathlib.Path",
"subprocess.check_call",
"pytest.main",
"pytest.importorskip",
"numpy.arange"
] | [((161, 190), 'pytest.importorskip', 'pytest.importorskip', (['"""oct2py"""'], {}), "('oct2py')\n", (180, 190), False, 'import pytest\n'), ((195, 265), 'subprocess.check_call', 'subprocess.check_call', (["['octave-cli', '-q', 'Test.m']"], {'cwd': "(R / 'tests')"}), "(['octave-cli', '-q', 'Test.m'], cwd=R / 'tests')\n", (216, 265), False, 'import subprocess\n'), ((311, 340), 'pytest.importorskip', 'pytest.importorskip', (['"""oct2py"""'], {}), "('oct2py')\n", (330, 340), False, 'import pytest\n'), ((659, 688), 'pytest.importorskip', 'pytest.importorskip', (['"""oct2py"""'], {}), "('oct2py')\n", (678, 688), False, 'import pytest\n'), ((856, 876), 'numpy.arange', 'np.arange', (['(0)', '(6)', '(0.1)'], {}), '(0, 6, 0.1)\n', (865, 876), True, 'import numpy as np\n'), ((1080, 1112), 'pytest.main', 'pytest.main', (["['-xrsv', __file__]"], {}), "(['-xrsv', __file__])\n", (1091, 1112), False, 'import pytest\n'), ((385, 408), 'numpy.arange', 'np.arange', (['(0.1)', '(10)', '(0.1)'], {}), '(0.1, 10, 0.1)\n', (394, 408), True, 'import numpy as np\n'), ((103, 117), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (107, 117), False, 'from pathlib import Path\n')] |
# Author: <NAME>
# Date: 5/21/2019
# Reference Formula: https://anomaly.io/understand-auto-cross-correlation-normalized-shift/
# Referrence Video: https://www.youtube.com/watch?v=ngEC3sXeUb4
import math
from scipy.signal import fftconvolve
import numpy as np
# It implements the normalized, and standard correlation and an extra
# method to calculate the Discrete Linear Convolution
class Correlate:
    """Normalized and standard cross-correlation for discrete signals,
    plus a discrete linear convolution helper.

    The correlation methods also stash the raw inputs on the instance
    (``signal_1`` / ``signal_2``) for later inspection.
    """

    def __init__(self):
        # Most recent pair of input signals; None until a correlation
        # method has been called.
        self.signal_1 = None
        self.signal_2 = None

    # norm_corr(x, y) = sum(x[n]*y[n]) / sqrt(sum(x^2) * sum(y^2)),
    # 0 <= n <= n-1.  Inputs are scaled by 1/10000 first; the scaling
    # cancels out of the normalized ratio.
    def normalized_correlation(self, x1, x2):
        self.signal_1 = np.array(x1)
        self.signal_2 = np.array(x2)
        a = self.signal_1 / 10000
        b = self.signal_2 / 10000
        denominator = math.sqrt(sum(a ** 2) * sum(b ** 2))
        return sum(a * b) / denominator

    # corr(x, y) = sum(x[n]*y[n]) over the overlapping length, after
    # scaling both inputs by 1/10000.
    def standard_correlate(self, x1, x2):
        self.signal_1 = x1
        self.signal_2 = x2
        a = np.array(x1) / 10000
        b = np.array(x2) / 10000
        overlap = min(len(a), len(b))
        return sum(a[:overlap] * b[:overlap])

    # (f*g)(n) = sum_k f(k) * g(n-k); same-size output computed via FFT.
    # http://pilot.cnxproject.org/content/collection/col10064/latest/module/m10087/latest
    def discrete_linear_convolution(self, f, g):
        return fftconvolve(f, g, mode='same')
| [
"numpy.array",
"scipy.signal.fftconvolve"
] | [((640, 652), 'numpy.array', 'np.array', (['x1'], {}), '(x1)\n', (648, 652), True, 'import numpy as np\n'), ((677, 689), 'numpy.array', 'np.array', (['x2'], {}), '(x2)\n', (685, 689), True, 'import numpy as np\n'), ((1367, 1397), 'scipy.signal.fftconvolve', 'fftconvolve', (['f', 'g'], {'mode': '"""same"""'}), "(f, g, mode='same')\n", (1378, 1397), False, 'from scipy.signal import fftconvolve\n'), ((703, 715), 'numpy.array', 'np.array', (['x1'], {}), '(x1)\n', (711, 715), True, 'import numpy as np\n'), ((735, 747), 'numpy.array', 'np.array', (['x2'], {}), '(x2)\n', (743, 747), True, 'import numpy as np\n'), ((983, 995), 'numpy.array', 'np.array', (['x1'], {}), '(x1)\n', (991, 995), True, 'import numpy as np\n'), ((1015, 1027), 'numpy.array', 'np.array', (['x2'], {}), '(x2)\n', (1023, 1027), True, 'import numpy as np\n')] |
##
## Copyright (c) 2019
##
## @author: <NAME>
## @company: Technische Universität Berlin
##
## This file is part of the python package analyticcenter
## (see https://gitlab.tu-berlin.de/PassivityRadius/analyticcenter/)
##
## License: 3-clause BSD, see https://opensource.org/licenses/BSD-3-Clause
##
import control
import numpy as np
from analyticcenter import WeightedSystem
RR = 100. # Resistor Value
L = 0.1e-1 # Inductance
C = 1.e-1 # Capacitor
# RR = 1. # Resistor Value
# L = 1. # Inductance
# C = 1. # Capacitor
p = 0 # Number of systems to connect
num = np.array([1 / (L * C)])
den = np.array([1, RR / L, 1 / (L * C)])
#
# num = np.array([1 / (RR * C), 0])
# den = np.array([1, 1 / (RR * C), 1 / (L * C)])
tf = control.tf(num, den)
ss = tf
for i in range(p):
ss = control.series(ss, tf)
sys = control.tf2ss(ss)
A = sys.A
B = sys.B
C = np.asmatrix(sys.C)
D = np.asmatrix(sys.D) + 1
# D = np.matrix([1])
n = A.shape[0]
Q = np.zeros((n, n))
S = C.H
R = D + D.H
sys = WeightedSystem(A, B, C, D, Q, S, R)
# sys.check_passivity()
| [
"numpy.asmatrix",
"analyticcenter.WeightedSystem",
"control.tf2ss",
"numpy.array",
"numpy.zeros",
"control.tf",
"control.series"
] | [((574, 597), 'numpy.array', 'np.array', (['[1 / (L * C)]'], {}), '([1 / (L * C)])\n', (582, 597), True, 'import numpy as np\n'), ((604, 638), 'numpy.array', 'np.array', (['[1, RR / L, 1 / (L * C)]'], {}), '([1, RR / L, 1 / (L * C)])\n', (612, 638), True, 'import numpy as np\n'), ((733, 753), 'control.tf', 'control.tf', (['num', 'den'], {}), '(num, den)\n', (743, 753), False, 'import control\n'), ((821, 838), 'control.tf2ss', 'control.tf2ss', (['ss'], {}), '(ss)\n', (834, 838), False, 'import control\n'), ((863, 881), 'numpy.asmatrix', 'np.asmatrix', (['sys.C'], {}), '(sys.C)\n', (874, 881), True, 'import numpy as np\n'), ((951, 967), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (959, 967), True, 'import numpy as np\n'), ((994, 1029), 'analyticcenter.WeightedSystem', 'WeightedSystem', (['A', 'B', 'C', 'D', 'Q', 'S', 'R'], {}), '(A, B, C, D, Q, S, R)\n', (1008, 1029), False, 'from analyticcenter import WeightedSystem\n'), ((791, 813), 'control.series', 'control.series', (['ss', 'tf'], {}), '(ss, tf)\n', (805, 813), False, 'import control\n'), ((886, 904), 'numpy.asmatrix', 'np.asmatrix', (['sys.D'], {}), '(sys.D)\n', (897, 904), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
import gzip
import cPickle
import sys
sys.path.extend(['alg/'])
import vcl
import coreset
import utils
class SplitMnistGenerator():
def __init__(self):
# Open data file
f = gzip.open('data/mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
# Define train and test data
self.X_train = np.vstack((train_set[0], valid_set[0]))
self.X_test = test_set[0]
self.train_label = np.hstack((train_set[1], valid_set[1]))
self.test_label = test_set[1]
# split MNIST
task1 = [0, 1]
task2 = [2, 3]
task3 = [4, 5]
task4 = [6, 7]
task5 = [8, 9]
self.sets = [task1, task2, task3, task4, task5]
self.max_iter = len(self.sets)
self.out_dim = 0 # Total number of unique classes
self.class_list = [] # List of unique classes being considered, in the order they appear
for task_id in range(self.max_iter):
for class_index in range(len(self.sets[task_id])):
if self.sets[task_id][class_index] not in self.class_list:
# Convert from MNIST digit numbers to class index number by using self.class_list.index(),
# which is done in self.classes
self.class_list.append(self.sets[task_id][class_index])
self.out_dim = self.out_dim + 1
# self.classes is the classes (with correct indices for training/testing) of interest at each task_id
self.classes = []
for task_id in range(self.max_iter):
class_idx = []
for i in range(len(self.sets[task_id])):
class_idx.append(self.class_list.index(self.sets[task_id][i]))
self.classes.append(class_idx)
self.cur_iter = 0
def get_dims(self):
# Get data input and output dimensions
return self.X_train.shape[1], self.out_dim
def next_task(self):
if self.cur_iter >= self.max_iter:
raise Exception('Number of tasks exceeded!')
else:
next_x_train = []
next_y_train = []
next_x_test = []
next_y_test = []
# Loop over all classes in current iteration
for class_index in range(np.size(self.sets[self.cur_iter])):
# Find the correct set of training inputs
train_id = np.where(self.train_label == self.sets[self.cur_iter][class_index])[0]
# Stack the training inputs
if class_index == 0:
next_x_train = self.X_train[train_id]
else:
next_x_train = np.vstack((next_x_train, self.X_train[train_id]))
# Initialise next_y_train to zeros, then change relevant entries to ones, and then stack
next_y_train_interm = np.zeros((len(train_id), self.out_dim))
next_y_train_interm[:, self.classes[self.cur_iter][class_index]] = 1
if class_index == 0:
next_y_train = next_y_train_interm
else:
next_y_train = np.vstack((next_y_train, next_y_train_interm))
# Repeat above process for test inputs
test_id = np.where(self.test_label == self.sets[self.cur_iter][class_index])[0]
if class_index == 0:
next_x_test = self.X_test[test_id]
else:
next_x_test = np.vstack((next_x_test, self.X_test[test_id]))
next_y_test_interm = np.zeros((len(test_id), self.out_dim))
next_y_test_interm[:, self.classes[self.cur_iter][class_index]] = 1
if class_index == 0:
next_y_test = next_y_test_interm
else:
next_y_test = np.vstack((next_y_test, next_y_test_interm))
self.cur_iter += 1
return next_x_train, next_y_train, next_x_test, next_y_test
def reset(self):
self.cur_iter = 0
store_weights = True # Store weights after training on each task (for plotting later)
multi_head = True # Multi-head or single-head network
hidden_size = [200] # Size and number of hidden layers
batch_size = 256 # Batch size
no_epochs = 600 # Number of training epochs per task
# No coreset
tf.compat.v1.reset_default_graph()
random_seed = 0
tf.compat.v1.set_random_seed(random_seed+1)
np.random.seed(random_seed)
path = 'model_storage/split/' # Path where to store files
data_gen = SplitMnistGenerator()
coreset_size = 0
vcl_result = vcl.run_vcl_shared(hidden_size, no_epochs, data_gen,
coreset.rand_from_batch, coreset_size, batch_size, path, multi_head, store_weights=store_weights)
# Store accuracies
np.savez(path + 'test_acc.npz', acc=vcl_result)
# Random coreset
tf.compat.v1.reset_default_graph()
random_seed = 0
tf.compat.v1.set_random_seed(random_seed+1)
np.random.seed(random_seed)
path = 'model_storage/split_coreset/' # Path where to store files
data_gen = SplitMnistGenerator()
coreset_size = 40
vcl_result_coresets = vcl.run_vcl_shared(hidden_size, no_epochs, data_gen,
coreset.rand_from_batch, coreset_size, batch_size, path, multi_head, store_weights=store_weights)
# Store accuracies
np.savez(path + 'test_acc.npz', acc=vcl_result_coresets)
# Plot average accuracy
utils.plot('model_storage/split_mnist_', vcl_result, vcl_result_coresets) | [
"vcl.run_vcl_shared",
"numpy.savez",
"gzip.open",
"numpy.hstack",
"numpy.where",
"numpy.size",
"utils.plot",
"sys.path.extend",
"numpy.random.seed",
"tensorflow.compat.v1.set_random_seed",
"numpy.vstack",
"tensorflow.compat.v1.reset_default_graph",
"cPickle.load"
] | [((81, 106), 'sys.path.extend', 'sys.path.extend', (["['alg/']"], {}), "(['alg/'])\n", (96, 106), False, 'import sys\n'), ((4431, 4465), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (4463, 4465), True, 'import tensorflow as tf\n'), ((4482, 4527), 'tensorflow.compat.v1.set_random_seed', 'tf.compat.v1.set_random_seed', (['(random_seed + 1)'], {}), '(random_seed + 1)\n', (4510, 4527), True, 'import tensorflow as tf\n'), ((4526, 4553), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (4540, 4553), True, 'import numpy as np\n'), ((4678, 4837), 'vcl.run_vcl_shared', 'vcl.run_vcl_shared', (['hidden_size', 'no_epochs', 'data_gen', 'coreset.rand_from_batch', 'coreset_size', 'batch_size', 'path', 'multi_head'], {'store_weights': 'store_weights'}), '(hidden_size, no_epochs, data_gen, coreset.\n rand_from_batch, coreset_size, batch_size, path, multi_head,\n store_weights=store_weights)\n', (4696, 4837), False, 'import vcl\n'), ((4853, 4900), 'numpy.savez', 'np.savez', (["(path + 'test_acc.npz')"], {'acc': 'vcl_result'}), "(path + 'test_acc.npz', acc=vcl_result)\n", (4861, 4900), True, 'import numpy as np\n'), ((4920, 4954), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (4952, 4954), True, 'import tensorflow as tf\n'), ((4971, 5016), 'tensorflow.compat.v1.set_random_seed', 'tf.compat.v1.set_random_seed', (['(random_seed + 1)'], {}), '(random_seed + 1)\n', (4999, 5016), True, 'import tensorflow as tf\n'), ((5015, 5042), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (5029, 5042), True, 'import numpy as np\n'), ((5185, 5344), 'vcl.run_vcl_shared', 'vcl.run_vcl_shared', (['hidden_size', 'no_epochs', 'data_gen', 'coreset.rand_from_batch', 'coreset_size', 'batch_size', 'path', 'multi_head'], {'store_weights': 'store_weights'}), '(hidden_size, no_epochs, data_gen, coreset.\n rand_from_batch, coreset_size, 
batch_size, path, multi_head,\n store_weights=store_weights)\n', (5203, 5344), False, 'import vcl\n'), ((5360, 5416), 'numpy.savez', 'np.savez', (["(path + 'test_acc.npz')"], {'acc': 'vcl_result_coresets'}), "(path + 'test_acc.npz', acc=vcl_result_coresets)\n", (5368, 5416), True, 'import numpy as np\n'), ((5442, 5515), 'utils.plot', 'utils.plot', (['"""model_storage/split_mnist_"""', 'vcl_result', 'vcl_result_coresets'], {}), "('model_storage/split_mnist_', vcl_result, vcl_result_coresets)\n", (5452, 5515), False, 'import utils\n'), ((238, 274), 'gzip.open', 'gzip.open', (['"""data/mnist.pkl.gz"""', '"""rb"""'], {}), "('data/mnist.pkl.gz', 'rb')\n", (247, 274), False, 'import gzip\n'), ((316, 331), 'cPickle.load', 'cPickle.load', (['f'], {}), '(f)\n', (328, 331), False, 'import cPickle\n'), ((411, 450), 'numpy.vstack', 'np.vstack', (['(train_set[0], valid_set[0])'], {}), '((train_set[0], valid_set[0]))\n', (420, 450), True, 'import numpy as np\n'), ((512, 551), 'numpy.hstack', 'np.hstack', (['(train_set[1], valid_set[1])'], {}), '((train_set[1], valid_set[1]))\n', (521, 551), True, 'import numpy as np\n'), ((2351, 2384), 'numpy.size', 'np.size', (['self.sets[self.cur_iter]'], {}), '(self.sets[self.cur_iter])\n', (2358, 2384), True, 'import numpy as np\n'), ((2473, 2540), 'numpy.where', 'np.where', (['(self.train_label == self.sets[self.cur_iter][class_index])'], {}), '(self.train_label == self.sets[self.cur_iter][class_index])\n', (2481, 2540), True, 'import numpy as np\n'), ((2740, 2789), 'numpy.vstack', 'np.vstack', (['(next_x_train, self.X_train[train_id])'], {}), '((next_x_train, self.X_train[train_id]))\n', (2749, 2789), True, 'import numpy as np\n'), ((3208, 3254), 'numpy.vstack', 'np.vstack', (['(next_y_train, next_y_train_interm)'], {}), '((next_y_train, next_y_train_interm))\n', (3217, 3254), True, 'import numpy as np\n'), ((3337, 3403), 'numpy.where', 'np.where', (['(self.test_label == self.sets[self.cur_iter][class_index])'], {}), '(self.test_label == 
self.sets[self.cur_iter][class_index])\n', (3345, 3403), True, 'import numpy as np\n'), ((3555, 3601), 'numpy.vstack', 'np.vstack', (['(next_x_test, self.X_test[test_id])'], {}), '((next_x_test, self.X_test[test_id]))\n', (3564, 3601), True, 'import numpy as np\n'), ((3909, 3953), 'numpy.vstack', 'np.vstack', (['(next_y_test, next_y_test_interm)'], {}), '((next_y_test, next_y_test_interm))\n', (3918, 3953), True, 'import numpy as np\n')] |
import os
import sys
from pbstools import PythonJob
from shutil import copyfile
import datetime
import numpy as np
python_file = r"/home/jeromel/Documents/Projects/Deep2P/repos/deepinterpolation/examples/cluster_lib/generic_ephys_process_sync.py"
output_folder = "/allen/programs/braintv/workgroups/neuralcoding/Neuropixels_Data/neuropixels_10_sessions/778998620_419112_20181114_probeD/processed_2020_03_02"
model_file = "/allen/programs/braintv/workgroups/neuralcoding/Neuropixels_Data/neuropixels_10_sessions/778998620_419112_20181114_probeD/trained_models/unet_single_ephys_1024_mean_squared_error_2020_02_29_15_28/2020_02_29_15_28_unet_single_ephys_1024_mean_squared_error-1050.h5"
dat_file = "/allen/programs/braintv/workgroups/neuralcoding/Neuropixels_Data/neuropixels_10_sessions/778998620_419112_20181114_probeD/continuous.dat2"
nb_probes = 384
raw_data = np.memmap(dat_file, dtype="int16")
img_per_movie = int(raw_data.size / nb_probes)
pre_post_frame = 30
pre_post_omission = 1
end_frame = img_per_movie - pre_post_frame - pre_post_omission - 1
nb_jobs = 200
now = datetime.datetime.now()
run_uid = now.strftime("%Y_%m_%d_%H_%M")
jobdir = output_folder
start_frame = pre_post_omission + pre_post_frame
try:
os.mkdir(jobdir)
except:
print("folder already exists")
output_terminal = os.path.join(jobdir, run_uid + "_running_terminal.txt")
script_basename = os.path.basename(__file__)
copyfile(
os.path.realpath(__file__), os.path.join(jobdir, run_uid + "_" + script_basename)
)
job_settings = {
"queue": "braintv",
"mem": "250g",
"walltime": "24:00:00",
"ppn": 16,
}
job_settings.update(
{
"outfile": os.path.join(jobdir, "$PBS_JOBID.out"),
"errfile": os.path.join(jobdir, "$PBS_JOBID.err"),
"email": "<EMAIL>",
"email_options": "a",
}
)
arg_to_pass = (
" --dat_file "
+ dat_file
+ " --output_folder "
+ output_folder
+ " --model_file "
+ model_file
)
arg_to_pass += (
" --start_frame "
+ str(start_frame)
+ " --end_frame "
+ str(end_frame)
+ " --pre_post_frame "
+ str(pre_post_frame)
)
arg_to_pass += (
" --nb_jobs " + str(nb_jobs) + " --pre_post_omission " + str(pre_post_omission)
)
PythonJob(
python_file,
python_executable="/home/jeromel/.conda/envs/deep_work2/bin/python",
conda_env="deep_work2",
jobname="movie_2p",
python_args=arg_to_pass + " > " + output_terminal,
**job_settings
).run(dryrun=False)
| [
"numpy.memmap",
"os.path.join",
"os.path.realpath",
"datetime.datetime.now",
"os.mkdir",
"os.path.basename",
"pbstools.PythonJob"
] | [((867, 901), 'numpy.memmap', 'np.memmap', (['dat_file'], {'dtype': '"""int16"""'}), "(dat_file, dtype='int16')\n", (876, 901), True, 'import numpy as np\n'), ((1080, 1103), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1101, 1103), False, 'import datetime\n'), ((1307, 1362), 'os.path.join', 'os.path.join', (['jobdir', "(run_uid + '_running_terminal.txt')"], {}), "(jobdir, run_uid + '_running_terminal.txt')\n", (1319, 1362), False, 'import os\n'), ((1381, 1407), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1397, 1407), False, 'import os\n'), ((1228, 1244), 'os.mkdir', 'os.mkdir', (['jobdir'], {}), '(jobdir)\n', (1236, 1244), False, 'import os\n'), ((1422, 1448), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1438, 1448), False, 'import os\n'), ((1450, 1503), 'os.path.join', 'os.path.join', (['jobdir', "(run_uid + '_' + script_basename)"], {}), "(jobdir, run_uid + '_' + script_basename)\n", (1462, 1503), False, 'import os\n'), ((1659, 1697), 'os.path.join', 'os.path.join', (['jobdir', '"""$PBS_JOBID.out"""'], {}), "(jobdir, '$PBS_JOBID.out')\n", (1671, 1697), False, 'import os\n'), ((1718, 1756), 'os.path.join', 'os.path.join', (['jobdir', '"""$PBS_JOBID.err"""'], {}), "(jobdir, '$PBS_JOBID.err')\n", (1730, 1756), False, 'import os\n'), ((2227, 2443), 'pbstools.PythonJob', 'PythonJob', (['python_file'], {'python_executable': '"""/home/jeromel/.conda/envs/deep_work2/bin/python"""', 'conda_env': '"""deep_work2"""', 'jobname': '"""movie_2p"""', 'python_args': "(arg_to_pass + ' > ' + output_terminal)"}), "(python_file, python_executable=\n '/home/jeromel/.conda/envs/deep_work2/bin/python', conda_env=\n 'deep_work2', jobname='movie_2p', python_args=arg_to_pass + ' > ' +\n output_terminal, **job_settings)\n", (2236, 2443), False, 'from pbstools import PythonJob\n')] |
import numpy as np
from soco_openqa.soco_mrc.mrc_model import MrcModel
from collections import defaultdict
class Reader:
def __init__(self, model):
self.model_id = model
self.reader = MrcModel('us', n_gpu=1)
self.thresh = 0.8
def predict(self, query, top_passages):
batch = [{'q': query, 'doc': p['answer']} for p in top_passages]
preds = self.reader.batch_predict(
self.model_id,
batch,
merge_pred=True,
stride=128,
batch_size=50
)
candidates = defaultdict(list)
for a_id, a in enumerate(preds):
if a.get('missing_warning'):
continue
score = self.thresh * (a['score']) + (1 - self.thresh) * (top_passages[a_id]['score'])
candidates[a['value']].append({'combined_score': score,
'reader_score':a['score'],
'ranker_score':top_passages[a_id]['score'],
'idx': a_id,
'prob': a['prob'],
'answer_span': a['answer_span']})
# get best passages with best answer
answers = []
for k, v in candidates.items():
combined_scores = [x['combined_score'] for x in v]
reader_scores = [x['reader_score'] for x in v]
ranker_scores = [x['ranker_score'] for x in v]
idxes = [x['idx'] for x in v]
best_idx = int(np.argmax(combined_scores))
best_a_id = idxes[best_idx]
answers.append({'value': k,
'score': combined_scores[best_idx],
'reader_score': reader_scores[best_idx],
'ranker_score': ranker_scores[best_idx],
'prob': v[best_idx]['prob'],
'answer_span': v[best_idx]['answer_span'],
"source": {
'context': top_passages[best_a_id]['answer'],
'url': top_passages[best_a_id].get('meta', {}).get('url'),
'doc_id': top_passages[best_a_id].get('meta', {}).get('doc_id')
}
})
answers = sorted(answers, key=lambda x: x['score'], reverse=True)
return answers
| [
"soco_openqa.soco_mrc.mrc_model.MrcModel",
"collections.defaultdict",
"numpy.argmax"
] | [((207, 230), 'soco_openqa.soco_mrc.mrc_model.MrcModel', 'MrcModel', (['"""us"""'], {'n_gpu': '(1)'}), "('us', n_gpu=1)\n", (215, 230), False, 'from soco_openqa.soco_mrc.mrc_model import MrcModel\n'), ((628, 645), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (639, 645), False, 'from collections import defaultdict\n'), ((1619, 1645), 'numpy.argmax', 'np.argmax', (['combined_scores'], {}), '(combined_scores)\n', (1628, 1645), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from munch import Munch
from plaster.run.priors import ParamsAndPriors, Prior, Priors
from plaster.tools.aaseq.aaseq import aa_str_to_list
from plaster.tools.schema import check
from plaster.tools.schema.schema import Schema as s
from plaster.tools.utils import utils
from plaster.tools.c_common import c_common_tools
class SimV2Params(ParamsAndPriors):
channel__priors__columns = (
"ch_i",
"channel_name",
"bg_mu",
"bg_sigma",
"dye_name",
"gain_mu",
"gain_sigma",
"index",
"p_bleach",
"row_k_sigma",
)
dye__label__priors__columns = (
"channel_name",
"dye_name",
"aa",
"label_name",
"ptm_only",
"p_non_fluorescent",
"ch_i",
"bg_mu",
"bg_sigma",
"gain_mu",
"gain_sigma",
"row_k_sigma",
"p_bleach",
)
defaults = Munch(
n_pres=1,
n_mocks=0,
n_edmans=1,
dyes=[],
labels=[],
allow_edman_cterm=False,
enable_ptm_labels=False,
use_lognormal_model=False,
is_survey=False,
n_samples_train=5000,
n_samples_test=1000,
)
schema = s(
s.is_kws_r(
priors_desc=Priors.priors_desc_schema,
n_pres=s.is_int(bounds=(0, None)),
n_mocks=s.is_int(bounds=(0, None)),
n_edmans=s.is_int(bounds=(0, None)),
n_samples_train=s.is_int(bounds=(1, None)),
n_samples_test=s.is_int(bounds=(1, None)),
dyes=s.is_list(
elems=s.is_kws_r(dye_name=s.is_str(), channel_name=s.is_str(),)
),
labels=s.is_list(
elems=s.is_kws_r(
aa=s.is_str(),
dye_name=s.is_str(),
label_name=s.is_str(),
ptm_only=s.is_bool(required=False, noneable=True),
)
),
channels=s.is_dict(required=False),
allow_edman_cterm=s.is_bool(required=False, noneable=True),
enable_ptm_labels=s.is_bool(required=False, noneable=True),
use_lognormal_model=s.is_bool(),
is_survey=s.is_bool(),
)
)
def __init__(self, **kwargs):
# _skip_setup_dfs is True in fixture mode
super().__init__(source="SimV2Params", **kwargs)
self._setup_dfs()
def validate(self):
super().validate()
all_dye_names = list(set([d.dye_name for d in self.dyes]))
# No duplicate dye names
self._validate(
len(all_dye_names) == len(self.dyes), "The dye list contains a duplicate"
)
# No duplicate labels
self._validate(
len(list(set(utils.listi(self.labels, "aa")))) == len(self.labels),
"There is a duplicate label in the label_set",
)
# All labels have a legit dye name
[
self._validate(
label.dye_name in all_dye_names,
f"Label {label.label_name} does not have a valid matching dye_name",
)
for label in self.labels
]
# Channel mappings
mentioned_channels = {dye.channel_name: False for dye in self.dyes}
if "channels" in self:
# Validate that channel mapping is complete
for channel_name, ch_i in self.channels.items():
self._validate(
channel_name in mentioned_channels,
f"Channel name '{channel_name}' was not found in dyes",
)
mentioned_channels[channel_name] = True
self._validate(
all([mentioned for _, mentioned in mentioned_channels.items()]),
"Not all channels in dyes were enumerated in channels",
)
else:
# No channel mapping: assign them
self["channels"] = {
ch_name: i
for i, ch_name in enumerate(sorted(mentioned_channels.keys()))
}
@property
def n_cycles(self):
return self.n_pres + self.n_mocks + self.n_edmans
def channel_names(self):
return [
ch_name
for ch_name, _ in sorted(self.channels.items(), key=lambda item: item[1])
]
def ch_i_by_name(self):
return self.channels
@property
def n_channels(self):
# if self.is_photobleaching_run:
# return 1
return len(self.channels)
@property
def n_channels_and_cycles(self):
return self.n_channels, self.n_cycles
def _setup_dfs(self):
"""
Assemble all of the priors into several dataframes indexed differently.
(Call after validate)
* self.channel__priors:
ch_i,
ch_name,
bg_mu,
bg_sigma,
gain_mu,
gain_sigma,
row_k_sigma,
p_bleach
--> Note, does NOT have p_non_fluorescent because this is a dye property
* self.dye__label__priors:
aa,
label_name,
dye_name,
ch_i,
ch_name,
bg_mu,
bg_sigma,
gain_mu,
gain_sigma,
row_k_sigma,
p_bleach
p_non_fluorescent,
"""
# if self.is_photobleaching_run:
# # Not sure what these should be yet
# # self._ch_by_aa = {}
# # self._channel__priors = pd.DataFrame(columns=self.channel__priors__columns)
# # self._dye__label__priors = pd.DataFrame(columns=self.dye__label__priors__columns)
# self.dyes = [Munch(dye_name="zero", channel_name="zero")]
# self.channels = Munch(zero=0)
# self.labels = [
# dict(aa=".", dye_name="zero", label_name="zero", ptm_only=False)
# ]
labels_df = pd.DataFrame(self.labels)
# labels_df: (aa, dye_name, label_name, ptm_only)
# assert len(labels_df) > 0
dyes_df = pd.DataFrame(self.dyes)
# dyes_df: (dye_name, channel_name)
# assert len(dyes_df) > 0
# LOOKUP dye priors
dye_priors = []
for dye in self.dyes:
# SEARCH priors by dye name and if not found by channel
p_non_fluorescent = self.priors.get_exact(
f"p_non_fluorescent.{dye.dye_name}"
)
if p_non_fluorescent is None:
p_non_fluorescent = self.priors.get(
f"p_non_fluorescent.ch_{dye.channel_name}"
)
dye_priors += [
Munch(dye_name=dye.dye_name, p_non_fluorescent=p_non_fluorescent.prior,)
]
dye_priors_df = pd.DataFrame(dye_priors)
# dye_priors_df: (dye_name, p_non_fluorescent)
dyes_df = utils.easy_join(dyes_df, dye_priors_df, "dye_name")
# dyes_df: (dye_name, channel_name, p_non_fluorescent)
# TODO: LOOKUP label priors
# (p_failure_to_bind_aa, p_failure_to_attach_to_dye)
# LOOKUP channel priors
ch_priors = pd.DataFrame(
[
dict(
channel_name=channel_name,
ch_i=ch_i,
bg_mu=self.priors.get(f"bg_mu.ch_{ch_i}").prior,
bg_sigma=self.priors.get(f"bg_sigma.ch_{ch_i}").prior,
gain_mu=self.priors.get(f"gain_mu.ch_{ch_i}").prior,
gain_sigma=self.priors.get(f"gain_sigma.ch_{ch_i}").prior,
row_k_sigma=self.priors.get(f"row_k_sigma.ch_{ch_i}").prior,
p_bleach=self.priors.get(f"p_bleach.ch_{ch_i}").prior,
)
for channel_name, ch_i in self.channels.items()
]
)
# ch_priors: (channel_name, ch_i, ...)
self._channel__priors = (
utils.easy_join(dyes_df, ch_priors, "channel_name")
.drop(columns=["p_non_fluorescent"])
.drop_duplicates()
.reset_index()
)
# self._channel__priors: (
# 'ch_i', 'channel_name', 'bg_mu', 'bg_sigma', 'dye_name',
# 'gain_mu', 'gain_sigma', 'index', 'p_bleach', 'row_k_sigma',
# )
# SANITY check channel__priors
group_by_ch = self._channel__priors.groupby("ch_i")
for field in (
"bg_mu",
"bg_sigma",
"gain_mu",
"gain_sigma",
"row_k_sigma",
):
assert np.all(group_by_ch[field].nunique() == 1)
assert "p_non_fluorescent" not in self._channel__priors.columns
labels_dyes_df = utils.easy_join(labels_df, dyes_df, "dye_name")
self._dye__label__priors = utils.easy_join(
labels_dyes_df, ch_priors, "channel_name"
).reset_index(drop=True)
# self._dye__label__priors: (
# 'channel_name', 'dye_name', 'aa', 'label_name',
# 'ptm_only', 'p_non_fluorescent', 'ch_i', 'bg_mu', 'bg_sigma',
# 'gain_mu', 'gain_sigma', 'row_k_sigma', 'p_bleach'
# )
self._ch_by_aa = {
row.aa: row.ch_i for row in self._dye__label__priors.itertuples()
}
def ch_by_aa(self):
return self._ch_by_aa
def dye__label__priors(self):
"""
DataFrame(
'channel_name', 'dye_name', 'aa', 'label_name',
'ptm_only', 'p_non_fluorescent', 'ch_i', 'bg_mu', 'bg_sigma',
'gain_mu', 'gain_sigma', 'row_k_sigma', 'p_bleach'
)
"""
return self._dye__label__priors
def channel__priors(self):
"""
DataFrame(
'ch_i', 'channel_name', 'bg_mu', 'bg_sigma', 'dye_name',
'gain_mu', 'gain_sigma', 'index', 'p_bleach', 'row_k_sigma',
)
"""
return self._channel__priors
def by_channel(self):
return self._channel__priors.set_index("ch_i")
def to_label_list(self):
"""Summarize labels like: ["DE", "C"]"""
return [
"".join(
[label.aa for label in self.labels if label.dye_name == dye.dye_name]
)
for dye in self.dyes
]
def to_label_str(self):
"""Summarize labels like: DE,C"""
return ",".join(self.to_label_list())
def cycles_array(self):
cycles = np.zeros((self.n_cycles,), dtype=c_common_tools.CycleKindType)
i = 0
for _ in range(self.n_pres):
cycles[i] = c_common_tools.CYCLE_TYPE_PRE
i += 1
for _ in range(self.n_mocks):
cycles[i] = c_common_tools.CYCLE_TYPE_MOCK
i += 1
for _ in range(self.n_edmans):
cycles[i] = c_common_tools.CYCLE_TYPE_EDMAN
i += 1
return cycles
def pcbs(self, pep_seq_df):
"""
pcb stands for (p)ep_i, (c)hannel_i, (b)right_probability
This is a structure that is liek a "flu" but with an extra bright probability.
Each peptide has a row for each amino acid
That row has a columns (pep_i, ch_i, p_bright)
And it will have np.nan for ch_i and p_bright **IF THERE IS NO LABEL**
bright_probability is the inverse of all the ways a dye can fail to be visible
ie the probability that a dye is active.
pep_seq_df: Any DataFrame with an "aa" column
Returns:
contiguous ndarray(:, 3) where there 3 columns are:
pep_i, ch_i, p_bright
"""
labelled_pep_df = pep_seq_df.join(
self.dye__label__priors().set_index("aa"), on="aa", how="left"
)
# p_bright = is the product of (1.0 - ) all the ways the dye can fail to be visible.
labelled_pep_df["p_bright"] = (
# TODO: Sim needs to be converted to use priors sampling
# at which point this function needs to be refactored
# so that the parameters of the priors can be sampled in C.
1.0
- np.array(
[
i.sample() if isinstance(i, Prior) else np.nan
for i in labelled_pep_df.p_non_fluorescent
]
)
# TODO: Add label priors
# * (1.0 - labelled_pep_df.p_failure_to_attach_to_dye)
# * (1.0 - labelled_pep_df.p_failure_to_bind_aa)
)
labelled_pep_df.sort_values(by=["pep_i", "pep_offset_in_pro"], inplace=True)
return np.ascontiguousarray(
labelled_pep_df[["pep_i", "ch_i", "p_bright"]].values
)
@classmethod
def from_aa_list_fixture(cls, aa_list, priors=None, **kwargs):
"""
This is a helper to generate channel when you have a list of aas.
For example, two channels where ch0 is D&E and ch1 is Y.
["DE", "Y"].
"""
check.list_or_tuple_t(aa_list, str)
allowed_aa_mods = ["[", "]"]
assert all(
[
(aa.isalpha() or aa in allowed_aa_mods)
for aas in aa_list
for aa in list(aas)
]
)
dyes = [
Munch(dye_name=f"dye_{ch}", channel_name=f"ch_{ch}")
for ch, _ in enumerate(aa_list)
]
# Note the extra for loop because "DE" needs to be split into "D" & "E"
# which is done by aa_str_to_list() - which also handles PTMs like S[p]
labels = [
Munch(
aa=aa, dye_name=f"dye_{ch}", label_name=f"label_{ch}", ptm_only=False,
)
for ch, aas in enumerate(aa_list)
for aa in aa_str_to_list(aas)
]
return cls(dyes=dyes, labels=labels, priors=priors, **kwargs)
| [
"plaster.tools.schema.schema.Schema.is_bool",
"plaster.tools.schema.schema.Schema.is_str",
"plaster.tools.utils.utils.listi",
"numpy.ascontiguousarray",
"plaster.tools.schema.check.list_or_tuple_t",
"plaster.tools.utils.utils.easy_join",
"numpy.zeros",
"plaster.tools.aaseq.aaseq.aa_str_to_list",
"pl... | [((957, 1161), 'munch.Munch', 'Munch', ([], {'n_pres': '(1)', 'n_mocks': '(0)', 'n_edmans': '(1)', 'dyes': '[]', 'labels': '[]', 'allow_edman_cterm': '(False)', 'enable_ptm_labels': '(False)', 'use_lognormal_model': '(False)', 'is_survey': '(False)', 'n_samples_train': '(5000)', 'n_samples_test': '(1000)'}), '(n_pres=1, n_mocks=0, n_edmans=1, dyes=[], labels=[],\n allow_edman_cterm=False, enable_ptm_labels=False, use_lognormal_model=\n False, is_survey=False, n_samples_train=5000, n_samples_test=1000)\n', (962, 1161), False, 'from munch import Munch\n'), ((6000, 6025), 'pandas.DataFrame', 'pd.DataFrame', (['self.labels'], {}), '(self.labels)\n', (6012, 6025), True, 'import pandas as pd\n'), ((6139, 6162), 'pandas.DataFrame', 'pd.DataFrame', (['self.dyes'], {}), '(self.dyes)\n', (6151, 6162), True, 'import pandas as pd\n'), ((6846, 6870), 'pandas.DataFrame', 'pd.DataFrame', (['dye_priors'], {}), '(dye_priors)\n', (6858, 6870), True, 'import pandas as pd\n'), ((6945, 6996), 'plaster.tools.utils.utils.easy_join', 'utils.easy_join', (['dyes_df', 'dye_priors_df', '"""dye_name"""'], {}), "(dyes_df, dye_priors_df, 'dye_name')\n", (6960, 6996), False, 'from plaster.tools.utils import utils\n'), ((8771, 8818), 'plaster.tools.utils.utils.easy_join', 'utils.easy_join', (['labels_df', 'dyes_df', '"""dye_name"""'], {}), "(labels_df, dyes_df, 'dye_name')\n", (8786, 8818), False, 'from plaster.tools.utils import utils\n'), ((10477, 10539), 'numpy.zeros', 'np.zeros', (['(self.n_cycles,)'], {'dtype': 'c_common_tools.CycleKindType'}), '((self.n_cycles,), dtype=c_common_tools.CycleKindType)\n', (10485, 10539), True, 'import numpy as np\n'), ((12606, 12681), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["labelled_pep_df[['pep_i', 'ch_i', 'p_bright']].values"], {}), "(labelled_pep_df[['pep_i', 'ch_i', 'p_bright']].values)\n", (12626, 12681), True, 'import numpy as np\n'), ((12982, 13017), 'plaster.tools.schema.check.list_or_tuple_t', 'check.list_or_tuple_t', 
(['aa_list', 'str'], {}), '(aa_list, str)\n', (13003, 13017), False, 'from plaster.tools.schema import check\n'), ((13271, 13323), 'munch.Munch', 'Munch', ([], {'dye_name': 'f"""dye_{ch}"""', 'channel_name': 'f"""ch_{ch}"""'}), "(dye_name=f'dye_{ch}', channel_name=f'ch_{ch}')\n", (13276, 13323), False, 'from munch import Munch\n'), ((13570, 13646), 'munch.Munch', 'Munch', ([], {'aa': 'aa', 'dye_name': 'f"""dye_{ch}"""', 'label_name': 'f"""label_{ch}"""', 'ptm_only': '(False)'}), "(aa=aa, dye_name=f'dye_{ch}', label_name=f'label_{ch}', ptm_only=False)\n", (13575, 13646), False, 'from munch import Munch\n'), ((1355, 1381), 'plaster.tools.schema.schema.Schema.is_int', 's.is_int', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (1363, 1381), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((1403, 1429), 'plaster.tools.schema.schema.Schema.is_int', 's.is_int', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (1411, 1429), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((1452, 1478), 'plaster.tools.schema.schema.Schema.is_int', 's.is_int', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (1460, 1478), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((1508, 1534), 'plaster.tools.schema.schema.Schema.is_int', 's.is_int', ([], {'bounds': '(1, None)'}), '(bounds=(1, None))\n', (1516, 1534), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((1563, 1589), 'plaster.tools.schema.schema.Schema.is_int', 's.is_int', ([], {'bounds': '(1, None)'}), '(bounds=(1, None))\n', (1571, 1589), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((2022, 2047), 'plaster.tools.schema.schema.Schema.is_dict', 's.is_dict', ([], {'required': '(False)'}), '(required=False)\n', (2031, 2047), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((2079, 2119), 'plaster.tools.schema.schema.Schema.is_bool', 's.is_bool', ([], {'required': '(False)', 'noneable': '(True)'}), '(required=False, 
noneable=True)\n', (2088, 2119), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((2151, 2191), 'plaster.tools.schema.schema.Schema.is_bool', 's.is_bool', ([], {'required': '(False)', 'noneable': '(True)'}), '(required=False, noneable=True)\n', (2160, 2191), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((2225, 2236), 'plaster.tools.schema.schema.Schema.is_bool', 's.is_bool', ([], {}), '()\n', (2234, 2236), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((2260, 2271), 'plaster.tools.schema.schema.Schema.is_bool', 's.is_bool', ([], {}), '()\n', (2269, 2271), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((6734, 6805), 'munch.Munch', 'Munch', ([], {'dye_name': 'dye.dye_name', 'p_non_fluorescent': 'p_non_fluorescent.prior'}), '(dye_name=dye.dye_name, p_non_fluorescent=p_non_fluorescent.prior)\n', (6739, 6805), False, 'from munch import Munch\n'), ((8854, 8912), 'plaster.tools.utils.utils.easy_join', 'utils.easy_join', (['labels_dyes_df', 'ch_priors', '"""channel_name"""'], {}), "(labels_dyes_df, ch_priors, 'channel_name')\n", (8869, 8912), False, 'from plaster.tools.utils import utils\n'), ((13746, 13765), 'plaster.tools.aaseq.aaseq.aa_str_to_list', 'aa_str_to_list', (['aas'], {}), '(aas)\n', (13760, 13765), False, 'from plaster.tools.aaseq.aaseq import aa_str_to_list\n'), ((2811, 2841), 'plaster.tools.utils.utils.listi', 'utils.listi', (['self.labels', '"""aa"""'], {}), "(self.labels, 'aa')\n", (2822, 2841), False, 'from plaster.tools.utils import utils\n'), ((1661, 1671), 'plaster.tools.schema.schema.Schema.is_str', 's.is_str', ([], {}), '()\n', (1669, 1671), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((1686, 1696), 'plaster.tools.schema.schema.Schema.is_str', 's.is_str', ([], {}), '()\n', (1694, 1696), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((1801, 1811), 'plaster.tools.schema.schema.Schema.is_str', 's.is_str', ([], {}), '()\n', (1809, 1811), True, 
'from plaster.tools.schema.schema import Schema as s\n'), ((1842, 1852), 'plaster.tools.schema.schema.Schema.is_str', 's.is_str', ([], {}), '()\n', (1850, 1852), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((1885, 1895), 'plaster.tools.schema.schema.Schema.is_str', 's.is_str', ([], {}), '()\n', (1893, 1895), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((1926, 1966), 'plaster.tools.schema.schema.Schema.is_bool', 's.is_bool', ([], {'required': '(False)', 'noneable': '(True)'}), '(required=False, noneable=True)\n', (1935, 1966), True, 'from plaster.tools.schema.schema import Schema as s\n'), ((7997, 8048), 'plaster.tools.utils.utils.easy_join', 'utils.easy_join', (['dyes_df', 'ch_priors', '"""channel_name"""'], {}), "(dyes_df, ch_priors, 'channel_name')\n", (8012, 8048), False, 'from plaster.tools.utils import utils\n')] |
import itertools as itt
import numpy as np
from pytriqs.gf import BlockGf, GfImFreq, inverse, GfImTime, make_zero_tail, replace_by_tail, fit_tail_on_window, fit_hermitian_tail_on_window
class MatsubaraGreensFunction(BlockGf):
"""
Greens functions interface to TRIQS. Provides convenient initialization.
gf_init creates a MGF with the same structure as gf_init, but no value initialization
__lshift__, __isub__, __iadd__ had to be extended in order to make them available to childs of
MatsubaraGreensFunction
"""
def __lshift__(self, x):
if isinstance(x, MatsubaraGreensFunction) or isinstance(x, BlockGf):
for i, g in self:
g.copy_from(x[i])
return self
else:
BlockGf.__lshift__(self, x)
def __isub__(self, x):
if isinstance(x, MatsubaraGreensFunction) or isinstance(x, BlockGf):
for (n, g) in self:
self[n] -= x[n]
else:
g = self.get_as_BlockGf()
g -= x
self << g
return self
def __iadd__(self, x):
if isinstance(x, MatsubaraGreensFunction) or isinstance(x, BlockGf):
for (n, g) in self:
self[n] += x[n]
else:
g = self.get_as_BlockGf()
g += x
self << g
return self
def copy(self):
#g = self.__class__(gf_init = self)
g = self.get_as_BlockGf()
g << self
return g
@property
def all_indices(self):
inds = list()
for bn, b in self:
for i, j in itt.product(b.indices[0], b.indices[0]):
inds.append((bn, int(i), int(j)))
return inds
def __init__(self, blocknames=None, blocksizes=None, beta=None, n_iw=1025, name='', gf_init=None, gf_struct=None, verbosity=0, **kwargs):
kwargskeys = [k for k in kwargs.keys()]
if type(gf_init) == BlockGf:
blocknames = [i for i in gf_init.indices]
blocksizes = [len([i for i in b.indices]) for bn, b in gf_init]
beta = gf_init.mesh.beta
n_iw = int(len(gf_init.mesh) * .5)
super(MatsubaraGreensFunction, self).__init__(name_block_generator=[(bn, GfImFreq(
beta=beta, n_points=n_iw, indices=range(bs))) for bn, bs in zip(blocknames, blocksizes)], name=name, make_copies=False)
elif isinstance(gf_init, MatsubaraGreensFunction):
assert isinstance(
gf_init, MatsubaraGreensFunction), "gf_init must be a Matsubara GreensFunction"
blocknames = gf_init.blocknames
blocksizes = gf_init.blocksizes
beta = gf_init.mesh.beta
n_iw = gf_init.n_iw
super(MatsubaraGreensFunction, self).__init__(name_block_generator=[(bn, GfImFreq(
beta=beta, n_points=n_iw, indices=range(bs))) for bn, bs in zip(blocknames, blocksizes)], name=name, make_copies=False)
elif 'name_block_generator' in kwargskeys: # TODO test
blocknames = [block[0]
for block in kwargs['name_block_generator']]
blocksizes = [
block[1].target_shape[0] for block in kwargs['name_block_generator']]
beta = kwargs['name_block_generator'][0][1].mesh.beta
n_iw = int(len(kwargs['name_block_generator'][0][1].mesh) * .5)
super(MatsubaraGreensFunction, self).__init__(**kwargs)
elif 'name_list' in kwargskeys: # TODO test
blocknames = kwargs['name_list']
blocksizes = [g.target_shape[0] for g in kwargs['block_list']]
beta = kwargs['block_list'][0].mesh.beta
n_iw = int(len(kwargs['block_list'][0].mesh) * .5)
super(MatsubaraGreensFunction, self).__init__(**kwargs)
elif gf_struct is not None:
assert type(
gf_struct) == list, "gf_struct must be of list-type here"
blocknames = [b[0] for b in gf_struct]
blocksizes = [len(b[1]) for b in gf_struct]
beta = beta
n_iw = n_iw
super(MatsubaraGreensFunction, self).__init__(name_block_generator=[(bn, GfImFreq(
beta=beta, n_points=n_iw, indices=range(bs))) for bn, bs in zip(blocknames, blocksizes)], name=name, make_copies=False)
else:
assert blocknames is not None and blocksizes is not None and beta is not None and n_iw is not None, "Missing parameter for initialization without gf_init and gf_struct"
assert len(blocknames) == len(
blocksizes), "Number of Block-names and blocks have to equal"
super(MatsubaraGreensFunction, self).__init__(name_block_generator=((bn, GfImFreq(
beta=beta, n_points=n_iw, indices=range(bs))) for bn, bs in zip(blocknames, blocksizes)), name=name, make_copies=False)
self.blocknames = blocknames
self.blocksizes = blocksizes
self.n_iw = n_iw
self.iw_offset = int(.5 * self.n_iw)
self.gf_struct = [(bn, range(bs))
for bn, bs in zip(blocknames, blocksizes)]
self._gf_lastloop = None
self.verbosity = verbosity
def prepare_mix(self):
self._gf_lastloop = self.copy()
def mix(self, coeff):
"""
mixes with the solution of the previous loop, coeff is the weight of the new state
"""
if not coeff is None:
self << coeff * self + (1 - coeff) * self._gf_lastloop
self._gf_lastloop << self
def symmetrize(self, block_symmetries):
"""
imposes symmetries, each sublist of block_symmetries represents a symmetry-class
"""
for symmetry in block_symmetries:
self._symmetrize_block(symmetry)
def _symmetrize_block(self, symmetry):
for s1, b1 in self:
for blocklabel_sym_part in symmetry:
if blocklabel_sym_part in s1:
sublabel = s1.replace(blocklabel_sym_part, "")
for s2, b2 in self:
symlabel_in_s2 = False
for sym in symmetry:
if sym in s2:
symlabel_in_s2 = True
if sublabel in s2 and s1 != s2 and symlabel_in_s2:
b1 << .5 * (b1 + b2)
b2 << b1
def get_as_BlockGf(self):
"""
returns object as BlockGf, e.g. for writing it into HDFArchives. That process is only
defined for the parent class BlockGf.
"""
g = BlockGf(name_block_generator=((bn, GfImFreq(beta=self.mesh.beta, n_points=self.n_iw, indices=range(
bs))) for bn, bs in zip(self.blocknames, self.blocksizes)), name=self.name, make_copies=False)
g << self
return g
def make_g_tau_real(self, n_tau):
"""
Transforms to tau space with n_tau meshsize, sets self accordingly
TODO tail
"""
self.fit_tail2()
inds_tau = range(n_tau)
g_tau = BlockGf(name_list=self.blocknames,
block_list=[GfImTime(beta=self.mesh.beta, indices=range(s),
n_points=n_tau) for s in self.blocksizes])
for bname, b in g_tau:
b.set_from_fourier(self[bname])
inds_block = range(len(b.data[0, :, :]))
for n, i, j in itt.product(inds_tau, inds_block, inds_block):
b.data[n, i, j] = b.data[n, i, j].real
self[bname].set_from_fourier(b)
def fit_tail2(self, known_moments=None, hermitian=True, fit_min_n=None, fit_max_n=None, fit_min_w=None, fit_max_w=None, fit_max_moment=None):
"""
(simplified) interface to TRIQS fit_ for convenience.
TRIQS fit_tail is also directly available
"""
if fit_min_w is not None:
fit_min_n = int(0.5*(fit_min_w*self.mesh.beta/np.pi - 1.0))
if fit_max_w is not None:
fit_max_n = int(0.5*(fit_max_w*self.mesh.beta/np.pi - 1.0))
if fit_min_n is None:
fit_min_n = int(0.8*len(self.mesh)/2)
if fit_max_n is None:
fit_max_n = int(len(self.mesh)/2)
if fit_max_moment is None:
fit_max_moment = 3
for bn, b in self:
if known_moments is None:
known_moments = make_zero_tail(b, 2)
known_moments[1] = np.eye(b.target_shape[0])
if hermitian:
tail, err = fit_hermitian_tail_on_window(
b, n_min=fit_min_n, n_max=fit_max_n, known_moments=known_moments,
n_tail_max=10 * len(b.mesh), expansion_order=fit_max_moment)
else:
tail, err = fit_tail_on_window(b, n_min=fit_min_n, n_max=fit_max_n, known_moments=known_moments,
n_tail_max=10 * len(b.mesh), expansion_order=fit_max_moment)
replace_by_tail(b, tail, n_min=fit_min_n)
def _to_blockmatrix(self, number):
bmat = dict()
for bname, bsize in zip(self.blocknames, self.blocksizes):
bmat[bname] = np.identity(bsize) * number
return bmat
def _quickplot(self, file_name, x_range=(0, 100)):
"""
for debugging
"""
from matplotlib import pyplot as plt
mesh = np.array([w.imag for w in self.mesh])
ia, ie = x_range[0] + self.n_iw, x_range[1] + self.n_iw
for s, b in self:
orbs = range(b.data.shape[1])
for i, j in itt.product(orbs, orbs):
plt.plot(mesh[ia:ie], b.data[ia:ie, i, j].imag)
plt.plot(mesh[ia:ie], b.data[ia:ie, i, j].real, ls='dashed')
plt.savefig(file_name)
plt.close()
def flip_spin(self, blocklabel):
up, dn = "up", "dn"
self._checkforspins()
if up in blocklabel:
splittedlabel = blocklabel.split(up)
new_label = splittedlabel[0] + dn + splittedlabel[1]
elif dn in blocklabel:
splittedlabel = blocklabel.split(dn)
new_label = splittedlabel[0] + up + splittedlabel[1]
assert isinstance(
new_label, str), "couldn't flip spin, spins must be labeled up/dn"
return new_label
def _checkforspins(self):
for name in self.blocknames:
assert (len(name.split("up")) == 2) ^ (len(name.split("dn")) ==
2), "the strings up and dn must occur exactly once in blocknames"
| [
"numpy.identity",
"numpy.eye",
"matplotlib.pyplot.savefig",
"itertools.product",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.array",
"pytriqs.gf.BlockGf.__lshift__",
"pytriqs.gf.make_zero_tail",
"pytriqs.gf.replace_by_tail"
] | [((9404, 9441), 'numpy.array', 'np.array', (['[w.imag for w in self.mesh]'], {}), '([w.imag for w in self.mesh])\n', (9412, 9441), True, 'import numpy as np\n'), ((9772, 9794), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file_name'], {}), '(file_name)\n', (9783, 9794), True, 'from matplotlib import pyplot as plt\n'), ((9803, 9814), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9812, 9814), True, 'from matplotlib import pyplot as plt\n'), ((760, 787), 'pytriqs.gf.BlockGf.__lshift__', 'BlockGf.__lshift__', (['self', 'x'], {}), '(self, x)\n', (778, 787), False, 'from pytriqs.gf import BlockGf, GfImFreq, inverse, GfImTime, make_zero_tail, replace_by_tail, fit_tail_on_window, fit_hermitian_tail_on_window\n'), ((1601, 1640), 'itertools.product', 'itt.product', (['b.indices[0]', 'b.indices[0]'], {}), '(b.indices[0], b.indices[0])\n', (1612, 1640), True, 'import itertools as itt\n'), ((7453, 7498), 'itertools.product', 'itt.product', (['inds_tau', 'inds_block', 'inds_block'], {}), '(inds_tau, inds_block, inds_block)\n', (7464, 7498), True, 'import itertools as itt\n'), ((8997, 9038), 'pytriqs.gf.replace_by_tail', 'replace_by_tail', (['b', 'tail'], {'n_min': 'fit_min_n'}), '(b, tail, n_min=fit_min_n)\n', (9012, 9038), False, 'from pytriqs.gf import BlockGf, GfImFreq, inverse, GfImTime, make_zero_tail, replace_by_tail, fit_tail_on_window, fit_hermitian_tail_on_window\n'), ((9598, 9621), 'itertools.product', 'itt.product', (['orbs', 'orbs'], {}), '(orbs, orbs)\n', (9609, 9621), True, 'import itertools as itt\n'), ((8413, 8433), 'pytriqs.gf.make_zero_tail', 'make_zero_tail', (['b', '(2)'], {}), '(b, 2)\n', (8427, 8433), False, 'from pytriqs.gf import BlockGf, GfImFreq, inverse, GfImTime, make_zero_tail, replace_by_tail, fit_tail_on_window, fit_hermitian_tail_on_window\n'), ((8469, 8494), 'numpy.eye', 'np.eye', (['b.target_shape[0]'], {}), '(b.target_shape[0])\n', (8475, 8494), True, 'import numpy as np\n'), ((9194, 9212), 'numpy.identity', 'np.identity', 
(['bsize'], {}), '(bsize)\n', (9205, 9212), True, 'import numpy as np\n'), ((9639, 9686), 'matplotlib.pyplot.plot', 'plt.plot', (['mesh[ia:ie]', 'b.data[ia:ie, i, j].imag'], {}), '(mesh[ia:ie], b.data[ia:ie, i, j].imag)\n', (9647, 9686), True, 'from matplotlib import pyplot as plt\n'), ((9703, 9763), 'matplotlib.pyplot.plot', 'plt.plot', (['mesh[ia:ie]', 'b.data[ia:ie, i, j].real'], {'ls': '"""dashed"""'}), "(mesh[ia:ie], b.data[ia:ie, i, j].real, ls='dashed')\n", (9711, 9763), True, 'from matplotlib import pyplot as plt\n')] |
try:
from api.users import credentials
from api.trusted_curator import TrustedCurator
from api.policy import Policy
from api.models import DNN_CV, OurDataset, methodology2
except:
from project2.api.users import credentials
from project2.api.trusted_curator import TrustedCurator
from project2.api.policy import Policy
from project2.api.models import DNN_CV, OurDataset, methodology2
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# call trusted curator.
tc = TrustedCurator(
user='master',
password='<PASSWORD>',
mode='off',
)
# call policy.
pl = Policy(
n_actions=3,
action_set=['Vaccine1', 'Vaccine2', 'Vaccine3'],
)
for _ in range(10):
X = tc.get_features(
n_population=10000
)
A = pl.get_actions(features=X)
Y = tc.get_outcomes(
individuals_idxs=A.index.to_list(),
actions=A,
)
pl.observe(
actions=A,
outcomes=Y
)
plt.plot(
[i for i in range(2, pl.vaccination_stage+1)],
pl.ML_expected_utilities,
color='green',
marker='o',
linestyle='dashed',
linewidth=2,
markersize=5,
label="ML Policy"
)
plt.plot(
[i for i in range(1, pl.vaccination_stage+1)],
[np.mean(pl.random_expected_utilities) for _ in range(10)],
color='red',
marker='o',
linestyle='dashed',
linewidth=2,
markersize=5,
label="Mean for Random Policy"
)
plt.plot(
[i for i in range(1, pl.vaccination_stage+1)],
pl.observed_expected_utilities,
color='orange',
marker='o',
linestyle='dashed',
linewidth=2,
markersize=5,
label="Observed Deaths"
)
plt.title('Expected utilities for ML and Random vaccination policies')
plt.xlabel('Vaccination stages')
plt.ylabel('Estimation for the number of deaths')
plt.legend()
plt.show()
| [
"numpy.mean",
"project2.api.policy.Policy",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"project2.api.trusted_curator.TrustedCurator",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((539, 603), 'project2.api.trusted_curator.TrustedCurator', 'TrustedCurator', ([], {'user': '"""master"""', 'password': '"""<PASSWORD>"""', 'mode': '"""off"""'}), "(user='master', password='<PASSWORD>', mode='off')\n", (553, 603), False, 'from project2.api.trusted_curator import TrustedCurator\n'), ((640, 708), 'project2.api.policy.Policy', 'Policy', ([], {'n_actions': '(3)', 'action_set': "['Vaccine1', 'Vaccine2', 'Vaccine3']"}), "(n_actions=3, action_set=['Vaccine1', 'Vaccine2', 'Vaccine3'])\n", (646, 708), False, 'from project2.api.policy import Policy\n'), ((1677, 1747), 'matplotlib.pyplot.title', 'plt.title', (['"""Expected utilities for ML and Random vaccination policies"""'], {}), "('Expected utilities for ML and Random vaccination policies')\n", (1686, 1747), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1780), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Vaccination stages"""'], {}), "('Vaccination stages')\n", (1758, 1780), True, 'import matplotlib.pyplot as plt\n'), ((1781, 1830), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Estimation for the number of deaths"""'], {}), "('Estimation for the number of deaths')\n", (1791, 1830), True, 'import matplotlib.pyplot as plt\n'), ((1831, 1843), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1841, 1843), True, 'import matplotlib.pyplot as plt\n'), ((1844, 1854), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1852, 1854), True, 'import matplotlib.pyplot as plt\n'), ((1267, 1304), 'numpy.mean', 'np.mean', (['pl.random_expected_utilities'], {}), '(pl.random_expected_utilities)\n', (1274, 1304), True, 'import numpy as np\n')] |
# Standard library
import atexit
import os
import socket
import sys
import time
# Third-party
# Third-party
import theano
theano.config.optimizer = 'None'
theano.config.mode = 'FAST_COMPILE'
theano.config.reoptimize_unpickled_function = False
theano.config.cxx = ""
from astropy.table import QTable
import h5py
import numpy as np
from thejoker.logging import logger as joker_logger
import thejoker as tj
from thejoker.multiproc_helpers import batch_tasks
from thejoker.utils import read_batch
# Project
from hq.log import logger
from hq.config import Config
from hq.samples_analysis import extract_MAP_sample
from run_apogee import callback, tmpdir_combine
def worker(task):
apogee_ids, worker_id, c, results_path, prior, tmpdir, global_rnd = task
# This worker's results:
results_filename = os.path.join(tmpdir, f'worker-{worker_id}.hdf5')
metadata = QTable.read(c.metadata_file)
rnd = global_rnd.seed(worker_id)
logger.log(1, f"Worker {worker_id}: Creating TheJoker instance with {rnd}")
prior = c.get_prior()
joker = tj.TheJoker(prior, random_state=rnd)
logger.debug(f"Worker {worker_id} on node {socket.gethostname()}: "
f"{len(apogee_ids)} stars left to process")
# Initialize to get packed column order:
logger.log(1,
f"Worker {worker_id}: Loading prior samples from cache "
f"{c.prior_cache_file}")
with h5py.File(c.tasks_file, 'r') as tasks_f:
data = tj.RVData.from_timeseries(tasks_f[apogee_ids[0]])
joker_helper = joker._make_joker_helper(data)
_slice = slice(0, c.max_prior_samples, 1)
batch = read_batch(c.prior_cache_file, joker_helper.packed_order,
slice_or_idx=_slice,
units=joker_helper.internal_units)
ln_prior = read_batch(c.prior_cache_file, ['ln_prior'], _slice)[:, 0]
logger.log(1, f"Worker {worker_id}: Loaded {len(batch)} prior samples")
for apogee_id in apogee_ids:
if apogee_id not in metadata['APOGEE_ID']:
logger.debug(f"{apogee_id} not found in metadata file!")
continue
with h5py.File(c.tasks_file, 'r') as tasks_f:
data = tj.RVData.from_timeseries(tasks_f[apogee_id])
# Subtract out MAP sample, run on residual:
metadata_row = metadata[metadata['APOGEE_ID'] == apogee_id]
MAP_sample = extract_MAP_sample(metadata_row)
orbit = MAP_sample.get_orbit(0)
new_rv = data.rv - orbit.radial_velocity(data.t)
data = tj.RVData(t=data.t,
rv=new_rv,
rv_err=data.rv_err)
logger.debug(f"Worker {worker_id}: Running {apogee_id} "
f"({len(data)} visits)")
t0 = time.time()
try:
samples = joker.iterative_rejection_sample(
data=data, n_requested_samples=c.requested_samples_per_star,
prior_samples=batch,
init_batch_size=250_000,
growth_factor=32,
randomize_prior_order=c.randomize_prior_order,
return_logprobs=ln_prior, in_memory=True)
except Exception as e:
logger.warning(f"\t Failed sampling for star {apogee_id} "
f"\n Error: {e}")
continue
dt = time.time() - t0
logger.debug(f"Worker {worker_id}: {apogee_id} ({len(data)} visits): "
f"done sampling - {len(samples)} raw samples returned "
f"({dt:.2f} seconds)")
# Ensure only positive K values
samples.wrap_K()
with h5py.File(results_filename, 'a') as results_f:
if apogee_id in results_f:
del results_f[apogee_id]
g = results_f.create_group(apogee_id)
samples.write(g)
result = {'tmp_filename': results_filename,
'joker_results_file': results_path,
'hostname': socket.gethostname(),
'worker_id': worker_id}
return result
def main(run_name, pool, overwrite=False, seed=None):
c = Config.from_run_name(run_name)
# Get paths to files needed to run
results_path = os.path.join(c.run_path, 'thejoker-control.hdf5')
# Make directory for temp. files, one per worker:
tmpdir = os.path.join(c.run_path, 'null-control')
if os.path.exists(tmpdir):
logger.warning(f"Stale temp. file directory found at {tmpdir}: "
"combining files first...")
tmpdir_combine(tmpdir, results_path)
# ensure the results file exists
logger.debug("Loading past results...")
with h5py.File(results_path, 'a') as f:
done_apogee_ids = list(f.keys())
if overwrite:
done_apogee_ids = list()
# Get data files out of config file:
logger.debug("Loading data...")
allstar, _ = c.load_alldata()
allstar = allstar[~np.isin(allstar['APOGEE_ID'], done_apogee_ids)]
# Create TheJoker sampler instance with the specified random seed and pool
rnd = np.random.RandomState(seed=seed)
logger.debug(f"Processing pool has size = {pool.size}")
apogee_ids = np.unique(allstar['APOGEE_ID'])
if done_apogee_ids:
logger.info(f"{len(done_apogee_ids)} already completed - "
f"{len(apogee_ids)} left to process")
# Load the prior:
logger.debug("Creating JokerPrior instance...")
prior = c.get_prior()
os.makedirs(tmpdir)
atexit.register(tmpdir_combine, tmpdir, results_path)
logger.debug("Preparing tasks...")
if len(apogee_ids) > 10 * pool.size:
n_tasks = min(16 * pool.size, len(apogee_ids))
else:
n_tasks = pool.size
tasks = batch_tasks(len(apogee_ids), n_tasks, arr=apogee_ids,
args=(c, results_path, prior, tmpdir, rnd))
logger.info(f'Done preparing tasks: split into {len(tasks)} task chunks')
for r in pool.map(worker, tasks, callback=callback):
pass
if __name__ == '__main__':
from threadpoolctl import threadpool_limits
from hq.script_helpers import get_parser
# Define parser object
parser = get_parser(description='Run The Joker on APOGEE data',
loggers=[logger, joker_logger])
parser.add_argument("-s", "--seed", dest="seed", default=None, type=int,
help="Random number seed")
args = parser.parse_args()
seed = args.seed
if seed is None:
seed = np.random.randint(2**32 - 1)
logger.log(1, f"No random number seed specified, so using seed: {seed}")
with threadpool_limits(limits=1, user_api='blas'):
with args.Pool(**args.Pool_kwargs) as pool:
main(run_name=args.run_name, pool=pool, overwrite=args.overwrite,
seed=args.seed)
sys.exit(0)
| [
"hq.log.logger.warning",
"numpy.isin",
"sys.exit",
"numpy.random.RandomState",
"os.path.exists",
"hq.samples_analysis.extract_MAP_sample",
"hq.config.Config.from_run_name",
"socket.gethostname",
"atexit.register",
"hq.log.logger.log",
"run_apogee.tmpdir_combine",
"h5py.File",
"hq.log.logger.... | [((810, 858), 'os.path.join', 'os.path.join', (['tmpdir', 'f"""worker-{worker_id}.hdf5"""'], {}), "(tmpdir, f'worker-{worker_id}.hdf5')\n", (822, 858), False, 'import os\n'), ((874, 902), 'astropy.table.QTable.read', 'QTable.read', (['c.metadata_file'], {}), '(c.metadata_file)\n', (885, 902), False, 'from astropy.table import QTable\n'), ((945, 1020), 'hq.log.logger.log', 'logger.log', (['(1)', 'f"""Worker {worker_id}: Creating TheJoker instance with {rnd}"""'], {}), "(1, f'Worker {worker_id}: Creating TheJoker instance with {rnd}')\n", (955, 1020), False, 'from hq.log import logger\n'), ((1059, 1095), 'thejoker.TheJoker', 'tj.TheJoker', (['prior'], {'random_state': 'rnd'}), '(prior, random_state=rnd)\n', (1070, 1095), True, 'import thejoker as tj\n'), ((1279, 1379), 'hq.log.logger.log', 'logger.log', (['(1)', 'f"""Worker {worker_id}: Loading prior samples from cache {c.prior_cache_file}"""'], {}), "(1,\n f'Worker {worker_id}: Loading prior samples from cache {c.prior_cache_file}'\n )\n", (1289, 1379), False, 'from hq.log import logger\n'), ((1628, 1746), 'thejoker.utils.read_batch', 'read_batch', (['c.prior_cache_file', 'joker_helper.packed_order'], {'slice_or_idx': '_slice', 'units': 'joker_helper.internal_units'}), '(c.prior_cache_file, joker_helper.packed_order, slice_or_idx=\n _slice, units=joker_helper.internal_units)\n', (1638, 1746), False, 'from thejoker.utils import read_batch\n'), ((4089, 4119), 'hq.config.Config.from_run_name', 'Config.from_run_name', (['run_name'], {}), '(run_name)\n', (4109, 4119), False, 'from hq.config import Config\n'), ((4179, 4228), 'os.path.join', 'os.path.join', (['c.run_path', '"""thejoker-control.hdf5"""'], {}), "(c.run_path, 'thejoker-control.hdf5')\n", (4191, 4228), False, 'import os\n'), ((4297, 4337), 'os.path.join', 'os.path.join', (['c.run_path', '"""null-control"""'], {}), "(c.run_path, 'null-control')\n", (4309, 4337), False, 'import os\n'), ((4345, 4367), 'os.path.exists', 'os.path.exists', 
(['tmpdir'], {}), '(tmpdir)\n', (4359, 4367), False, 'import os\n'), ((4580, 4619), 'hq.log.logger.debug', 'logger.debug', (['"""Loading past results..."""'], {}), "('Loading past results...')\n", (4592, 4619), False, 'from hq.log import logger\n'), ((4802, 4833), 'hq.log.logger.debug', 'logger.debug', (['"""Loading data..."""'], {}), "('Loading data...')\n", (4814, 4833), False, 'from hq.log import logger\n'), ((5029, 5061), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (5050, 5061), True, 'import numpy as np\n'), ((5066, 5121), 'hq.log.logger.debug', 'logger.debug', (['f"""Processing pool has size = {pool.size}"""'], {}), "(f'Processing pool has size = {pool.size}')\n", (5078, 5121), False, 'from hq.log import logger\n'), ((5140, 5171), 'numpy.unique', 'np.unique', (["allstar['APOGEE_ID']"], {}), "(allstar['APOGEE_ID'])\n", (5149, 5171), True, 'import numpy as np\n'), ((5349, 5396), 'hq.log.logger.debug', 'logger.debug', (['"""Creating JokerPrior instance..."""'], {}), "('Creating JokerPrior instance...')\n", (5361, 5396), False, 'from hq.log import logger\n'), ((5428, 5447), 'os.makedirs', 'os.makedirs', (['tmpdir'], {}), '(tmpdir)\n', (5439, 5447), False, 'import os\n'), ((5452, 5505), 'atexit.register', 'atexit.register', (['tmpdir_combine', 'tmpdir', 'results_path'], {}), '(tmpdir_combine, tmpdir, results_path)\n', (5467, 5505), False, 'import atexit\n'), ((5511, 5545), 'hq.log.logger.debug', 'logger.debug', (['"""Preparing tasks..."""'], {}), "('Preparing tasks...')\n", (5523, 5545), False, 'from hq.log import logger\n'), ((6126, 6216), 'hq.script_helpers.get_parser', 'get_parser', ([], {'description': '"""Run The Joker on APOGEE data"""', 'loggers': '[logger, joker_logger]'}), "(description='Run The Joker on APOGEE data', loggers=[logger,\n joker_logger])\n", (6136, 6216), False, 'from hq.script_helpers import get_parser\n'), ((6790, 6801), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6798, 6801), False, 
'import sys\n'), ((1414, 1442), 'h5py.File', 'h5py.File', (['c.tasks_file', '"""r"""'], {}), "(c.tasks_file, 'r')\n", (1423, 1442), False, 'import h5py\n'), ((1470, 1519), 'thejoker.RVData.from_timeseries', 'tj.RVData.from_timeseries', (['tasks_f[apogee_ids[0]]'], {}), '(tasks_f[apogee_ids[0]])\n', (1495, 1519), True, 'import thejoker as tj\n'), ((1803, 1855), 'thejoker.utils.read_batch', 'read_batch', (['c.prior_cache_file', "['ln_prior']", '_slice'], {}), "(c.prior_cache_file, ['ln_prior'], _slice)\n", (1813, 1855), False, 'from thejoker.utils import read_batch\n'), ((2375, 2407), 'hq.samples_analysis.extract_MAP_sample', 'extract_MAP_sample', (['metadata_row'], {}), '(metadata_row)\n', (2393, 2407), False, 'from hq.samples_analysis import extract_MAP_sample\n'), ((2520, 2570), 'thejoker.RVData', 'tj.RVData', ([], {'t': 'data.t', 'rv': 'new_rv', 'rv_err': 'data.rv_err'}), '(t=data.t, rv=new_rv, rv_err=data.rv_err)\n', (2529, 2570), True, 'import thejoker as tj\n'), ((2746, 2757), 'time.time', 'time.time', ([], {}), '()\n', (2755, 2757), False, 'import time\n'), ((3947, 3967), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (3965, 3967), False, 'import socket\n'), ((4377, 4471), 'hq.log.logger.warning', 'logger.warning', (['f"""Stale temp. file directory found at {tmpdir}: combining files first..."""'], {}), "(\n f'Stale temp. 
file directory found at {tmpdir}: combining files first...')\n", (4391, 4471), False, 'from hq.log import logger\n'), ((4501, 4537), 'run_apogee.tmpdir_combine', 'tmpdir_combine', (['tmpdir', 'results_path'], {}), '(tmpdir, results_path)\n', (4515, 4537), False, 'from run_apogee import callback, tmpdir_combine\n'), ((4629, 4657), 'h5py.File', 'h5py.File', (['results_path', '"""a"""'], {}), "(results_path, 'a')\n", (4638, 4657), False, 'import h5py\n'), ((6456, 6486), 'numpy.random.randint', 'np.random.randint', (['(2 ** 32 - 1)'], {}), '(2 ** 32 - 1)\n', (6473, 6486), True, 'import numpy as np\n'), ((6493, 6565), 'hq.log.logger.log', 'logger.log', (['(1)', 'f"""No random number seed specified, so using seed: {seed}"""'], {}), "(1, f'No random number seed specified, so using seed: {seed}')\n", (6503, 6565), False, 'from hq.log import logger\n'), ((6576, 6620), 'threadpoolctl.threadpool_limits', 'threadpool_limits', ([], {'limits': '(1)', 'user_api': '"""blas"""'}), "(limits=1, user_api='blas')\n", (6593, 6620), False, 'from threadpoolctl import threadpool_limits\n'), ((2035, 2091), 'hq.log.logger.debug', 'logger.debug', (['f"""{apogee_id} not found in metadata file!"""'], {}), "(f'{apogee_id} not found in metadata file!')\n", (2047, 2091), False, 'from hq.log import logger\n'), ((2127, 2155), 'h5py.File', 'h5py.File', (['c.tasks_file', '"""r"""'], {}), "(c.tasks_file, 'r')\n", (2136, 2155), False, 'import h5py\n'), ((2187, 2232), 'thejoker.RVData.from_timeseries', 'tj.RVData.from_timeseries', (['tasks_f[apogee_id]'], {}), '(tasks_f[apogee_id])\n', (2212, 2232), True, 'import thejoker as tj\n'), ((3319, 3330), 'time.time', 'time.time', ([], {}), '()\n', (3328, 3330), False, 'import time\n'), ((3616, 3648), 'h5py.File', 'h5py.File', (['results_filename', '"""a"""'], {}), "(results_filename, 'a')\n", (3625, 3648), False, 'import h5py\n'), ((4891, 4937), 'numpy.isin', 'np.isin', (["allstar['APOGEE_ID']", 'done_apogee_ids'], {}), "(allstar['APOGEE_ID'], 
done_apogee_ids)\n", (4898, 4937), True, 'import numpy as np\n'), ((1143, 1163), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1161, 1163), False, 'import socket\n'), ((3180, 3254), 'hq.log.logger.warning', 'logger.warning', (['f"""\t Failed sampling for star {apogee_id} \n Error: {e}"""'], {}), '(f"""\t Failed sampling for star {apogee_id} \n Error: {e}""")\n', (3194, 3254), False, 'from hq.log import logger\n')] |
import unittest
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn
from pyoptmat import ode, models, flowrules, hardening, utility, damage
from pyoptmat.temperature import ConstantParameter as CP
torch.set_default_tensor_type(torch.DoubleTensor)
torch.autograd.set_detect_anomaly(True)
def differ(mfn, p0, eps=1.0e-6):
v0 = mfn(p0).numpy()
puse = p0.numpy()
result = np.zeros(puse.shape)
for ind, val in np.ndenumerate(puse):
dp = np.abs(val) * eps
if dp < eps:
dp = eps
pcurr = np.copy(puse)
pcurr[ind] += dp
v1 = mfn(torch.tensor(pcurr)).numpy()
result[ind] = (v1 - v0) / dp
return result
def simple_diff(fn, p0):
res = []
for i in range(len(p0)):
def mfn(pi):
ps = [pp for pp in p0]
ps[i] = pi
return fn(ps)
res.append(differ(mfn, p0[i]))
return res
class CommonGradient:
    # Mixin of gradient checks shared by the concrete TestCase classes below.
    # The host class's setUp must provide: model_fn, p, times, strains,
    # stresses, temperatures, and extract_grad.
    def test_gradient_strain(self):
        """Compare autograd gradients of the strain-driven solve to finite differences."""
        bmodel = self.model_fn([Variable(pi, requires_grad=True) for pi in self.p])
        res = torch.norm(
            bmodel.solve_strain(self.times, self.strains, self.temperatures)
        )
        res.backward()
        # analytic gradients pulled out of the model parameters
        grad = self.extract_grad(bmodel)
        # numerical reference: finite differences of the same scalar objective
        ngrad = simple_diff(
            lambda p: torch.norm(
                self.model_fn(p).solve_strain(
                    self.times, self.strains, self.temperatures
                )
            ),
            self.p,
        )
        for i, (p1, p2) in enumerate(zip(grad, ngrad)):
            print(i, p1, p2)
            self.assertTrue(np.allclose(p1, p2, rtol=1e-4))
    def test_gradient_stress(self):
        """Compare autograd gradients of the stress-driven solve to finite differences."""
        bmodel = self.model_fn([Variable(pi, requires_grad=True) for pi in self.p])
        res = torch.norm(
            bmodel.solve_stress(self.times, self.stresses, self.temperatures)
        )
        res.backward()
        grad = self.extract_grad(bmodel)
        ngrad = simple_diff(
            lambda p: torch.norm(
                self.model_fn(p).solve_stress(
                    self.times, self.stresses, self.temperatures
                )
            ),
            self.p,
        )
        # Skipping the first step helps with noise issues
        for i, (p1, p2) in enumerate(zip(grad[1:], ngrad[1:])):
            print(i, p1, p2)
            self.assertTrue(np.allclose(p1, p2, rtol=1e-4, atol=1e-7))
class TestPerfectViscoplasticity(unittest.TestCase, CommonGradient):
    """Gradient checks for a perfect-viscoplasticity model (E, n, eta)."""
    def setUp(self):
        self.ntime = 10
        self.nbatch = 10
        # material parameters to differentiate with respect to
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.p = [self.E, self.n, self.eta]
        # builds a fresh integrator from a parameter list p (order matches self.p)
        self.model_fn = lambda p: models.ModelIntegrator(
            models.InelasticModel(
                CP(p[0]), flowrules.PerfectViscoplasticity(CP(p[1]), CP(p[2]))
            ),
            use_adjoint=False,
        )
        # pulls the accumulated .grad values back out in the same order
        self.extract_grad = lambda m: [
            m.model.E.pvalue.grad.numpy(),
            m.model.flowrule.n.pvalue.grad.numpy(),
            m.model.flowrule.eta.pvalue.grad.numpy(),
        ]
        # time/strain/stress histories shaped (ntime, nbatch) via transpose
        self.times = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.strains = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 0.003, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.stresses = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 100.0, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.temperatures = torch.zeros_like(self.strains)
class TestIsotropicOnly(unittest.TestCase, CommonGradient):
    """Gradient checks for isotropic (Voce) hardening with no kinematic part."""
    def setUp(self):
        self.ntime = 10
        self.nbatch = 10
        # material parameters; note p order below is E, n, eta, s0, R, d
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.R = torch.tensor(100.0)
        self.d = torch.tensor(5.1)
        self.s0 = torch.tensor(10.0)
        self.p = [self.E, self.n, self.eta, self.s0, self.R, self.d]
        self.model_fn = lambda p: models.ModelIntegrator(
            models.InelasticModel(
                CP(p[0]),
                flowrules.IsoKinViscoplasticity(
                    CP(p[1]),
                    CP(p[2]),
                    CP(p[3]),
                    hardening.VoceIsotropicHardeningModel(CP(p[4]), CP(p[5])),
                    hardening.NoKinematicHardeningModel(),
                ),
            ),
            use_adjoint=False,
        )
        # gradient extraction order mirrors self.p
        self.extract_grad = lambda m: [
            m.model.E.pvalue.grad.numpy(),
            m.model.flowrule.n.pvalue.grad.numpy(),
            m.model.flowrule.eta.pvalue.grad.numpy(),
            m.model.flowrule.s0.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.R.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.d.pvalue.grad.numpy(),
        ]
        # loading histories shaped (ntime, nbatch)
        self.times = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.strains = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 0.003, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.stresses = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 200.0, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.temperatures = torch.zeros_like(self.strains)
class TestHardeningViscoplasticity(unittest.TestCase, CommonGradient):
    """Gradient checks for combined Voce isotropic + Frederick-Armstrong kinematic hardening."""
    def setUp(self):
        self.ntime = 10
        self.nbatch = 10
        # material parameters; p order below is E, n, eta, s0, R, d, C, g
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.R = torch.tensor(100.0)
        self.d = torch.tensor(5.1)
        self.C = torch.tensor(1000.0)
        self.g = torch.tensor(10.0)
        self.s0 = torch.tensor(10.0)
        self.p = [self.E, self.n, self.eta, self.s0, self.R, self.d, self.C, self.g]
        self.model_fn = lambda p: models.ModelIntegrator(
            models.InelasticModel(
                CP(p[0]),
                flowrules.IsoKinViscoplasticity(
                    CP(p[1]),
                    CP(p[2]),
                    CP(p[3]),
                    hardening.VoceIsotropicHardeningModel(CP(p[4]), CP(p[5])),
                    hardening.FAKinematicHardeningModel(CP(p[6]), CP(p[7])),
                ),
            ),
            use_adjoint=False,
        )
        # gradient extraction order mirrors self.p
        self.extract_grad = lambda m: [
            m.model.E.pvalue.grad.numpy(),
            m.model.flowrule.n.pvalue.grad.numpy(),
            m.model.flowrule.eta.pvalue.grad.numpy(),
            m.model.flowrule.s0.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.R.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.d.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.C.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.g.pvalue.grad.numpy(),
        ]
        # loading histories shaped (ntime, nbatch)
        self.times = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.strains = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 0.003, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.stresses = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 200.0, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.temperatures = torch.zeros_like(self.strains)
class TestHardeningViscoplasticityDamage(unittest.TestCase, CommonGradient):
    """Gradient checks for the hardening model coupled with Hayhurst-Leckie damage."""
    def setUp(self):
        self.ntime = 10
        self.nbatch = 10
        # material parameters; p order is E, n, eta, s0, R, d, C, g, A, xi, phi
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.R = torch.tensor(100.0)
        self.d = torch.tensor(5.1)
        self.C = torch.tensor(1000.0)
        self.g = torch.tensor(10.0)
        self.s0 = torch.tensor(10.0)
        # damage-model parameters
        self.A = torch.tensor(2000.0)
        self.xi = torch.tensor(6.5)
        self.phi = torch.tensor(1.7)
        self.p = [
            self.E,
            self.n,
            self.eta,
            self.s0,
            self.R,
            self.d,
            self.C,
            self.g,
            self.A,
            self.xi,
            self.phi,
        ]
        self.model_fn = lambda p: models.ModelIntegrator(
            models.InelasticModel(
                CP(p[0]),
                flowrules.IsoKinViscoplasticity(
                    CP(p[1]),
                    CP(p[2]),
                    CP(p[3]),
                    hardening.VoceIsotropicHardeningModel(CP(p[4]), CP(p[5])),
                    hardening.FAKinematicHardeningModel(CP(p[6]), CP(p[7])),
                ),
                dmodel=damage.HayhurstLeckie(CP(p[8]), CP(p[9]), CP(p[10])),
            ),
            use_adjoint=False,
        )
        # gradient extraction order mirrors self.p
        self.extract_grad = lambda m: [
            m.model.E.pvalue.grad.numpy(),
            m.model.flowrule.n.pvalue.grad.numpy(),
            m.model.flowrule.eta.pvalue.grad.numpy(),
            m.model.flowrule.s0.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.R.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.d.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.C.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.g.pvalue.grad.numpy(),
            m.model.dmodel.A.pvalue.grad.numpy(),
            m.model.dmodel.xi.pvalue.grad.numpy(),
            m.model.dmodel.phi.pvalue.grad.numpy(),
        ]
        # loading histories shaped (ntime, nbatch); larger strain range than
        # the other cases, presumably to activate damage — TODO confirm
        self.times = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.strains = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 0.03, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.stresses = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 200, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.temperatures = torch.zeros_like(self.strains)
class TestChabocheViscoplasticity(unittest.TestCase, CommonGradient):
    """Gradient checks for Chaboche kinematic hardening (vector-valued C, g)."""
    def setUp(self):
        self.ntime = 10
        self.nbatch = 4
        # material parameters; p order is E, n, eta, s0, R, d, C, g
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.R = torch.tensor(100.0)
        self.d = torch.tensor(5.1)
        # three Chaboche backstress terms
        self.C = torch.tensor([1000.0, 750.0, 100.0])
        self.g = torch.tensor([10.0, 1.2, 8.6])
        self.s0 = torch.tensor(10.0)
        self.p = [self.E, self.n, self.eta, self.s0, self.R, self.d, self.C, self.g]
        self.model_fn = lambda p: models.ModelIntegrator(
            models.InelasticModel(
                CP(p[0]),
                flowrules.IsoKinViscoplasticity(
                    CP(p[1]),
                    CP(p[2]),
                    CP(p[3]),
                    hardening.VoceIsotropicHardeningModel(CP(p[4]), CP(p[5])),
                    hardening.ChabocheHardeningModel(CP(p[6]), CP(p[7])),
                ),
            ),
            use_adjoint=False,
        )
        # gradient extraction order mirrors self.p
        self.extract_grad = lambda m: [
            m.model.E.pvalue.grad.numpy(),
            m.model.flowrule.n.pvalue.grad.numpy(),
            m.model.flowrule.eta.pvalue.grad.numpy(),
            m.model.flowrule.s0.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.R.pvalue.grad.numpy(),
            m.model.flowrule.isotropic.d.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.C.pvalue.grad.numpy(),
            m.model.flowrule.kinematic.g.pvalue.grad.numpy(),
        ]
        # loading histories shaped (ntime, nbatch)
        self.times = torch.transpose(
            torch.tensor(
                np.array([np.linspace(0, 1, self.ntime) for i in range(self.nbatch)])
            ),
            1,
            0,
        )
        self.strains = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 0.003, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.stresses = torch.transpose(
            torch.tensor(
                np.array(
                    [np.linspace(0, 200.0, self.ntime) for i in range(self.nbatch)]
                )
            ),
            1,
            0,
        )
        self.temperatures = torch.zeros_like(self.strains)
| [
"numpy.copy",
"torch.autograd.set_detect_anomaly",
"numpy.abs",
"numpy.allclose",
"pyoptmat.hardening.NoKinematicHardeningModel",
"numpy.ndenumerate",
"torch.set_default_tensor_type",
"torch.tensor",
"numpy.zeros",
"numpy.linspace",
"torch.zeros_like",
"torch.autograd.Variable",
"pyoptmat.te... | [((233, 282), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.DoubleTensor'], {}), '(torch.DoubleTensor)\n', (262, 282), False, 'import torch\n'), ((283, 322), 'torch.autograd.set_detect_anomaly', 'torch.autograd.set_detect_anomaly', (['(True)'], {}), '(True)\n', (316, 322), False, 'import torch\n'), ((420, 440), 'numpy.zeros', 'np.zeros', (['puse.shape'], {}), '(puse.shape)\n', (428, 440), True, 'import numpy as np\n'), ((462, 482), 'numpy.ndenumerate', 'np.ndenumerate', (['puse'], {}), '(puse)\n', (476, 482), True, 'import numpy as np\n'), ((573, 586), 'numpy.copy', 'np.copy', (['puse'], {}), '(puse)\n', (580, 586), True, 'import numpy as np\n'), ((2570, 2592), 'torch.tensor', 'torch.tensor', (['(100000.0)'], {}), '(100000.0)\n', (2582, 2592), False, 'import torch\n'), ((2610, 2627), 'torch.tensor', 'torch.tensor', (['(5.2)'], {}), '(5.2)\n', (2622, 2627), False, 'import torch\n'), ((2647, 2666), 'torch.tensor', 'torch.tensor', (['(110.0)'], {}), '(110.0)\n', (2659, 2666), False, 'import torch\n'), ((3874, 3904), 'torch.zeros_like', 'torch.zeros_like', (['self.strains'], {}), '(self.strains)\n', (3890, 3904), False, 'import torch\n'), ((4055, 4077), 'torch.tensor', 'torch.tensor', (['(100000.0)'], {}), '(100000.0)\n', (4067, 4077), False, 'import torch\n'), ((4095, 4112), 'torch.tensor', 'torch.tensor', (['(5.2)'], {}), '(5.2)\n', (4107, 4112), False, 'import torch\n'), ((4132, 4151), 'torch.tensor', 'torch.tensor', (['(110.0)'], {}), '(110.0)\n', (4144, 4151), False, 'import torch\n'), ((4169, 4188), 'torch.tensor', 'torch.tensor', (['(100.0)'], {}), '(100.0)\n', (4181, 4188), False, 'import torch\n'), ((4206, 4223), 'torch.tensor', 'torch.tensor', (['(5.1)'], {}), '(5.1)\n', (4218, 4223), False, 'import torch\n'), ((4242, 4260), 'torch.tensor', 'torch.tensor', (['(10.0)'], {}), '(10.0)\n', (4254, 4260), False, 'import torch\n'), ((5913, 5943), 'torch.zeros_like', 'torch.zeros_like', (['self.strains'], {}), 
'(self.strains)\n', (5929, 5943), False, 'import torch\n'), ((6105, 6127), 'torch.tensor', 'torch.tensor', (['(100000.0)'], {}), '(100000.0)\n', (6117, 6127), False, 'import torch\n'), ((6145, 6162), 'torch.tensor', 'torch.tensor', (['(5.2)'], {}), '(5.2)\n', (6157, 6162), False, 'import torch\n'), ((6182, 6201), 'torch.tensor', 'torch.tensor', (['(110.0)'], {}), '(110.0)\n', (6194, 6201), False, 'import torch\n'), ((6219, 6238), 'torch.tensor', 'torch.tensor', (['(100.0)'], {}), '(100.0)\n', (6231, 6238), False, 'import torch\n'), ((6256, 6273), 'torch.tensor', 'torch.tensor', (['(5.1)'], {}), '(5.1)\n', (6268, 6273), False, 'import torch\n'), ((6291, 6311), 'torch.tensor', 'torch.tensor', (['(1000.0)'], {}), '(1000.0)\n', (6303, 6311), False, 'import torch\n'), ((6329, 6347), 'torch.tensor', 'torch.tensor', (['(10.0)'], {}), '(10.0)\n', (6341, 6347), False, 'import torch\n'), ((6366, 6384), 'torch.tensor', 'torch.tensor', (['(10.0)'], {}), '(10.0)\n', (6378, 6384), False, 'import torch\n'), ((8195, 8225), 'torch.zeros_like', 'torch.zeros_like', (['self.strains'], {}), '(self.strains)\n', (8211, 8225), False, 'import torch\n'), ((8393, 8415), 'torch.tensor', 'torch.tensor', (['(100000.0)'], {}), '(100000.0)\n', (8405, 8415), False, 'import torch\n'), ((8433, 8450), 'torch.tensor', 'torch.tensor', (['(5.2)'], {}), '(5.2)\n', (8445, 8450), False, 'import torch\n'), ((8470, 8489), 'torch.tensor', 'torch.tensor', (['(110.0)'], {}), '(110.0)\n', (8482, 8489), False, 'import torch\n'), ((8507, 8526), 'torch.tensor', 'torch.tensor', (['(100.0)'], {}), '(100.0)\n', (8519, 8526), False, 'import torch\n'), ((8544, 8561), 'torch.tensor', 'torch.tensor', (['(5.1)'], {}), '(5.1)\n', (8556, 8561), False, 'import torch\n'), ((8579, 8599), 'torch.tensor', 'torch.tensor', (['(1000.0)'], {}), '(1000.0)\n', (8591, 8599), False, 'import torch\n'), ((8617, 8635), 'torch.tensor', 'torch.tensor', (['(10.0)'], {}), '(10.0)\n', (8629, 8635), False, 'import torch\n'), ((8654, 8672), 
'torch.tensor', 'torch.tensor', (['(10.0)'], {}), '(10.0)\n', (8666, 8672), False, 'import torch\n'), ((8690, 8710), 'torch.tensor', 'torch.tensor', (['(2000.0)'], {}), '(2000.0)\n', (8702, 8710), False, 'import torch\n'), ((8729, 8746), 'torch.tensor', 'torch.tensor', (['(6.5)'], {}), '(6.5)\n', (8741, 8746), False, 'import torch\n'), ((8766, 8783), 'torch.tensor', 'torch.tensor', (['(1.7)'], {}), '(1.7)\n', (8778, 8783), False, 'import torch\n'), ((10915, 10945), 'torch.zeros_like', 'torch.zeros_like', (['self.strains'], {}), '(self.strains)\n', (10931, 10945), False, 'import torch\n'), ((11105, 11127), 'torch.tensor', 'torch.tensor', (['(100000.0)'], {}), '(100000.0)\n', (11117, 11127), False, 'import torch\n'), ((11145, 11162), 'torch.tensor', 'torch.tensor', (['(5.2)'], {}), '(5.2)\n', (11157, 11162), False, 'import torch\n'), ((11182, 11201), 'torch.tensor', 'torch.tensor', (['(110.0)'], {}), '(110.0)\n', (11194, 11201), False, 'import torch\n'), ((11219, 11238), 'torch.tensor', 'torch.tensor', (['(100.0)'], {}), '(100.0)\n', (11231, 11238), False, 'import torch\n'), ((11256, 11273), 'torch.tensor', 'torch.tensor', (['(5.1)'], {}), '(5.1)\n', (11268, 11273), False, 'import torch\n'), ((11291, 11327), 'torch.tensor', 'torch.tensor', (['[1000.0, 750.0, 100.0]'], {}), '([1000.0, 750.0, 100.0])\n', (11303, 11327), False, 'import torch\n'), ((11345, 11375), 'torch.tensor', 'torch.tensor', (['[10.0, 1.2, 8.6]'], {}), '([10.0, 1.2, 8.6])\n', (11357, 11375), False, 'import torch\n'), ((11394, 11412), 'torch.tensor', 'torch.tensor', (['(10.0)'], {}), '(10.0)\n', (11406, 11412), False, 'import torch\n'), ((13220, 13250), 'torch.zeros_like', 'torch.zeros_like', (['self.strains'], {}), '(self.strains)\n', (13236, 13250), False, 'import torch\n'), ((497, 508), 'numpy.abs', 'np.abs', (['val'], {}), '(val)\n', (503, 508), True, 'import numpy as np\n'), ((1037, 1069), 'torch.autograd.Variable', 'Variable', (['pi'], {'requires_grad': '(True)'}), '(pi, requires_grad=True)\n', 
(1045, 1069), False, 'from torch.autograd import Variable\n'), ((1618, 1650), 'numpy.allclose', 'np.allclose', (['p1', 'p2'], {'rtol': '(0.0001)'}), '(p1, p2, rtol=0.0001)\n', (1629, 1650), True, 'import numpy as np\n'), ((1719, 1751), 'torch.autograd.Variable', 'Variable', (['pi'], {'requires_grad': '(True)'}), '(pi, requires_grad=True)\n', (1727, 1751), False, 'from torch.autograd import Variable\n'), ((2368, 2412), 'numpy.allclose', 'np.allclose', (['p1', 'p2'], {'rtol': '(0.0001)', 'atol': '(1e-07)'}), '(p1, p2, rtol=0.0001, atol=1e-07)\n', (2379, 2412), True, 'import numpy as np\n'), ((629, 648), 'torch.tensor', 'torch.tensor', (['pcurr'], {}), '(pcurr)\n', (641, 648), False, 'import torch\n'), ((2822, 2830), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[0]'], {}), '(p[0])\n', (2824, 2830), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((4441, 4449), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[0]'], {}), '(p[0])\n', (4443, 4449), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((6581, 6589), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[0]'], {}), '(p[0])\n', (6583, 6589), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9150, 9158), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[0]'], {}), '(p[0])\n', (9152, 9158), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((11609, 11617), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[0]'], {}), '(p[0])\n', (11611, 11617), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((2865, 2873), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[1]'], {}), '(p[1])\n', (2867, 2873), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((2875, 2883), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[2]'], {}), '(p[2])\n', (2877, 2883), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((3232, 3261), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 
'self.ntime'], {}), '(0, 1, self.ntime)\n', (3243, 3261), True, 'import numpy as np\n'), ((3460, 3493), 'numpy.linspace', 'np.linspace', (['(0)', '(0.003)', 'self.ntime'], {}), '(0, 0.003, self.ntime)\n', (3471, 3493), True, 'import numpy as np\n'), ((3710, 3743), 'numpy.linspace', 'np.linspace', (['(0)', '(100.0)', 'self.ntime'], {}), '(0, 100.0, self.ntime)\n', (3721, 3743), True, 'import numpy as np\n'), ((4520, 4528), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[1]'], {}), '(p[1])\n', (4522, 4528), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((4550, 4558), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[2]'], {}), '(p[2])\n', (4552, 4558), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((4580, 4588), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[3]'], {}), '(p[3])\n', (4582, 4588), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((4689, 4726), 'pyoptmat.hardening.NoKinematicHardeningModel', 'hardening.NoKinematicHardeningModel', ([], {}), '()\n', (4724, 4726), False, 'from pyoptmat import ode, models, flowrules, hardening, utility, damage\n'), ((5271, 5300), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.ntime'], {}), '(0, 1, self.ntime)\n', (5282, 5300), True, 'import numpy as np\n'), ((5499, 5532), 'numpy.linspace', 'np.linspace', (['(0)', '(0.003)', 'self.ntime'], {}), '(0, 0.003, self.ntime)\n', (5510, 5532), True, 'import numpy as np\n'), ((5749, 5782), 'numpy.linspace', 'np.linspace', (['(0)', '(200.0)', 'self.ntime'], {}), '(0, 200.0, self.ntime)\n', (5760, 5782), True, 'import numpy as np\n'), ((6660, 6668), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[1]'], {}), '(p[1])\n', (6662, 6668), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((6690, 6698), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[2]'], {}), '(p[2])\n', (6692, 6698), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((6720, 6728), 
'pyoptmat.temperature.ConstantParameter', 'CP', (['p[3]'], {}), '(p[3])\n', (6722, 6728), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((7553, 7582), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.ntime'], {}), '(0, 1, self.ntime)\n', (7564, 7582), True, 'import numpy as np\n'), ((7781, 7814), 'numpy.linspace', 'np.linspace', (['(0)', '(0.003)', 'self.ntime'], {}), '(0, 0.003, self.ntime)\n', (7792, 7814), True, 'import numpy as np\n'), ((8031, 8064), 'numpy.linspace', 'np.linspace', (['(0)', '(200.0)', 'self.ntime'], {}), '(0, 200.0, self.ntime)\n', (8042, 8064), True, 'import numpy as np\n'), ((9229, 9237), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[1]'], {}), '(p[1])\n', (9231, 9237), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9259, 9267), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[2]'], {}), '(p[2])\n', (9261, 9267), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9289, 9297), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[3]'], {}), '(p[3])\n', (9291, 9297), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((10352, 10381), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.ntime'], {}), '(0, 1, self.ntime)\n', (10363, 10381), True, 'import numpy as np\n'), ((10559, 10591), 'numpy.linspace', 'np.linspace', (['(0)', '(0.03)', 'self.ntime'], {}), '(0, 0.03, self.ntime)\n', (10570, 10591), True, 'import numpy as np\n'), ((10770, 10801), 'numpy.linspace', 'np.linspace', (['(0)', '(200)', 'self.ntime'], {}), '(0, 200, self.ntime)\n', (10781, 10801), True, 'import numpy as np\n'), ((11688, 11696), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[1]'], {}), '(p[1])\n', (11690, 11696), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((11718, 11726), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[2]'], {}), '(p[2])\n', (11720, 11726), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), 
((11748, 11756), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[3]'], {}), '(p[3])\n', (11750, 11756), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((12578, 12607), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.ntime'], {}), '(0, 1, self.ntime)\n', (12589, 12607), True, 'import numpy as np\n'), ((12806, 12839), 'numpy.linspace', 'np.linspace', (['(0)', '(0.003)', 'self.ntime'], {}), '(0, 0.003, self.ntime)\n', (12817, 12839), True, 'import numpy as np\n'), ((13056, 13089), 'numpy.linspace', 'np.linspace', (['(0)', '(200.0)', 'self.ntime'], {}), '(0, 200.0, self.ntime)\n', (13067, 13089), True, 'import numpy as np\n'), ((4648, 4656), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[4]'], {}), '(p[4])\n', (4650, 4656), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((4658, 4666), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[5]'], {}), '(p[5])\n', (4660, 4666), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((6788, 6796), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[4]'], {}), '(p[4])\n', (6790, 6796), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((6798, 6806), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[5]'], {}), '(p[5])\n', (6800, 6806), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((6865, 6873), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[6]'], {}), '(p[6])\n', (6867, 6873), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((6875, 6883), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[7]'], {}), '(p[7])\n', (6877, 6883), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9357, 9365), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[4]'], {}), '(p[4])\n', (9359, 9365), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9367, 9375), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[5]'], {}), '(p[5])\n', (9369, 9375), True, 'from 
pyoptmat.temperature import ConstantParameter as CP\n'), ((9434, 9442), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[6]'], {}), '(p[6])\n', (9436, 9442), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9444, 9452), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[7]'], {}), '(p[7])\n', (9446, 9452), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9519, 9527), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[8]'], {}), '(p[8])\n', (9521, 9527), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9529, 9537), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[9]'], {}), '(p[9])\n', (9531, 9537), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9539, 9548), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[10]'], {}), '(p[10])\n', (9541, 9548), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((11816, 11824), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[4]'], {}), '(p[4])\n', (11818, 11824), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((11826, 11834), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[5]'], {}), '(p[5])\n', (11828, 11834), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((11890, 11898), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[6]'], {}), '(p[6])\n', (11892, 11898), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((11900, 11908), 'pyoptmat.temperature.ConstantParameter', 'CP', (['p[7]'], {}), '(p[7])\n', (11902, 11908), True, 'from pyoptmat.temperature import ConstantParameter as CP\n')] |
import typing
import numpy as np
def find_divisors(n: int) -> np.array:
    """Return all divisors of n, sorted ascending, as a numpy array.

    Trial-divides by every candidate up to sqrt(n); each small divisor d
    pairs with its cofactor n // d, and np.unique sorts and deduplicates
    (perfect squares would otherwise list sqrt(n) twice).
    """
    small = 1 + np.arange(int(n ** 0.5))
    small = small[n % small == 0]
    return np.unique(np.hstack((small, n // small)))
def gpf(n: int = 1 << 20) -> np.array:
    """Sieve of greatest prime factors for 0..n-1.

    Returns s where s[j] is the greatest prime factor of j for j >= 2
    (s[0] = s[1] = -1, and primes map to themselves).

    Bug fix: the original loop stopped at sqrt(n), so a composite whose
    greatest prime factor exceeds sqrt(n) was labelled with a smaller prime
    (e.g. gpf(10) came out 2 instead of 5).  Iterating every i < n lets each
    prime stamp its multiples in ascending order, so the largest prime
    divisor is written last.  Primality information (s[j] == j exactly for
    primes) is unchanged, so sieve_of_eratosthenes() built on top of this
    behaves identically.
    """
    s = np.arange(n)
    s[:2] = -1
    for i in range(2, n):
        if s[i] == i:  # still unmarked => i is prime: stamp all multiples
            s[i::i] = i
    return s
def lpf(n: int = 1 << 20) -> np.array:
    """Sieve of least prime factors for 0..n-1.

    Returns s where s[j] is the smallest prime factor of j for j >= 2
    (s[0] = s[1] = -1, and primes map to themselves).

    Bug fix: the original wrote ``s[j][s[j] == j] = i``.  ``s[j]`` with an
    index array performs fancy indexing, which returns a *copy*, so the
    assignment mutated a temporary and the sieve never marked anything.
    We instead assign through the index array of still-unmarked multiples.
    """
    s = np.arange(n)
    s[:2] = -1
    i = 1
    while i * i < n - 1:
        i += 1
        if s[i] != i:
            continue  # i is composite: already carries its least prime factor
        multiples = np.arange(i, n, i)
        # only mark entries not yet claimed by a smaller prime
        unmarked = multiples[s[multiples] == multiples]
        s[unmarked] = i
    return s
def sieve_of_eratosthenes(n: int = 1 << 20) -> np.array:
    """Boolean primality table: entry j is True iff j is prime (j < n)."""
    # A number is prime exactly when the gpf sieve leaves it labelled by itself.
    labels = gpf(n)
    return labels == np.arange(n)
def prime_numbers(n: int = 1 << 20) -> np.array:
    """Array of all primes below n (indices where the sieve is True)."""
    return np.flatnonzero(sieve_of_eratosthenes(n))
def euler_totient(n: int, prime_numbers: np.array) -> int:
    """Euler's totient phi(n) via trial division by precomputed primes.

    prime_numbers must contain all primes up to sqrt(n) (ascending).
    """
    phi = n
    for p in prime_numbers:
        if p * p > n:
            break  # remaining n is 1 or a single prime
        if n % p != 0:
            continue
        phi = phi // p * (p - 1)
        while n % p == 0:
            n //= p
    if n > 1:
        # leftover prime factor larger than sqrt of the reduced n
        phi = phi // n * (n - 1)
    return phi
def solve(p: int) -> typing.NoReturn:
    """Print sum over divisors d of p-1 of phi(d)*d, plus 1, mod 998244353."""
    n = p - 1
    mod = 998244353
    divisors = find_divisors(n)
    primes = prime_numbers(1 << 20)
    total = 1
    for d in divisors:
        phi = euler_totient(d, primes) % mod
        total = (total + phi * (d % mod)) % mod
    print(total)
def main() -> typing.NoReturn:
    """Entry point: read the prime p from stdin and run the solver."""
    solve(int(input()))
import sys
OJ = 'ONLINE_JUDGE'
# Competitive-programming AOT trick: on the judge's compile pass (detected via
# argv), JIT-wrap the helpers, ahead-of-time compile solve() into a shared
# library named my_module with numba.pycc, and exit.  On the run pass the
# compiled module is imported instead.
if sys.argv[-1] == OJ:
  from numba import njit, i8
  find_divisors = njit(
    find_divisors,
  )
  lpf = njit(lpf)
  gpf = njit(gpf)
  sieve_of_eratosthenes = njit(
    sieve_of_eratosthenes,
  )
  prime_numbers = njit(
    prime_numbers,
  )
  euler_totient = njit(
    euler_totient,
  )
  fn = solve
  signature = (i8, )
  from numba.pycc import CC
  cc = CC('my_module')
  cc.export(
    fn.__name__,
    signature,
  )(fn)
  cc.compile()
  exit(0)
# run pass: shadow the Python solve with the precompiled one
from my_module import solve
main()
"numpy.unique",
"numpy.hstack",
"numpy.flatnonzero",
"numba.njit",
"numba.pycc.CC",
"my_module.solve",
"numpy.arange"
] | [((146, 168), 'numpy.hstack', 'np.hstack', (['(i, n // i)'], {}), '((i, n // i))\n', (155, 168), True, 'import numpy as np\n'), ((178, 190), 'numpy.unique', 'np.unique', (['i'], {}), '(i)\n', (187, 190), True, 'import numpy as np\n'), ((244, 256), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (253, 256), True, 'import numpy as np\n'), ((405, 417), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (414, 417), True, 'import numpy as np\n'), ((755, 772), 'numpy.flatnonzero', 'np.flatnonzero', (['s'], {}), '(s)\n', (769, 772), True, 'import numpy as np\n'), ((1338, 1346), 'my_module.solve', 'solve', (['p'], {}), '(p)\n', (1343, 1346), False, 'from my_module import solve\n'), ((1453, 1472), 'numba.njit', 'njit', (['find_divisors'], {}), '(find_divisors)\n', (1457, 1472), False, 'from numba import njit, i8\n'), ((1490, 1499), 'numba.njit', 'njit', (['lpf'], {}), '(lpf)\n', (1494, 1499), False, 'from numba import njit, i8\n'), ((1508, 1517), 'numba.njit', 'njit', (['gpf'], {}), '(gpf)\n', (1512, 1517), False, 'from numba import njit, i8\n'), ((1544, 1571), 'numba.njit', 'njit', (['sieve_of_eratosthenes'], {}), '(sieve_of_eratosthenes)\n', (1548, 1571), False, 'from numba import njit, i8\n'), ((1599, 1618), 'numba.njit', 'njit', (['prime_numbers'], {}), '(prime_numbers)\n', (1603, 1618), False, 'from numba import njit, i8\n'), ((1646, 1665), 'numba.njit', 'njit', (['euler_totient'], {}), '(euler_totient)\n', (1650, 1665), False, 'from numba import njit, i8\n'), ((1744, 1759), 'numba.pycc.CC', 'CC', (['"""my_module"""'], {}), "('my_module')\n", (1746, 1759), False, 'from numba.pycc import CC\n'), ((508, 526), 'numpy.arange', 'np.arange', (['i', 'n', 'i'], {}), '(i, n, i)\n', (517, 526), True, 'import numpy as np\n'), ((645, 657), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (654, 657), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import sys, os, re, math, copy
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, PathPatch
# import the Python wrapper
from dubinswrapper import DubinsWrapper as dubins
################################################
# Scenarios
################################################
#define planning problems:
# map file
# minimum turning radius
# sensing radius
# solver type
scenarios = [
    # (map file, minimum turning radius, sensing radius, solver type)
    ("./problems/gdip-n10.txt", 1, 1, 'DTSPN-GDIP'),
    #("./problems/gdip-n10.txt", 0.5, 1, 'DTSPN-GDIP'),
    #("./problems/gdip-n10.txt", 1, 0.5, 'DTSPN-GDIP'),
]
################################################
# Settings
################################################
# Plotting switches; a "-save" command-line argument forces visualization
# and dumps each figure into ./images.
visualize = True
show = True
# Save figures into a directory "images"
save_figures = False
if len(sys.argv) > 1:
    if "-save" in sys.argv[1]:
        visualize = True
        save_figures = True
if save_figures:
    os.makedirs("images", exist_ok=True)
##################################################
# Functions
##################################################
def load_map(filename):
    """Load goal positions from a config file.

    Each line is expected to hold "label x y"; labels are discarded.

    Returns:
        list of (x, y) float tuples in file order
    """
    with open(filename) as handle:
        rows = [line.split() for line in handle]
    return [(float(x), float(y)) for _label, x, y in rows]
def plot_points(points, specs = 'b'):
    """Plot a sequence of (x, y, ...) configurations with a matplotlib format string."""
    xs = [pt[0] for pt in points]
    ys = [pt[1] for pt in points]
    plt.plot(xs, ys, specs)
def plot_circle(xy, radius):
    """Draw a translucent yellow disk (sensing region) centered at xy."""
    axis = plt.gca()
    disk = Circle(xy, radius, facecolor='yellow', edgecolor="orange", linewidth=1, alpha=0.2)
    axis.add_patch(disk)
def dist_euclidean(coord1, coord2):
    """Euclidean distance between two planar points.

    Uses math.hypot, which is robust against intermediate overflow/underflow
    compared to the manual sqrt(dx*dx + dy*dy) formulation.
    """
    (x1, y1) = coord1
    (x2, y2) = coord2
    return math.hypot(x2 - x1, y2 - y1)
# cache results from computing length of the GDIP
# keyed by the (s1, s2) sample pair; relies on Sample instances hashing by
# object identity, so entries are only reused for the very same objects
lowerPathGDIPLenCache = {}
def lowerPathGDIP(s1, s2, turning_radius, cached = False):
    """Compute lower-bound path using GDIP between two configurations
    Arguments:
        s1 - start; s2 - end; turning_radius
        cached - compute only distance and cache the results
    Returns:
        (maneuver, length); the maneuver is None in cached mode, where only
        the length is needed and stored for reuse
    """
    if cached:
        key = (s1, s2)
        if key in lowerPathGDIPLenCache:
            length = lowerPathGDIPLenCache[key]
            return (None, length)
    # heading intervals are passed as [start_angle, angular_width]
    interval1 = [s1.alpha1, s1.alpha2 - s1.alpha1]
    interval2 = [s2.alpha1, s2.alpha2 - s2.alpha1]
    path = dubins.shortest_path_GDIP(s1.center, interval1, s1.radius, s2.center, interval2, s2.radius, turning_radius)
    length = path.get_length()
    if cached:
        lowerPathGDIPLenCache[key] = length
        return (None, length)
    else:
        return (path, length)
def upperPathGDIP(s1, s2, turning_radius, cached = False):
    """Feasible (upper-bound) Dubins path between two sample regions.

    Connects one concrete feasible configuration from each sample with an
    exact Dubins maneuver.  The `cached` flag is accepted for signature
    compatibility with lowerPathGDIP but is not used here.

    Returns:
        (Dubins maneuver 'DubinsWrapper', length)
    """
    start = s1.getFeasibleState()
    goal = s2.getFeasibleState()
    maneuver = dubins.shortest_path(start, goal, turning_radius)
    return (maneuver, maneuver.get_length())
def compute_distances(samples, dst_fce, turning_radius = 0):
    """Tabulate sample-to-sample distances between consecutive regions.

    For each region r (cyclically), builds a matrix whose [a, b] entry is the
    distance from sample a of region r to sample b of region r+1, using the
    second element of dst_fce's return value (cached mode).

    Returns:
        list of n numpy matrices, one per consecutive region pair
    """
    count = len(samples)
    matrices = []
    for region in range(count):
        src = samples[region]
        dst = samples[(region + 1) % count]
        mat = np.full((len(src), len(dst)), np.inf)
        for a, sample_a in enumerate(src):
            for b, sample_b in enumerate(dst):
                mat[a, b] = dst_fce(sample_a, sample_b, turning_radius, cached=True)[1]
        matrices.append(mat)
    return matrices
def find_shortest_tour(distances):
    """Pick one sample per region minimizing the closed-tour length.

    Dynamic program over the cyclic sequence of regions: for every possible
    starting sample of region 0, forward-propagate shortest path lengths
    region by region, forcing the tour to return to that same start sample,
    then keep the best start.

    Arguments:
        distances - list of n matrices; distances[r][a, b] is the cost from
                    sample a of region r to sample b of region r+1 (cyclic)
    Returns:
        list of n selected sample indices, one per region
    """
    n = len(distances)
    best_len = math.inf
    best_tour = []
    # maximal number of samples
    k_max = max([len(x) for x in distances])
    no_start = len(distances[0])
    for start in range(no_start):
        # shortest sh[region_idx][sample_idx]
        # contains (prev, length)
        sh = np.full((n+1, k_max), np.inf)
        # used edge
        prev = np.full((n+1, k_max), -1)
        sh[0,start] = 0
        for region_idx in range(n):
            n1 = len(distances[region_idx])
            n2 = len(distances[region_idx][0])
            for idx2 in range(n2):
                # best predecessor among the n1 samples of the current region
                dst = sh[region_idx][:n1] + distances[region_idx][:,idx2]
                sh[region_idx+1][idx2] = np.min(dst)
                prev[region_idx+1][idx2]= np.argmin(dst)
        # closed tour: must end back at the chosen start sample of region 0
        act_sol = sh[-1][start]
        if act_sol < best_len:
            best_len = act_sol
            tour = []
            act = start
            # backtrack through the predecessor table
            for i in range(n):
                act = prev[n-i][act]
                tour.append(act)
            best_tour = list(reversed(tour))
    return best_tour
def retrieve_path(samples, dst_fce, turning_radius, selected_samples):
    """Rebuild the maneuvers of the closed tour given the chosen sample
    index of every region."""
    region_count = len(samples)
    path = []
    for idx in range(region_count):
        nxt = (idx + 1) % region_count
        first = samples[idx][selected_samples[idx]]
        second = samples[nxt][selected_samples[nxt]]
        path.append(dst_fce(first, second, turning_radius))
    return path
def path_len(path):
    """Total length of a path given as a sequence of Dubins maneuvers.

    Returns 0 for an empty path.
    """
    # built-in sum() replaces the hand-rolled sum_list helper
    return sum(dub.get_length() for dub in path)
def plot_path(path, turning_radius, settings):
    """Sample every maneuver of the path and draw the configurations."""
    resolution = 0.01 * turning_radius
    for maneuver in path:
        confs, _ = maneuver.sample_many(resolution)
        plot_points(confs, settings)
##################################################
# Target region given by the location and its sensing radius
##################################################
class TargetRegion:
    """Disk-shaped target region given by its center and sensing radius."""
    def __init__(self, center, radius):
        self.center = center
        self.radius = radius
    def get_position_at_boundary(self, beta):
        """Boundary point of the region at polar angle beta (radians)."""
        direction = np.array([math.cos(beta), math.sin(beta)])
        return self.center + self.radius * direction
##################################################
# Sample on the given target region
##################################################
class Sample:
    """One sample of a target region: an interval of heading angles (alpha)
    and an interval of boundary positions (beta), each refinable by halving."""
    def __init__(self, targetRegion):
        # reference to the specific target region of the sample
        self.target = targetRegion
        # heading angle interval
        self.alpha1 = 0
        self.alpha2 = 2 * math.pi
        self.alphaResolution = 1
        # position (on the boundary) interval
        self.beta1 = 0
        self.beta2 = 2 * math.pi
        self.betaResolution = 1
        # center and radius of the position neighborhood on the boundary of the target region
        self.center = np.array(targetRegion.center)
        self.radius = targetRegion.radius
    def split(self, resolution):
        """Split this sample into two finer samples.

        The position (beta) interval is halved first; once its resolution is
        met, the heading (alpha) interval is halved.  The caller is expected
        to replace this sample by the first half and append the second.
        Parameters:
            resolution: the required resolution
        Returns:
            [Sample, Sample] - the two halves, or None if the resolution is met
        """
        # prefer splitting according position resolution
        if self.betaResolution < resolution:
            # copy.copy is shallow: both halves keep the same target reference
            sam1 = copy.copy(self)
            sam2 = copy.copy(self)
            sam1.betaResolution = sam2.betaResolution = 2 * self.betaResolution
            # halve the position interval at its midpoint
            sam1.beta2 = sam2.beta1 = (self.beta1 + self.beta2) / 2
            sam1.update_center_radius()
            sam2.update_center_radius()
            return [sam1, sam2]
        if self.alphaResolution < resolution:
            sam1 = copy.copy(self)
            sam2 = copy.copy(self)
            sam1.alphaResolution = sam2.alphaResolution = 2 * self.alphaResolution
            # halve the heading interval at its midpoint
            sam1.alpha2 = sam2.alpha1 = (self.alpha1 + self.alpha2) / 2
            return [sam1, sam2]
        return None
    def update_center_radius(self):
        # the position neighborhood is the chord between the interval endpoints
        p1 = self.target.get_position_at_boundary(self.beta1)
        p2 = self.target.get_position_at_boundary(self.beta2)
        self.center = (p1 + p2) / 2
        self.radius = dist_euclidean(p1, p2) / 2
    def getFeasibleState(self):
        # one concrete configuration (x, y, heading) taken at the interval starts
        pos = self.target.get_position_at_boundary(self.beta1)
        q = np.zeros(3)
        q[0:2] = pos
        q[2] = self.alpha1
        return q
    def plot(self):
        # draw the position neighborhood of this sample as a translucent circle
        ax = plt.gca()
        circle = Circle(self.center, self.radius, facecolor=None ,edgecolor="green", linewidth=1, alpha=0.2)
        ax.add_patch(circle)
##################################################
# Sampling structure which holds all the used samples
##################################################
class Sampling:
    """Holds all samples of all target regions and refines them on demand."""
    def __init__(self, centers, sensingRadius):
        self.targets = [TargetRegion(center, sensingRadius) for center in centers]
        self.samples = [[Sample(target)] for target in self.targets]
    def refine_samples(self, selected, resolution):
        """Split each selected sample that has not reached the resolution yet.

        Parameters:
            selected: indexes of the selected samples (vector 1 x n)
            resolution: the required resolution
        Returns:
            boolean - True if any sample was refined
        """
        refined = False
        for region_idx in range(len(self.samples)):
            chosen = selected[region_idx]
            halves = self.samples[region_idx][chosen].split(resolution)
            if halves is not None:
                # replace the refined sample by its first half, append the second
                self.samples[region_idx][chosen] = halves[0]
                self.samples[region_idx].append(halves[1])
                refined = True
        return refined
##################################################
# The main solver class
##################################################
class GDIPSolver:
    """Sampling-based solver for the Dubins TSP with Neighborhoods producing
    a GDIP lower-bound tour and a feasible Dubins upper-bound tour."""
    def __init__(self, turning_radius, goals, sensing_radius):
        # problem definition
        self.turning_radius = turning_radius
        self.sensing_radius = sensing_radius
        self.goals = goals
        # one coarse sample per target region to start with
        self.sampling = Sampling(goals, sensing_radius)
        # best tours and bounds found so far
        self.lower_path = []
        self.upper_path = []
        self.lower_bound = 0
        self.upper_bound = math.inf
    def plot_map(self):
        # redraw the scenario: goal points plus their sensing disks (if any)
        plt.clf()
        plt.axis('equal')
        plot_points(self.goals, 'ro')
        if self.sensing_radius != None:
            for goal in self.goals:
                plot_circle(goal, self.sensing_radius)
    def plot_tour_and_return_length(self, selected_samples, maneuver_function, color):
        # Reconstruct the closed tour through the selected samples with
        # maneuver_function, accumulate its length and optionally draw it.
        sampling = self.sampling
        n = len(self.sampling.samples)
        step_size = 0.01 * self.turning_radius
        length = 0
        for a in range(0,n):
            g1 = sampling.samples[a][selected_samples[a]]
            g2 = sampling.samples[(a+1) % n][selected_samples[(a+1) % n]]
            path = maneuver_function(g1, g2, self.turning_radius)
            length += path[1]
            configurations, _ = path[0].sample_many(step_size)
            # `visualize` is a module-level flag defined outside this class
            if visualize:
                plot_points(configurations, color)
        return length
    def plot_actual_and_return_bounds(self):
        """Plot the actual sampling, lower and upper bound path
        Returns:
            (double, double) - lower bound, upper bound
        """
        if visualize:
            self.plot_map()
            for s in self.sampling.samples:
                for ss in s:
                    ss.plot()
        lower_selected_samples = self.find_lower_bound_tour()
        upper_selected_samples = self.find_upper_bound_tour()
        lower_bound = self.plot_tour_and_return_length(lower_selected_samples, lowerPathGDIP, 'r-')
        upper_bound = self.plot_tour_and_return_length(upper_selected_samples, upperPathGDIP, 'b-')
        return (lower_bound, upper_bound)
    def find_lower_bound_tour(self):
        """Select the samples which represent the shortest lower bound tour
        Returns:
            indexes of the samples (vector 1 x n)
        """
        distances = compute_distances(self.sampling.samples, lowerPathGDIP, turning_radius = self.turning_radius)
        selected_samples = find_shortest_tour(distances)
        return selected_samples
    def find_upper_bound_tour(self):
        """Select the samples which represent the shortest upper bound (feasible) tour
        Returns:
            indexes of the samples (vector 1 x n)
        """
        distances = compute_distances(self.sampling.samples, upperPathGDIP, turning_radius = self.turning_radius)
        selected_samples = find_shortest_tour(distances)
        return selected_samples
##################################################
# Main loop over selected scenarios
##################################################
# `scenarios`, `show`, `visualize`, `save_figures`, `load_map` are defined
# earlier in the file (outside this view).
for scenario in scenarios:
    # Load the problem and scenario settings
    filename = scenario[0]
    goals = load_map(filename)
    turning_radius = scenario[1]
    sensing_radius = scenario[2]
    solver_type = scenario[3]  # NOTE(review): read but never used below - confirm
    #tour planning part
    solver = GDIPSolver(turning_radius, goals, sensing_radius)
    solver.plot_actual_and_return_bounds()
    print("\n--- Problem: {} Turning radius: {:6.2f} Sensing radius: {:6.2f} ---"
          .format(filename, turning_radius, sensing_radius))
    if show:
        plt.pause(0.1)
    # iteratively double the sampling resolution up to max_resolution
    max_resolution = 64
    act_res = 4
    while act_res <= max_resolution:
        refined = True
        # refine until no selected sample can be split at this resolution
        while refined:
            selected_samples = solver.find_lower_bound_tour()
            refined = solver.sampling.refine_samples(selected_samples, act_res)
        (lower_bound, upper_bound) = solver.plot_actual_and_return_bounds()
        # relative optimality gap in percent
        gap = (upper_bound - lower_bound) / upper_bound * 100.0
        print("Res: {:4d} Lower: {:6.2f} Upper: {:6.2f} Gap(%): {:6.2f}"
              .format(act_res, lower_bound, upper_bound, gap))
        if visualize:
            plt.title("Maximum resolution: {:4d}".format(act_res))
            if show:
                plt.pause(0.1)
            if save_figures:
                plt.savefig("images/dtrp-res-{:04d}.png".format(act_res))
        act_res *= 2
    if show:
        plt.pause(2)
| [
"dubinswrapper.DubinsWrapper.shortest_path_GDIP",
"dubinswrapper.DubinsWrapper.shortest_path",
"os.makedirs",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.plot",
"math.sqrt",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.axis",
"math.cos",
"numpy.array",
"numpy.zeros",
"numpy.argmin",
"numpy.mi... | [((971, 1007), 'os.makedirs', 'os.makedirs', (['"""images"""'], {'exist_ok': '(True)'}), "('images', exist_ok=True)\n", (982, 1007), False, 'import sys, os, re, math, copy\n'), ((1477, 1506), 'matplotlib.pyplot.plot', 'plt.plot', (['x_val', 'y_val', 'specs'], {}), '(x_val, y_val, specs)\n', (1485, 1506), True, 'import matplotlib.pyplot as plt\n'), ((1549, 1558), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1556, 1558), True, 'import matplotlib.pyplot as plt\n'), ((1572, 1658), 'matplotlib.patches.Circle', 'Circle', (['xy', 'radius'], {'facecolor': '"""yellow"""', 'edgecolor': '"""orange"""', 'linewidth': '(1)', 'alpha': '(0.2)'}), "(xy, radius, facecolor='yellow', edgecolor='orange', linewidth=1,\n alpha=0.2)\n", (1578, 1658), False, 'from matplotlib.patches import Circle, PathPatch\n'), ((1805, 1833), 'math.sqrt', 'math.sqrt', (['(dx * dx + dy * dy)'], {}), '(dx * dx + dy * dy)\n', (1814, 1833), False, 'import sys, os, re, math, copy\n'), ((2510, 2621), 'dubinswrapper.DubinsWrapper.shortest_path_GDIP', 'dubins.shortest_path_GDIP', (['s1.center', 'interval1', 's1.radius', 's2.center', 'interval2', 's2.radius', 'turning_radius'], {}), '(s1.center, interval1, s1.radius, s2.center,\n interval2, s2.radius, turning_radius)\n', (2535, 2621), True, 'from dubinswrapper import DubinsWrapper as dubins\n'), ((3121, 3165), 'dubinswrapper.DubinsWrapper.shortest_path', 'dubins.shortest_path', (['q1', 'q2', 'turning_radius'], {}), '(q1, q2, turning_radius)\n', (3141, 3165), True, 'from dubinswrapper import DubinsWrapper as dubins\n'), ((3468, 3493), 'numpy.full', 'np.full', (['(n1, n2)', 'np.inf'], {}), '((n1, n2), np.inf)\n', (3475, 3493), True, 'import numpy as np\n'), ((4063, 4094), 'numpy.full', 'np.full', (['(n + 1, k_max)', 'np.inf'], {}), '((n + 1, k_max), np.inf)\n', (4070, 4094), True, 'import numpy as np\n'), ((4128, 4155), 'numpy.full', 'np.full', (['(n + 1, k_max)', '(-1)'], {}), '((n + 1, k_max), -1)\n', (4135, 4155), True, 'import numpy as 
np\n'), ((6509, 6538), 'numpy.array', 'np.array', (['targetRegion.center'], {}), '(targetRegion.center)\n', (6517, 6538), True, 'import numpy as np\n'), ((8099, 8110), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (8107, 8110), True, 'import numpy as np\n'), ((8210, 8219), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8217, 8219), True, 'import matplotlib.pyplot as plt\n'), ((8237, 8332), 'matplotlib.patches.Circle', 'Circle', (['self.center', 'self.radius'], {'facecolor': 'None', 'edgecolor': '"""green"""', 'linewidth': '(1)', 'alpha': '(0.2)'}), "(self.center, self.radius, facecolor=None, edgecolor='green',\n linewidth=1, alpha=0.2)\n", (8243, 8332), False, 'from matplotlib.patches import Circle, PathPatch\n'), ((9988, 9997), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9995, 9997), True, 'import matplotlib.pyplot as plt\n'), ((10006, 10023), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (10014, 10023), True, 'import matplotlib.pyplot as plt\n'), ((13027, 13041), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (13036, 13041), True, 'import matplotlib.pyplot as plt\n'), ((13866, 13878), 'matplotlib.pyplot.pause', 'plt.pause', (['(2)'], {}), '(2)\n', (13875, 13878), True, 'import matplotlib.pyplot as plt\n'), ((7111, 7126), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (7120, 7126), False, 'import sys, os, re, math, copy\n'), ((7146, 7161), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (7155, 7161), False, 'import sys, os, re, math, copy\n'), ((7487, 7502), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (7496, 7502), False, 'import sys, os, re, math, copy\n'), ((7522, 7537), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (7531, 7537), False, 'import sys, os, re, math, copy\n'), ((13704, 13718), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (13713, 13718), True, 'import matplotlib.pyplot as plt\n'), ((4457, 4468), 'numpy.min', 'np.min', 
(['dst'], {}), '(dst)\n', (4463, 4468), True, 'import numpy as np\n'), ((4511, 4525), 'numpy.argmin', 'np.argmin', (['dst'], {}), '(dst)\n', (4520, 4525), True, 'import numpy as np\n'), ((5810, 5824), 'math.cos', 'math.cos', (['beta'], {}), '(beta)\n', (5818, 5824), False, 'import sys, os, re, math, copy\n'), ((5826, 5840), 'math.sin', 'math.sin', (['beta'], {}), '(beta)\n', (5834, 5840), False, 'import sys, os, re, math, copy\n')] |
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import gridspec
from kcsd import csd_profile as CSD
from kcsd import ValidateKCSD2D
from figure_properties import *
from kCSD_with_reliability_map_2D import make_reconstruction, matrix_symmetrization
def set_axis(ax, letter=None):
    """Place a bold caption letter just outside the axes' upper-left corner.

    Parameters
    ----------
    ax: Axes object.
    letter: string
        Caption of the plot.
        Default: None.

    Returns
    -------
    ax: modified Axes object.
    """
    ax.text(-0.05, 1.05, letter,
            fontsize=20, weight='bold',
            transform=ax.transAxes)
    return ax
def make_single_subplot(ax, val_type, xs, ys, values, cax, title=None,
                        ele_pos=None, xlabel=False, ylabel=False, letter='',
                        t_max=1., mask=False, level=False):
    """Draw one filled contour map with labeled contour lines and a
    horizontal colorbar in `cax`.

    Parameters (key ones):
        ax, cax: target Axes for the map and its colorbar.
        val_type: 'err' additionally overlays electrode positions (ele_pos).
        level: explicit contour levels; False selects the default 0-0.2 range.

    NOTE(review): `t_max` is recomputed when None but never used afterwards,
    and `mask` is never read - confirm whether they are vestigial.
    """
    cmap = cm.Greys
    ax.set_aspect('equal')
    if t_max is None:
        t_max = np.max(np.abs(values))
    if level is not False:
        levels = level
    else:
        levels = np.linspace(0, 0.2, 32)
    im = ax.contourf(xs, ys, values,
                     levels=levels, cmap=cmap, alpha=1)
    CS = ax.contour(xs, ys, values, cmap='Greys')
    ax.clabel(CS,  # label every second level
              inline=1,
              fmt='%1.2f',
              colors='blue')
    if val_type == 'err':
        # mark electrode locations on error maps
        ax.scatter(ele_pos[:, 0], ele_pos[:, 1], s=20, marker='.', c='black',
                   zorder=3)
    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])
    if xlabel:
        ax.set_xlabel('X (mm)')
    if ylabel:
        ax.set_ylabel('Y (mm)')
    if title is not None:
        ax.set_title(title)
    ax.set_xticks([0, 0.5, 1])
    ax.set_yticks([0, 0.5, 1])
    ticks = np.linspace(0, 0.2, 3, endpoint=True)
    plt.colorbar(im, cax=cax, orientation='horizontal', format='%.2f',
                 ticks=ticks)
    set_axis(ax, letter=letter)
    plt.tight_layout()
    return ax, cax
def generate_reliability_map(point_error, ele_pos, title):
    """Render a single reliability map on a 100x100 unit grid and save it
    as '<title>.png' (also shown on screen)."""
    grid = np.mgrid[0.:1.:100j, 0.:1.:100j]
    csd_x, csd_y = grid
    plt.figure(figsize=(17, 6))
    gs = gridspec.GridSpec(2, 1, height_ratios=[1., 0.04], left=0.415,
                           right=0.585, top=0.880, bottom=0.110)
    ax = plt.subplot(gs[0, 0])
    cax = plt.subplot(gs[1, 0])
    make_single_subplot(ax, 'err', csd_x, csd_y, point_error, cax=cax,
                        ele_pos=ele_pos, title=None, xlabel=True, ylabel=True,
                        letter=' ', t_max=0.2, level=np.linspace(0, 0.2, 16))
    plt.savefig(title + '.png', dpi=300)
    plt.show()
if __name__ == '__main__':
    # ground-truth CSD profile and RNG seed for reproducibility
    CSD_PROFILE = CSD.gauss_2d_large
    CSD_SEED = 16
    ELE_LIMS = [0.05, 0.95]  # range of electrodes space
    method = 'cross-validation'
    # candidate basis widths (R) and regularization lambdas scanned by kCSD
    Rs = np.arange(0.2, 0.5, 0.1)
    lambdas = np.zeros(1)
    noise = 0
    KK = ValidateKCSD2D(CSD_SEED, h=50., sigma=1., n_src_init=400,
                        est_xres=0.01, est_yres=0.01, ele_lims=ELE_LIMS)
    k, csd_at, true_csd, ele_pos, pots = make_reconstruction(KK, CSD_PROFILE,
                                                             CSD_SEED,
                                                             total_ele=100,
                                                             noise=noise,
                                                             Rs=Rs,
                                                             lambdas=lambdas,
                                                             method=method)
    # precomputed point-error maps for the large/small source scenarios
    error_l = np.load('error_maps_2D/point_error_large_100_all_ele.npy')
    error_s = np.load('error_maps_2D/point_error_small_100_all_ele.npy')
    error_all = np.concatenate((error_l, error_s))
    # symmetrize the error matrices before averaging into reliability maps
    symm_array_large = matrix_symmetrization(error_l)
    symm_array_small = matrix_symmetrization(error_s)
    symm_array_all = matrix_symmetrization(error_all)
    generate_reliability_map(np.mean(symm_array_all, axis=0), ele_pos,
                             'Reliability_map_random_newRDM_symm')
    generate_reliability_map(np.mean(symm_array_large, axis=0), ele_pos,
                             'Reliability_map_large_newRDM_symm')
    generate_reliability_map(np.mean(symm_array_small, axis=0), ele_pos,
                             'Reliability_map_small_newRDM_symm')
| [
"numpy.mean",
"numpy.abs",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"kCSD_with_reliability_map_2D.make_reconstruction",
"kCSD_with_reliability_map_2D.matrix_symmetrization",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"numpy.zeros",
"matplotlib... | [((2030, 2067), 'numpy.linspace', 'np.linspace', (['(0)', '(0.2)', '(3)'], {'endpoint': '(True)'}), '(0, 0.2, 3, endpoint=True)\n', (2041, 2067), True, 'import numpy as np\n'), ((2072, 2151), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'cax': 'cax', 'orientation': '"""horizontal"""', 'format': '"""%.2f"""', 'ticks': 'ticks'}), "(im, cax=cax, orientation='horizontal', format='%.2f', ticks=ticks)\n", (2084, 2151), True, 'import matplotlib.pyplot as plt\n'), ((2205, 2223), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2221, 2223), True, 'import matplotlib.pyplot as plt\n'), ((2402, 2429), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(17, 6)'}), '(figsize=(17, 6))\n', (2412, 2429), True, 'import matplotlib.pyplot as plt\n'), ((2439, 2541), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {'height_ratios': '[1.0, 0.04]', 'left': '(0.415)', 'right': '(0.585)', 'top': '(0.88)', 'bottom': '(0.11)'}), '(2, 1, height_ratios=[1.0, 0.04], left=0.415, right=0.585,\n top=0.88, bottom=0.11)\n', (2456, 2541), False, 'from matplotlib import gridspec\n'), ((2575, 2596), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (2586, 2596), True, 'import matplotlib.pyplot as plt\n'), ((2607, 2628), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, 0]'], {}), '(gs[1, 0])\n', (2618, 2628), True, 'import matplotlib.pyplot as plt\n'), ((2861, 2897), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(title + '.png')"], {'dpi': '(300)'}), "(title + '.png', dpi=300)\n", (2872, 2897), True, 'import matplotlib.pyplot as plt\n'), ((2902, 2912), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2910, 2912), True, 'import matplotlib.pyplot as plt\n'), ((3095, 3119), 'numpy.arange', 'np.arange', (['(0.2)', '(0.5)', '(0.1)'], {}), '(0.2, 0.5, 0.1)\n', (3104, 3119), True, 'import numpy as np\n'), ((3134, 3145), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (3142, 3145), 
True, 'import numpy as np\n'), ((3170, 3282), 'kcsd.ValidateKCSD2D', 'ValidateKCSD2D', (['CSD_SEED'], {'h': '(50.0)', 'sigma': '(1.0)', 'n_src_init': '(400)', 'est_xres': '(0.01)', 'est_yres': '(0.01)', 'ele_lims': 'ELE_LIMS'}), '(CSD_SEED, h=50.0, sigma=1.0, n_src_init=400, est_xres=0.01,\n est_yres=0.01, ele_lims=ELE_LIMS)\n', (3184, 3282), False, 'from kcsd import ValidateKCSD2D\n'), ((3342, 3459), 'kCSD_with_reliability_map_2D.make_reconstruction', 'make_reconstruction', (['KK', 'CSD_PROFILE', 'CSD_SEED'], {'total_ele': '(100)', 'noise': 'noise', 'Rs': 'Rs', 'lambdas': 'lambdas', 'method': 'method'}), '(KK, CSD_PROFILE, CSD_SEED, total_ele=100, noise=noise,\n Rs=Rs, lambdas=lambdas, method=method)\n', (3361, 3459), False, 'from kCSD_with_reliability_map_2D import make_reconstruction, matrix_symmetrization\n'), ((3837, 3895), 'numpy.load', 'np.load', (['"""error_maps_2D/point_error_large_100_all_ele.npy"""'], {}), "('error_maps_2D/point_error_large_100_all_ele.npy')\n", (3844, 3895), True, 'import numpy as np\n'), ((3910, 3968), 'numpy.load', 'np.load', (['"""error_maps_2D/point_error_small_100_all_ele.npy"""'], {}), "('error_maps_2D/point_error_small_100_all_ele.npy')\n", (3917, 3968), True, 'import numpy as np\n'), ((3985, 4019), 'numpy.concatenate', 'np.concatenate', (['(error_l, error_s)'], {}), '((error_l, error_s))\n', (3999, 4019), True, 'import numpy as np\n'), ((4043, 4073), 'kCSD_with_reliability_map_2D.matrix_symmetrization', 'matrix_symmetrization', (['error_l'], {}), '(error_l)\n', (4064, 4073), False, 'from kCSD_with_reliability_map_2D import make_reconstruction, matrix_symmetrization\n'), ((4097, 4127), 'kCSD_with_reliability_map_2D.matrix_symmetrization', 'matrix_symmetrization', (['error_s'], {}), '(error_s)\n', (4118, 4127), False, 'from kCSD_with_reliability_map_2D import make_reconstruction, matrix_symmetrization\n'), ((4149, 4181), 'kCSD_with_reliability_map_2D.matrix_symmetrization', 'matrix_symmetrization', (['error_all'], {}), 
'(error_all)\n', (4170, 4181), False, 'from kCSD_with_reliability_map_2D import make_reconstruction, matrix_symmetrization\n'), ((1334, 1357), 'numpy.linspace', 'np.linspace', (['(0)', '(0.2)', '(32)'], {}), '(0, 0.2, 32)\n', (1345, 1357), True, 'import numpy as np\n'), ((4212, 4243), 'numpy.mean', 'np.mean', (['symm_array_all'], {'axis': '(0)'}), '(symm_array_all, axis=0)\n', (4219, 4243), True, 'import numpy as np\n'), ((4350, 4383), 'numpy.mean', 'np.mean', (['symm_array_large'], {'axis': '(0)'}), '(symm_array_large, axis=0)\n', (4357, 4383), True, 'import numpy as np\n'), ((4489, 4522), 'numpy.mean', 'np.mean', (['symm_array_small'], {'axis': '(0)'}), '(symm_array_small, axis=0)\n', (4496, 4522), True, 'import numpy as np\n'), ((1241, 1255), 'numpy.abs', 'np.abs', (['values'], {}), '(values)\n', (1247, 1255), True, 'import numpy as np\n'), ((2832, 2855), 'numpy.linspace', 'np.linspace', (['(0)', '(0.2)', '(16)'], {}), '(0, 0.2, 16)\n', (2843, 2855), True, 'import numpy as np\n')] |
import requests
from typing import List
from bs4 import BeautifulSoup
import http.cookiejar as cookielib
import numpy as np
import re
crawl_header = {"user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
# 根据工资获取中间段
def get_midfix(salary = None) -> str:
    """Build the middle fragment of the 51job search URL.

    salary, when given, is a (low, high) monthly range in yuan; without it
    the wildcard segment '99' (any salary) is used.
    """
    if salary is None:
        return '0000,000000,0000,00,9,99,%2B,2,'
    low, high = int(salary[0]), int(salary[1])
    print(f'工资范围:{low} - {high} (月薪/元)')
    return f'0000,000000,0000,00,9,{low}-{high},%2B,2,'
# 根据关键词获取后缀
# 经验;学历;公司类型;工作类型;公司规模
def get_suffix(config: dict) -> str:
    """Build the query-string tail of the 51job search URL from the config
    filters (experience, degree, company type, job type, company size),
    echoing every active filter to stdout."""
    wy, wy_name = code_workyear(config['workyear'])
    if wy != '99': print('工作经验:' + wy_name)
    df, df_name = code_degreefrom(config['degreefrom'])
    if df != '99': print('学历要求:' + df_name)
    ct, ct_name = code_cotype(config['cotype'])
    if ct != '99': print('公司类型:' + ct_name)
    jt, jt_name = code_jobterm(config['jobterm'])
    if jt != '99': print('工作类型:' + jt_name)
    cs, cs_name = code_cosize(config['cosize'])
    if cs != '99': print('公司规模:' + cs_name)
    return (f'.html?lang=c&postchannel=0000&workyear={wy}&cotype={ct}'
            f'&degreefrom={df}&jobterm={jt}&companysize={cs}'
            f'&ord_field=0&dibiaoid=0&line=&welfare=')
# 将关键字转化为搜索代码
def code_content(tags: List[str], dic):
    """Translate human-readable filter tags into 51job search codes.

    Returns a tuple (code_string, display_name).  No tags, the wildcard
    '所有', or an unknown tag all yield ('99', '所有'), i.e. "no restriction".
    """
    if tags is None or '所有' in tags or len(tags) == 0:  # no search restriction
        return '99', '所有'
    try:
        codes = [dic[t] for t in tags]
    except KeyError:
        # BUG FIX: this branch used to return a bare '99' string, which broke
        # the two-value unpacking done by every caller; return the tuple.
        return '99', '所有'
    return '%252C'.join(codes), ';'.join(tags)
def code_workyear(workyear: List[str]):
    # Work-experience tags -> 51job filter codes.
    mapping = {'在校生/应届生': '01', '1-3年': '02', '3-5年': '03',
               '5-10年': '04', '10年以上': '05', '无需经验': '06'}
    return code_content(workyear, mapping)
def code_cotype(cotype: List[str]):
    # Company-type tags -> 51job filter codes.
    mapping = {'国企': '04', '外资(欧美)': '01', '外资(非欧美)': '02',
               '上司公司': '10', '合资': '03', '民营公司': '05',
               '外企代表处': '06', '政府机关': '07', '事业单位': '08',
               '非营利组织': '09', '创业公司': '11'}
    return code_content(cotype, mapping)
def code_degreefrom(degreefrom: List[str]):
    # Education-level tags -> 51job filter codes.
    mapping = {'初中及以下': '01', '高中/中技/中专': '02', '大专': '03',
               '本科': '04', '硕士': '05', '博士': '06', '无学历要求': '07'}
    return code_content(degreefrom, mapping)
def code_jobterm(jobterm: List[str]):
    # Job-type filter; only the first tag is honored (the site accepts one).
    mapping = {'全职': '01', '兼职': '02', '实习全职': '03', '实习兼职': '04'}
    if jobterm is None:
        return '99', '所有'
    try:
        first = jobterm[0]
        return mapping[first], first
    except:
        # empty list or unknown tag: fall back to "all"
        return '99', '所有'
def code_cosize(cosize: List[str]):
    # Company-size tags -> 51job filter codes.
    mapping = {'少于50人': '01', '50-150人': '02', '150-500人': '03',
               '500-1000人': '04', '1000-5000人': '05',
               '5000-10000人': '06', '10000人以上': '07'}
    return code_content(cosize, mapping)
# 输入搜索结果界面链接,输出当页搜索结果链接
def parse_search_result(url):
    """Fetch one 51job search-result page and return the list of job dicts
    embedded in its window.__SEARCH_RESULT__ script payload."""
    # BUG FIX: the second positional argument of requests.get is `params`,
    # so the UA header dict was being sent as a query string instead of as
    # request headers; it must be passed as headers=.
    web = requests.get(url, headers=crawl_header)
    soup = BeautifulSoup(web.content,'lxml')
    body = soup.body
    # the result payload lives in the last inline <script> block of the body
    result = body.find_all('script',type='text/javascript')[-1]
    # SECURITY NOTE(review): eval() of remote page content can execute
    # arbitrary code if the site changes or is spoofed; consider
    # json.loads / ast.literal_eval instead.
    dic_result = eval(result.text.replace('window.__SEARCH_RESULT__ = ','').strip())
    result_list = dic_result['engine_search_result']
    print(f'This page has {len(result_list)} results')
    return result_list
# 输入全部搜索结果界面链接,输出全部搜索结果链接
def concat_all_result(url_list):
    """Collect the parsed results of every search-result page URL into one list."""
    all_result = []
    for page_url in url_list:
        all_result.extend(parse_search_result(page_url))
    print(f'There are {len(all_result)} results in total')
    return all_result
# 解析搜索结果页面
def parse_web(body):
    """Split a job-detail page into header, sidebar and content sections and
    delegate each to its parser.

    Returns:
        (title, salary, cname, info, tags, com_tags, jd, special, contact, company)
    """
    page = body.find_all('div',class_='tCompanyPage')[0]
    center = page.find_all('div',class_='tCompany_center')[0]
    sider = page.find_all('div', class_='tCompany_sidebar')[0]
    title, salary, cname, info, tags = parse_header(center)
    com_tags = parse_com_tags(sider)
    jd, special, contact, company = parse_content(center)
    return title, salary, cname, info, tags, com_tags, jd, special, contact, company
# 解析搜索结果头部
def parse_header(center):
    """Best-effort extraction of the job-detail header block.

    Returns (title, salary, company_name, info, tags); every field falls
    back to an empty value when its node is missing, and `info` is padded
    to at least 5 entries so callers can index it positionally.
    """
    header = center.find_all('div',class_='tHeader')[0].find_all('div',class_='in')[0].find_all('div',class_='cn')[0]
    # Each field is scraped independently; a missing node must not abort the
    # rest.  Bare `except:` narrowed to `except Exception:` so that
    # KeyboardInterrupt/SystemExit are no longer swallowed.
    try:
        title = header.find_all('h1')[0].text.strip()
    except Exception:
        title = ''
    try:
        salary = header.find_all('strong')[0].string
    except Exception:
        salary = ''
    try:
        cname = header.find_all('p',class_='cname')[0].find_all('a',class_='catn')[0].text.strip()
    except Exception:
        cname = ''
    try:
        info = header.find_all('p',class_='msg')[0].text.split('\xa0\xa0|\xa0\xa0')
    except Exception:
        info = []
    try:
        jtag = header.find_all('div',class_='jtag')[0].find_all('div',class_='t1')[0]
        tags = [t.text for t in jtag.find_all('span')]
    except Exception:
        tags = []
    while len(info) < 5:
        info.append('')
    return title, salary, cname, info, tags
# 解析JD
def parse_job_info(div):
    """Extract the job-description paragraphs and the trailing tag blocks
    (a span title plus linked keywords) from the '职位信息' section.

    Returns:
        (description string joined by newlines, list of {'title','content'} dicts)
    """
    main_msg = div.find_all('div',class_='bmsg job_msg inbox')[0]
    msg_list = main_msg.find_all('p',class_=None)
    msg = []
    # keep only non-empty description paragraphs
    for m in msg_list:
        if len(m.text.strip()) > 0: msg.append(m.text.strip())
    sp = main_msg.find_all('div',class_='mt10')[0].find_all('p',class_='fp') # some postings carry extra tag blocks
    special = []
    try:
        for s in sp:
            title = s.find_all('span')[0].text
            content = [t.text for t in s.find_all('a')]
            special.append({'title':title, 'content':' '.join(content)})
    except:
        # best effort: a malformed block leaves `special` partially filled
        pass
    return '\n'.join(msg), special
def parse_contact_info(div):
    """Extract the stripped contact lines from the '联系方式' section."""
    msg = div.find_all('div',class_='bmsg')[0].find_all('p',class_='fp')
    # removed a redundant `contact = []` that was immediately overwritten
    return [m.text.strip() for m in msg]
def parse_company_info(div):
    """Return the stripped text of the company description ('tmsg') box."""
    tmsg_box = div.find_all('div',class_='tmsg')[0]
    return tmsg_box.text.strip()
def parse_content(center):
    """Walk the main content boxes of a job-detail page and dispatch each
    one by its heading.

    Returns:
        (job_description, special_tags, contact_lines, company_text)
    """
    main_text = center.find_all('div',class_='tCompany_main')[0]
    boxes = main_text.find_all('div',class_='tBorderTop_box')
    msg, special, contact, company = [], [], [], ''
    for box in boxes:
        heading = box.find_all('h2')[0].find_all('span',class_='bname')[0].text.strip()
        if heading == '职位信息':
            msg, special = parse_job_info(box)
        elif heading == '联系方式':
            contact = parse_contact_info(box)
        elif heading == '公司信息':
            company = parse_company_info(box)
    return msg, special, contact, company
def parse_com_tags(sider):
    """Extract company type / size / industry from the sidebar 'com_tag' box.

    Entries without a 'title' attribute become '' and the result is padded
    to 3 items so callers can unpack it positionally.
    """
    box = sider.find_all('div', class_='tBorderTop_box')[0]
    tags = box.find_all('div', class_='com_tag')[0]
    com = []
    # company type; company size; industry
    for p in tags.find_all('p'):
        try:
            com.append(p['title'])
        except KeyError:  # narrowed from a bare except: only a missing attribute is expected
            com.append('')
    while len(com) < 3:
        com.append('')
    return com
def parse_jd_skill(jd, skills):
    """Count occurrences of each skill-keyword group in a job description.

    Parameters:
        jd: job-description string (or None).
        skills: list of keyword groups, e.g. [[s11,s12,s13],[s21,s22],...].
    Returns:
        (per-group counts, dummys) where dummys are six 0/1 indicator flags.
    """
    # skills: [[s11,s12,s13],[s21,s21],...]
    # jd: string
    if jd is None: return [0 for _ in range(len(skills))], [0,0,0,0,0,0]
    else: jd = jd.lower()
    job_skill = [0 for _ in range(len(skills))]
    for i in range(len(skills)):
        for s in skills[i]:
            # escape regex metacharacters occurring in skill names (c++, c#, ...)
            pattern = ''
            for char in s:
                if char in ['+', '-', '#']: pattern += f'\{char}'
                else: pattern += char
            # plain 'c' must not match inside 'c++'/'c#', so strip those first
            if s == 'c': j = ' '+jd.replace('c++','').replace('c#','')+' '
            else: j = ' '+jd+' '
            # NOTE(review): r'^a-Z' looks like a typo for a character class
            # such as '[^a-zA-Z]'; as written it anchors at the string start,
            # so the correction terms are almost always zero - confirm intent.
            num = len(re.findall(pattern,j)) + len(re.findall(r'^a-Z'+pattern+'^a-Z',j))\
                 - len(re.findall(pattern+r'^a-Z',j)) - len(re.findall(r'^a-Z'+pattern,j))
            job_skill[i] += num
    # dummys[0]: bonus mentioned; the other flags mark non-zero counts in
    # fixed index ranges of `skills` (boundaries 72/87/100 - assumes a
    # specific ordering of the skills list; TODO confirm against the caller)
    if '奖金' in jd or '年终奖' in jd: dummys = [1,0,0,0,0,0]
    else: dummys = [0,0,0,0,0,0]
    if np.sum(job_skill[:72],axis=0) > 0: dummys[1] = 1
    if np.sum(job_skill[72:87],axis=0) > 0: dummys[2] = 1
    if np.sum(job_skill[87:100],axis=0) > 0: dummys[3] = 1
    if np.sum(job_skill[100:],axis=0) > 0: dummys[4] = 1
    if np.sum(job_skill[:],axis=0) > 0: dummys[5] = 1
    return job_skill, dummys
# Parse a salary string; result unit: k CNY / year
def parse_salary(salary):
    """Normalize a 51job salary string (e.g. '1-1.5万/月', '150元/天') into a
    (min, max) pair expressed in thousand yuan per year.

    Open-ended ranges ('以上'/'以下') leave the missing bound as None;
    inputs without a '/' period part yield (None, None).  Unrecognized
    units/periods are printed and left unconverted (best effort).
    """
    if salary is None: return None, None
    if '以下' in salary:
        # "X or below": only the upper bound is known
        res = salary.replace('以下','')
        s1,s2 = res.split('/')
        # amount unit: 元 -> /1000, 千 -> as-is, 万 -> *10 (result in thousands)
        if '元' in s1: maxi = float(s1.replace('元',''))/1000
        elif '千' in s1: maxi = float(s1.replace('千',''))
        elif '万' in s1: maxi = float(s1.replace('万',''))*10
        else: print(salary)
        # pay period -> yearly ('小时' assumes a 40 h week, 52 weeks)
        if s2 == '月': maxi *= 12
        elif s2 == '天': maxi *= 365
        elif s2 == '年': maxi = maxi
        elif s2 =='小时': maxi *= 40*52
        else: print(salary)
        return None, maxi
    elif '以上' in salary:
        # "X or above": only the lower bound is known
        res = salary.replace('以上','')
        s1,s2 = res.split('/')
        if '元' in s1: mini = float(s1.replace('元',''))/1000
        elif '千' in s1: mini = float(s1.replace('千',''))
        elif '万' in s1: mini = float(s1.replace('万',''))*10
        else: print(salary)
        if s2 == '月': mini *= 12
        elif s2 == '天': mini *= 365
        elif s2 == '年': mini = mini
        elif s2 == '小时': mini *= 40*52
        else: print(salary)
        return mini, None
    else:
        if '/' not in salary: return None, None
        else:
            s1,s2 = salary.split('/')
            # s: numeric part (possibly an 'a-b' range); unit: last character
            s = s1[:-1]
            unit = s1[-1]
            if unit == '元': par = 1/1000
            elif unit == '千': par = 1
            elif unit == '万': par = 10
            else: print(salary)
            if s2 == '天' or s2 == '日': par *= 365
            elif s2 == '月': par *= 12
            elif s2 == '年': par *= 1
            elif s2 == '小时': par *= 40*52
            else: print(salary)
            # single figure -> degenerate range; otherwise split 'min-max'
            if '-' not in s: return float(s)*par, float(s)*par
            else:
                mini, maxi = s.split('-')
                mini = float(mini.strip())
                maxi = float(maxi.strip())
                return mini*par, maxi*par | [
"bs4.BeautifulSoup",
"numpy.sum",
"re.findall",
"requests.get"
] | [((2753, 2784), 'requests.get', 'requests.get', (['url', 'crawl_header'], {}), '(url, crawl_header)\n', (2765, 2784), False, 'import requests\n'), ((2796, 2830), 'bs4.BeautifulSoup', 'BeautifulSoup', (['web.content', '"""lxml"""'], {}), "(web.content, 'lxml')\n", (2809, 2830), False, 'from bs4 import BeautifulSoup\n'), ((7409, 7439), 'numpy.sum', 'np.sum', (['job_skill[:72]'], {'axis': '(0)'}), '(job_skill[:72], axis=0)\n', (7415, 7439), True, 'import numpy as np\n'), ((7465, 7497), 'numpy.sum', 'np.sum', (['job_skill[72:87]'], {'axis': '(0)'}), '(job_skill[72:87], axis=0)\n', (7471, 7497), True, 'import numpy as np\n'), ((7523, 7556), 'numpy.sum', 'np.sum', (['job_skill[87:100]'], {'axis': '(0)'}), '(job_skill[87:100], axis=0)\n', (7529, 7556), True, 'import numpy as np\n'), ((7582, 7613), 'numpy.sum', 'np.sum', (['job_skill[100:]'], {'axis': '(0)'}), '(job_skill[100:], axis=0)\n', (7588, 7613), True, 'import numpy as np\n'), ((7639, 7667), 'numpy.sum', 'np.sum', (['job_skill[:]'], {'axis': '(0)'}), '(job_skill[:], axis=0)\n', (7645, 7667), True, 'import numpy as np\n'), ((7247, 7278), 're.findall', 're.findall', (["('^a-Z' + pattern)", 'j'], {}), "('^a-Z' + pattern, j)\n", (7257, 7278), False, 'import re\n'), ((7210, 7241), 're.findall', 're.findall', (["(pattern + '^a-Z')", 'j'], {}), "(pattern + '^a-Z', j)\n", (7220, 7241), False, 'import re\n'), ((7116, 7138), 're.findall', 're.findall', (['pattern', 'j'], {}), '(pattern, j)\n', (7126, 7138), False, 'import re\n'), ((7145, 7185), 're.findall', 're.findall', (["('^a-Z' + pattern + '^a-Z')", 'j'], {}), "('^a-Z' + pattern + '^a-Z', j)\n", (7155, 7185), False, 'import re\n')] |
import numpy as np
from pymoo.model.survival import Survival
from pymoo.util.misc import calc_constraint_violation
class FitnessSurvival(Survival):
    """
    This survival method is just for single-objective algorithm.
    Simply sort by first constraint violation and then fitness value and truncate the worst individuals.
    """
    def _do(self, pop, size, data):
        # only a single objective column is supported
        if pop.F.shape[1] != 1:
            raise ValueError("FitnessSurvival can only used for single objective problems!")
        # constraint violation: zero for every individual when there are no constraints
        if pop.G is None or len(pop.G) == 0:
            CV = np.zeros(pop.F.shape[0])
        else:
            CV = calc_constraint_violation(pop.G)
            # satisfied constraints (negative values) do not count as violation
            CV[CV < 0] = 0.0
        # sort by cv and fitness
        # lexicographic order via the key tuple: feasibility first, then objective
        sorted_idx = sorted(range(pop.size()), key=lambda x: (CV[x], pop.F[x]))
        # now truncate the population
        sorted_idx = sorted_idx[:size]
        pop.filter(sorted_idx)
        return pop
| [
"pymoo.util.misc.calc_constraint_violation",
"numpy.zeros"
] | [((565, 589), 'numpy.zeros', 'np.zeros', (['pop.F.shape[0]'], {}), '(pop.F.shape[0])\n', (573, 589), True, 'import numpy as np\n'), ((621, 653), 'pymoo.util.misc.calc_constraint_violation', 'calc_constraint_violation', (['pop.G'], {}), '(pop.G)\n', (646, 653), False, 'from pymoo.util.misc import calc_constraint_violation\n')] |
"""
Author: <NAME>
Last change: 7pm 8/12/2020
linkedin: https://www.linkedin.com/in/abraham-lemus-ruiz/
"""
import requests
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score,mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import Ridge
from sklearn.impute import KNNImputer
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
def model_metrics(y_test,y_pred): #Calculate some metrics for the model
    """Print regression metrics (MSE and RMSE) for the given predictions.

    Args:
        y_test: ground-truth target values.
        y_pred: predicted target values.
    """
    mse = mean_squared_error(y_test,y_pred)
    print("Mean squared error: %.4f"
      % mse)
    #r2 = r2_score(y_test, y_pred)
    #print('R2 score: %.4f' % r2 )
    # Bug fix: RMSE is the square root of the MSE, not half of it.
    rmse = mse ** 0.5
    print('RMSE score: %.4f' % rmse )
#Since this is a regression problem, we'll use an LR predictor inside a function for easy testing the changes
#It just tests the basic LinearRegression model on demand
def test_lineal_regression(features, target, model = None):
    """Fit a regression model on a 75/25 train/test split and report metrics.

    Args:
        features: feature matrix.
        target: target vector.
        model: a scikit-learn regressor; a fresh ``LinearRegression`` is
            created when omitted.

    Fix: the default used to be ``model = LinearRegression()``, a mutable
    default argument evaluated once and then shared (and refit) across
    calls; the ``None`` sentinel gives every call its own estimator.
    """
    if model is None:
        model = LinearRegression()
    #Divide the data set for testing
    X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.25, random_state = 133)
    #Create and fit basic LR model
    model = model.fit(X_train, y_train)
    scores=cross_val_score(model,X_train,y_train,cv=5,n_jobs=-1)
    #Evaluate model
    y_pred = model.predict(X_test)
    #print(str(len(model.coef_))+" features: "+ str(model.coef_))
    print("Cross Val R2 Score: "+ str(scores.mean().round(4)))
    model_metrics(y_test, y_pred)
#Reading the data
print("Reading the data")
df_training_dataset = pd.read_csv(r'train_dataset_digitalhouse.csv')
## Defining pipeline from notebook
# Column groups: numeric columns get KNN imputation + scaling + cubic
# polynomial expansion; categoricals get constant imputation + one-hot.
numeric_features = ["EDAD", "EXPERIENCIA", "AVG_DH", "MINUTES_DH"]
categorical_features = ["GENERO", "NV_ESTUDIO", "ESTUDIO_PREV"]
numeric_for_knnimputer1 = [ "EDAD", "EXPERIENCIA"]
numeric_for_knnimputer2 = [ "AVG_DH", "MINUTES_DH"]
categorical_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))
])
numeric_transformer1 = Pipeline(steps=[
    ('knn',KNNImputer()),
    ('scaler', StandardScaler()),
    ('power-transform', PolynomialFeatures(degree = 3))
])
numeric_transformer2 = Pipeline(steps=[
    ('knn2',KNNImputer()),
    ('scaler2', StandardScaler()),
    ('power-transform2', PolynomialFeatures(degree = 3))
])
# Any column not listed above is dropped by the transformer.
preprocessor = ColumnTransformer(
    [
        ('ed_exp', numeric_transformer1, numeric_for_knnimputer1),
        ('min_avg', numeric_transformer2, numeric_for_knnimputer2),
        ('cat', categorical_transformer, categorical_features)
    ],
    remainder="drop")
#model = Pipeline(steps=[('preprocessor', preprocessor), ('regressor', Ridge(**Ridge_GS.best_params_))])
#Using the pipeline
print("Transforming the data with the pipeline")
X = df_training_dataset.drop(columns = ["DIAS_EMP"])
y = df_training_dataset["DIAS_EMP"]
X = preprocessor.fit_transform(X, y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state = 133)
# Hyper-parameters presumably found by an earlier grid search (see the
# commented-out Ridge_GS reference above) — TODO confirm provenance.
best_params = {'alpha': 1, 'fit_intercept': True, 'solver': 'sparse_cg', 'tol': 0.0001}
print("Training scikit Ridge model with best params")
model = Ridge(**best_params).fit(X_train, y_train)
print(model)
y_pred = model.predict(X_test)
model_metrics(y_test, y_pred)
print("Training LRByHand model with best params")
from LRByHand import LinearRegressionByHand
# NOTE(review): r2_scores/mse_scores are created here but never used below.
r2_scores, mse_scores = np.array([]), np.array([])
model_by_hand = LinearRegressionByHand(learning_rate = 0.001, iterations = 100000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state = 0)
losses = model_by_hand.fit(X_train, y_train)
# NOTE(review): this predicts with the Ridge `model`, not `model_by_hand`,
# so the metrics below evaluate the Ridge model — looks like a bug; confirm.
y_pred = model.predict(X_test)
r2, mse = r2_score(y_test, y_pred), mean_squared_error(y_test, y_pred)
print('r2 : {}'.format( r2 ))
print('mse: {}'.format( mse ))
#print(losses)
#Testing deployed model
# NOTE(review): `id` shadows the builtin of the same name.
id = 1234 #selecting student
student_to_predict = df_training_dataset.iloc[ id ].to_numpy()[:-1].tolist() #drop the target column
# Convert numpy scalars to native Python types so the JSON payload serializes.
for i in range(len(student_to_predict)):
    if type(student_to_predict[i]) == np.float64 or type(student_to_predict[i]) == np.int64 :
        student_to_predict[i] = student_to_predict[i].item()
print("student to predict: ")
print(student_to_predict)
# NOTE: you must manually set API_KEY below using information retrieved from your IBM Cloud account.
#API_KEY = <KEY>"
API_KEY = "<KEY>"
# Exchange the API key for a bearer token, then score the student against
# the deployed IBM Watson ML endpoint.
token_response = requests.post('https://iam.ng.bluemix.net/identity/token', data={"apikey": API_KEY, "grant_type": 'urn:ibm:params:oauth:grant-type:apikey'})
mltoken = token_response.json()["access_token"]
header = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + mltoken}
fields = ['Unnamed: 0', 'EDAD', 'GENERO', 'RESIDENCIA', 'NV_ESTUDIO', 'ESTUDIO_PREV', 'TRACK_DH', 'AVG_DH', 'MINUTES_DH', 'EXPERIENCIA']
# NOTE: manually define and pass the array(s) of values to be scored in the next line
payload_scoring = {"input_data": [{"fields": fields, "values": [ student_to_predict ]}]}
response_scoring = requests.post('https://us-south.ml.cloud.ibm.com/ml/v4/deployments/14dcd737-84a8-4af9-a61f-68e079b29c4f/predictions?version=2020-11-23', json=payload_scoring, headers={'Authorization': 'Bearer ' + mltoken})
print("-------------------")
print("Scoring response from deployed model")
print(response_scoring.json()["predictions"][0]["values"][0][0])
print("Scoring response from manual model")
print(model_by_hand.predict(X[ id ]))
print("Expected response: ")
print(df_training_dataset.iloc[ id ]["DIAS_EMP"])
| [
"requests.post",
"sklearn.preprocessing.PolynomialFeatures",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.linear_model.Ridge",
"sklearn.impute.KNNImputer",
"sklearn.metrics.mean_squared_error",
"sklearn.preprocessing.StandardScaler",
... | [((1859, 1904), 'pandas.read_csv', 'pd.read_csv', (['"""train_dataset_digitalhouse.csv"""'], {}), "('train_dataset_digitalhouse.csv')\n", (1870, 1904), True, 'import pandas as pd\n'), ((2693, 2914), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (["[('ed_exp', numeric_transformer1, numeric_for_knnimputer1), ('min_avg',\n numeric_transformer2, numeric_for_knnimputer2), ('cat',\n categorical_transformer, categorical_features)]"], {'remainder': '"""drop"""'}), "([('ed_exp', numeric_transformer1, numeric_for_knnimputer1\n ), ('min_avg', numeric_transformer2, numeric_for_knnimputer2), ('cat',\n categorical_transformer, categorical_features)], remainder='drop')\n", (2710, 2914), False, 'from sklearn.compose import ColumnTransformer\n'), ((3274, 3330), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'random_state': '(133)'}), '(X, y, test_size=0.25, random_state=133)\n', (3290, 3330), False, 'from sklearn.model_selection import train_test_split\n'), ((3764, 3826), 'LRByHand.LinearRegressionByHand', 'LinearRegressionByHand', ([], {'learning_rate': '(0.001)', 'iterations': '(100000)'}), '(learning_rate=0.001, iterations=100000)\n', (3786, 3826), False, 'from LRByHand import LinearRegressionByHand\n'), ((3866, 3920), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.25)', 'random_state': '(0)'}), '(X, y, test_size=0.25, random_state=0)\n', (3882, 3920), False, 'from sklearn.model_selection import train_test_split\n'), ((4713, 4857), 'requests.post', 'requests.post', (['"""https://iam.ng.bluemix.net/identity/token"""'], {'data': "{'apikey': API_KEY, 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey'}"}), "('https://iam.ng.bluemix.net/identity/token', data={'apikey':\n API_KEY, 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey'})\n", (4726, 4857), False, 'import requests\n'), ((5321, 5537), 'requests.post', 'requests.post', 
(['"""https://us-south.ml.cloud.ibm.com/ml/v4/deployments/14dcd737-84a8-4af9-a61f-68e079b29c4f/predictions?version=2020-11-23"""'], {'json': 'payload_scoring', 'headers': "{'Authorization': 'Bearer ' + mltoken}"}), "(\n 'https://us-south.ml.cloud.ibm.com/ml/v4/deployments/14dcd737-84a8-4af9-a61f-68e079b29c4f/predictions?version=2020-11-23'\n , json=payload_scoring, headers={'Authorization': 'Bearer ' + mltoken})\n", (5334, 5537), False, 'import requests\n'), ((832, 866), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (850, 866), False, 'from sklearn.metrics import accuracy_score, mean_squared_error, r2_score\n'), ((1264, 1282), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1280, 1282), False, 'from sklearn.linear_model import LinearRegression\n'), ((1361, 1429), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'target'], {'test_size': '(0.25)', 'random_state': '(133)'}), '(features, target, test_size=0.25, random_state=133)\n', (1377, 1429), False, 'from sklearn.model_selection import train_test_split\n'), ((1519, 1576), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X_train', 'y_train'], {'cv': '(5)', 'n_jobs': '(-1)'}), '(model, X_train, y_train, cv=5, n_jobs=-1)\n', (1534, 1576), False, 'from sklearn.model_selection import cross_val_score\n'), ((3721, 3733), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3729, 3733), True, 'import numpy as np\n'), ((3735, 3747), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3743, 3747), True, 'import numpy as np\n'), ((4009, 4033), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4017, 4033), False, 'from sklearn.metrics import accuracy_score, mean_squared_error, r2_score\n'), ((4035, 4069), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4053, 4069), False, 'from 
sklearn.metrics import accuracy_score, mean_squared_error, r2_score\n'), ((3484, 3504), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '(**best_params)\n', (3489, 3504), False, 'from sklearn.linear_model import Ridge\n'), ((2237, 2293), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""constant"""', 'fill_value': '"""missing"""'}), "(strategy='constant', fill_value='missing')\n", (2250, 2293), False, 'from sklearn.impute import SimpleImputer\n'), ((2311, 2349), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (2324, 2349), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((2406, 2418), 'sklearn.impute.KNNImputer', 'KNNImputer', ([], {}), '()\n', (2416, 2418), False, 'from sklearn.impute import KNNImputer\n'), ((2436, 2452), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2450, 2452), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2479, 2507), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(3)'}), '(degree=3)\n', (2497, 2507), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((2567, 2579), 'sklearn.impute.KNNImputer', 'KNNImputer', ([], {}), '()\n', (2577, 2579), False, 'from sklearn.impute import KNNImputer\n'), ((2598, 2614), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2612, 2614), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2642, 2670), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(3)'}), '(degree=3)\n', (2660, 2670), False, 'from sklearn.preprocessing import PolynomialFeatures\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-04-17 15:58
# @Author : erwin
import numpy as np
from common.util_function import *
# Limit printed float precision to 3 decimal places.
np.set_printoptions(precision=3)
# 10 evenly spaced values in [0, 100], reshaped into a 2x5 matrix.
arr = np.linspace(0, 100, 10).reshape((2, 5))
print_line("原始数据")
print_br(arr)
# Element-wise arithmetic between the array and a scalar.
print_line("单个array操作")
print_br(np.add(arr, 2))
print_br(np.subtract(arr, 2))
print_br(np.multiply(arr, 2))
print_br(np.divide(arr, 2))
print_br(np.power(arr, 2))
# Squares and square roots.
print_line("平方以及开方")
print_br(np.power(arr, 2))
print_br(np.sqrt(arr))
# Trigonometric and absolute-value ufuncs.
print_line("sin/cos/log/abs")
print_br(np.sin(arr))
print_br(np.cos(arr))
# print_br(np.log(arr1))
print_br(np.abs(arr))
# Rounding: ceiling, floor, and round.
print_line("向上取整/向下取整/四舍五入")
print_br(np.ceil(arr))
print_br(np.floor(arr))
print_br(np.round(arr))
| [
"numpy.abs",
"numpy.multiply",
"numpy.ceil",
"numpy.sqrt",
"numpy.add",
"numpy.power",
"numpy.round",
"numpy.floor",
"numpy.subtract",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"numpy.divide",
"numpy.set_printoptions"
] | [((150, 182), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (169, 182), True, 'import numpy as np\n'), ((297, 311), 'numpy.add', 'np.add', (['arr', '(2)'], {}), '(arr, 2)\n', (303, 311), True, 'import numpy as np\n'), ((322, 341), 'numpy.subtract', 'np.subtract', (['arr', '(2)'], {}), '(arr, 2)\n', (333, 341), True, 'import numpy as np\n'), ((352, 371), 'numpy.multiply', 'np.multiply', (['arr', '(2)'], {}), '(arr, 2)\n', (363, 371), True, 'import numpy as np\n'), ((382, 399), 'numpy.divide', 'np.divide', (['arr', '(2)'], {}), '(arr, 2)\n', (391, 399), True, 'import numpy as np\n'), ((410, 426), 'numpy.power', 'np.power', (['arr', '(2)'], {}), '(arr, 2)\n', (418, 426), True, 'import numpy as np\n'), ((459, 475), 'numpy.power', 'np.power', (['arr', '(2)'], {}), '(arr, 2)\n', (467, 475), True, 'import numpy as np\n'), ((486, 498), 'numpy.sqrt', 'np.sqrt', (['arr'], {}), '(arr)\n', (493, 498), True, 'import numpy as np\n'), ((540, 551), 'numpy.sin', 'np.sin', (['arr'], {}), '(arr)\n', (546, 551), True, 'import numpy as np\n'), ((562, 573), 'numpy.cos', 'np.cos', (['arr'], {}), '(arr)\n', (568, 573), True, 'import numpy as np\n'), ((609, 620), 'numpy.abs', 'np.abs', (['arr'], {}), '(arr)\n', (615, 620), True, 'import numpy as np\n'), ((661, 673), 'numpy.ceil', 'np.ceil', (['arr'], {}), '(arr)\n', (668, 673), True, 'import numpy as np\n'), ((684, 697), 'numpy.floor', 'np.floor', (['arr'], {}), '(arr)\n', (692, 697), True, 'import numpy as np\n'), ((708, 721), 'numpy.round', 'np.round', (['arr'], {}), '(arr)\n', (716, 721), True, 'import numpy as np\n'), ((190, 213), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(10)'], {}), '(0, 100, 10)\n', (201, 213), True, 'import numpy as np\n')] |
import networkx
import numpy
import chainer
from chainer_chemistry.dataset.graph_dataset.base_graph_dataset import PaddingGraphDataset, SparseGraphDataset # NOQA
from chainer_chemistry.dataset.graph_dataset.base_graph_data import PaddingGraphData, SparseGraphData # NOQA
from chainer_chemistry.dataset.graph_dataset.feature_converters import batch_without_padding # NOQA
class BaseNetworkxPreprocessor(object):
    """Base class to preprocess a `Networkx::Graph` object."""

    def __init__(self, *args, **kwargs):
        pass

    def get_x(self, graph):
        # Prefer a graph-level feature matrix when one is attached.
        if 'x' in graph.graph:
            return graph.graph['x']
        # Otherwise assemble it from the per-node 'x' attributes.
        feature_dim, = graph.nodes[0]['x'].shape
        features = numpy.empty((graph.number_of_nodes(), feature_dim),
                              dtype=numpy.float32)
        for node_id, attrs in graph.nodes.data():
            features[node_id] = attrs['x']
        return features

    def get_y(self, graph):
        # Prefer a graph-level label when one is attached.
        if 'y' in graph.graph:
            return graph.graph['y']
        # Otherwise collect the per-node 'y' labels.
        labels = numpy.empty(graph.number_of_nodes(), dtype=numpy.int32)
        for node_id, attrs in graph.nodes.data():
            labels[node_id] = attrs['y']
        return labels
class BasePaddingNetworkxPreprocessor(BaseNetworkxPreprocessor):
    """Base class to preprocess `Networkx::Graph` into `PaddingGraphDataset`
    """  # NOQA

    def __init__(self, use_coo=False, *args, **kwargs):
        # use_coo: when True, build the adjacency matrix as a sparse
        # `chainer.utils.CooMatrix` instead of a dense numpy array.
        self.use_coo = use_coo

    def construct_data(self, graph):
        """Construct `PaddingGraphData` from `Networkx::Graph`

        Args:
            graph (Networkx::Graph): graph

        Returns:
            PaddingGraphData: graph data of padding pattern
        """
        if not self.use_coo:
            # Dense adjacency path.
            return PaddingGraphData(
                x=self.get_x(graph),
                adj=networkx.to_numpy_array(graph, dtype=numpy.float32),
                y=self.get_y(graph),
                label_num=graph.graph['label_num']
            )

        # COO path: each undirected edge is stored twice (both directions).
        n_edges = graph.number_of_edges() * 2
        # Fix: `numpy.int` was a deprecated alias of the builtin `int`
        # (removed in NumPy >= 1.24); `dtype=int` is the exact equivalent.
        row = numpy.empty((n_edges), dtype=int)
        col = numpy.empty((n_edges), dtype=int)
        data = numpy.ones((n_edges), dtype=numpy.float32)
        for i, edge in enumerate(graph.edges):
            row[2 * i] = edge[0]
            row[2 * i + 1] = edge[1]
            col[2 * i] = edge[1]
            col[2 * i + 1] = edge[0]
        # ensure row is sorted (CooMatrix below is built with order='C')
        if not numpy.all(row[:-1] <= row[1:]):
            order = numpy.argsort(row)
            row = row[order]
            col = col[order]
            assert numpy.all(row[:-1] <= row[1:])
        adj = chainer.utils.CooMatrix(
            data=data, row=row, col=col,
            shape=(graph.number_of_nodes(), graph.number_of_nodes()),
            order='C')
        return PaddingGraphData(
            x=self.get_x(graph),
            adj=adj,
            y=self.get_y(graph),
            label_num=graph.graph['label_num']
        )

    def create_dataset(self, graph_list):
        """Create `PaddingGraphDataset` from list of `Networkx::Graph`

        Args:
            graph_list (list[Networkx::Graph]): list of graphs

        Returns:
            PaddingGraphDataset: graph dataset of padding pattern
        """
        data_list = [
            self.construct_data(graph) for graph in graph_list
        ]
        dataset = PaddingGraphDataset(data_list)
        dataset.register_feature('label_num', batch_without_padding)
        return dataset
class BaseSparseNetworkxPreprocessor(BaseNetworkxPreprocessor):
    """Base class to preprocess `Networkx::Graph` into `SparseGraphDataset`
    """

    def construct_data(self, graph):
        """Construct `SparseGraphData` from `Networkx::Graph`

        Args:
            graph (Networkx::Graph): graph

        Returns:
            SparseGraphData: graph data of sparse pattern
        """
        # Each undirected edge contributes two directed columns.
        # Fix: `numpy.int` was a deprecated alias of the builtin `int`
        # (removed in NumPy >= 1.24); `dtype=int` is the exact equivalent.
        edge_index = numpy.empty((2, graph.number_of_edges() * 2),
                                 dtype=int)
        for i, edge in enumerate(graph.edges):
            edge_index[0][2 * i] = edge[0]
            edge_index[0][2 * i + 1] = edge[1]
            edge_index[1][2 * i] = edge[1]
            edge_index[1][2 * i + 1] = edge[0]
        return SparseGraphData(
            x=self.get_x(graph),
            edge_index=numpy.array(edge_index, dtype=int),
            y=self.get_y(graph),
            label_num=graph.graph['label_num']
        )

    def add_self_loop(self, graph):
        # Add a v->v edge for every node (mutates `graph` in place).
        for v in range(graph.number_of_nodes()):
            graph.add_edge(v, v)
        return graph

    def create_dataset(self, graph_list):
        """Create `SparseGraphDataset` from list of `Networkx::Graph`

        Args:
            graph_list (list[Networkx::Graph]): list of graphs

        Returns:
            SparseGraphDataset: graph dataset of sparse pattern
        """
        data_list = [
            self.construct_data(graph) for graph in graph_list
        ]
        dataset = SparseGraphDataset(data_list)
        dataset.register_feature('label_num', batch_without_padding)
        return dataset
| [
"chainer_chemistry.dataset.graph_dataset.base_graph_dataset.SparseGraphDataset",
"numpy.ones",
"networkx.to_numpy_array",
"numpy.argsort",
"numpy.array",
"numpy.empty",
"chainer_chemistry.dataset.graph_dataset.base_graph_dataset.PaddingGraphDataset",
"numpy.all"
] | [((2023, 2060), 'numpy.empty', 'numpy.empty', (['n_edges'], {'dtype': 'numpy.int'}), '(n_edges, dtype=numpy.int)\n', (2034, 2060), False, 'import numpy\n'), ((2077, 2114), 'numpy.empty', 'numpy.empty', (['n_edges'], {'dtype': 'numpy.int'}), '(n_edges, dtype=numpy.int)\n', (2088, 2114), False, 'import numpy\n'), ((2132, 2172), 'numpy.ones', 'numpy.ones', (['n_edges'], {'dtype': 'numpy.float32'}), '(n_edges, dtype=numpy.float32)\n', (2142, 2172), False, 'import numpy\n'), ((2553, 2583), 'numpy.all', 'numpy.all', (['(row[:-1] <= row[1:])'], {}), '(row[:-1] <= row[1:])\n', (2562, 2583), False, 'import numpy\n'), ((3337, 3367), 'chainer_chemistry.dataset.graph_dataset.base_graph_dataset.PaddingGraphDataset', 'PaddingGraphDataset', (['data_list'], {}), '(data_list)\n', (3356, 3367), False, 'from chainer_chemistry.dataset.graph_dataset.base_graph_dataset import PaddingGraphDataset, SparseGraphDataset\n'), ((4960, 4989), 'chainer_chemistry.dataset.graph_dataset.base_graph_dataset.SparseGraphDataset', 'SparseGraphDataset', (['data_list'], {}), '(data_list)\n', (4978, 4989), False, 'from chainer_chemistry.dataset.graph_dataset.base_graph_dataset import PaddingGraphDataset, SparseGraphDataset\n'), ((2409, 2439), 'numpy.all', 'numpy.all', (['(row[:-1] <= row[1:])'], {}), '(row[:-1] <= row[1:])\n', (2418, 2439), False, 'import numpy\n'), ((2461, 2479), 'numpy.argsort', 'numpy.argsort', (['row'], {}), '(row)\n', (2474, 2479), False, 'import numpy\n'), ((4290, 4330), 'numpy.array', 'numpy.array', (['edge_index'], {'dtype': 'numpy.int'}), '(edge_index, dtype=numpy.int)\n', (4301, 4330), False, 'import numpy\n'), ((1807, 1858), 'networkx.to_numpy_array', 'networkx.to_numpy_array', (['graph'], {'dtype': 'numpy.float32'}), '(graph, dtype=numpy.float32)\n', (1830, 1858), False, 'import networkx\n')] |
import tensorflow as tf
from datetime import datetime
from packaging import version
from tensorflow import keras
import numpy as np
def celsius_to_fahrenheit(c):
    """Convert a temperature in degrees Celsius to degrees Fahrenheit."""
    scaled = c * (9 / 5)
    return scaled + 32
# Tiny MLP over a single scalar input.
model = keras.models.Sequential([
    keras.layers.Dense(16, input_dim=1, activation='relu'),
    keras.layers.Dense(6, activation='relu'),
    keras.layers.Dense(1)
])
# TensorBoard callback writing under logs/graph/.
logdir = "logs/graph/"
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
# NOTE(review): the model ends in Dense(1) while the loss is
# sparse_categorical_crossentropy and labels reach 999 — this looks like a
# deliberately broken setup (e.g. a debugging/TensorBoard demo); confirm
# intent before "fixing".
model.compile(
    optimizer='SGD',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'])
# 500 random 224x224x3 inputs with labels 0..498 plus one out-of-range 999.
random_training_images = np.random.normal(size=(500,224,224,3))
random_training_labels = list(range(0, 500))
random_training_labels[-1] = 999
model.fit(random_training_images,
          random_training_labels,
          epochs=1,
          batch_size=32,
          callbacks=[tensorboard_callback])
| [
"numpy.random.normal",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.TensorBoard"
] | [((410, 453), 'tensorflow.keras.callbacks.TensorBoard', 'keras.callbacks.TensorBoard', ([], {'log_dir': 'logdir'}), '(log_dir=logdir)\n', (437, 453), False, 'from tensorflow import keras\n'), ((589, 630), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(500, 224, 224, 3)'}), '(size=(500, 224, 224, 3))\n', (605, 630), True, 'import numpy as np\n'), ((231, 285), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(16)'], {'input_dim': '(1)', 'activation': '"""relu"""'}), "(16, input_dim=1, activation='relu')\n", (249, 285), False, 'from tensorflow import keras\n'), ((291, 331), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(6)'], {'activation': '"""relu"""'}), "(6, activation='relu')\n", (309, 331), False, 'from tensorflow import keras\n'), ((337, 358), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {}), '(1)\n', (355, 358), False, 'from tensorflow import keras\n')] |
# Copyright (c) 2020, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import sys
import torch
import transformers
import numpy as np
from contexttimer import Timer
from typing import List, Dict, Any
from transformers import GlueDataset
from transformers import TrainingArguments
from transformers import default_data_collator
from influence_utils import parallel
from influence_utils import faiss_utils
from influence_utils import nn_influence_utils
from influence_utils.nn_influence_utils import compute_s_test
from experiments import constants
from experiments import misc_utils
from experiments import remote_utils
from experiments.data_utils import (
glue_output_modes,
glue_compute_metrics)
def one_experiment(
        model: torch.nn.Module,
        train_dataset: GlueDataset,
        test_inputs: Dict[str, torch.Tensor],
        batch_size: int,
        random: bool,
        n_gpu: int,
        device: torch.device,
        damp: float,
        scale: float,
        num_samples: int,
) -> List[torch.Tensor]:
    """Run a single s_test estimation and return the vectors on CPU.

    Frozen (non-trainable) parameters are excluded from the influence
    computation and from weight decay.
    """
    frozen_param_names = [
        n for n, p in model.named_parameters()
        if not p.requires_grad]

    # Weight decay is skipped for biases, LayerNorm weights, and every
    # frozen parameter.
    no_decay_names = ["bias", "LayerNorm.weight"] + frozen_param_names

    # Re-create the dataloader per experiment so shuffling state does not
    # leak across runs.
    loader = misc_utils.get_dataloader(
        dataset=train_dataset,
        batch_size=batch_size,
        random=random)

    s_test = compute_s_test(
        n_gpu=n_gpu,
        device=device,
        model=model,
        test_inputs=test_inputs,
        train_data_loaders=[loader],
        params_filter=frozen_param_names,
        weight_decay=constants.WEIGHT_DECAY,
        weight_decay_ignores=no_decay_names,
        damp=damp,
        scale=scale,
        num_samples=num_samples)

    # Move every estimated vector to CPU before returning.
    return [vec.cpu() for vec in s_test]
def main(
        mode: str,
        num_examples_to_test: int = 5,
        num_repetitions: int = 4,
) -> List[Dict[str, Any]]:
    """Benchmark `one_experiment` over a grid of hyper-parameters.

    For each eligible MNLI evaluation example (filtered by `mode` to
    correctly or incorrectly predicted ones), runs s_test estimation for
    every combination of num_samples (700..1300 step 100), batch size
    (powers of two 1..128) and repetition, timing each run and mirroring
    the result to remote storage.

    Args:
        mode: either "only-correct" or "only-incorrect".
        num_examples_to_test: number of evaluation examples to process.
        num_repetitions: repetitions per (num_samples, batch_size) cell.

    Returns:
        List of per-run dictionaries (timings, s_test vectors, metadata).
    """
    if mode not in ["only-correct", "only-incorrect"]:
        raise ValueError(f"Unrecognized mode {mode}")
    task_tokenizer, task_model = misc_utils.create_tokenizer_and_model(
        constants.MNLI_MODEL_PATH)
    train_dataset, eval_dataset = misc_utils.create_datasets(
        task_name="mnli",
        tokenizer=task_tokenizer)
    # Batch size 1 so each iteration yields exactly one evaluation example.
    eval_instance_data_loader = misc_utils.get_dataloader(
        dataset=eval_dataset,
        batch_size=1,
        random=False)
    output_mode = glue_output_modes["mnli"]
    def build_compute_metrics_fn(task_name: str):
        # Builds a metrics function matching the task's output mode.
        def compute_metrics_fn(p):
            if output_mode == "classification":
                preds = np.argmax(p.predictions, axis=1)
            elif output_mode == "regression":
                preds = np.squeeze(p.predictions)
            return glue_compute_metrics(task_name, preds, p.label_ids)
        return compute_metrics_fn
    # Most of these arguments are placeholders
    # and are not really used at all, so ignore
    # the exact values of these.
    trainer = transformers.Trainer(
        model=task_model,
        args=TrainingArguments(
            output_dir="./tmp-output",
            per_device_train_batch_size=128,
            per_device_eval_batch_size=128,
            learning_rate=5e-5,
            logging_steps=100),
        data_collator=default_data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=build_compute_metrics_fn("mnli"),
    )
    task_model.cuda()
    num_examples_tested = 0
    output_collections = []
    for test_index, test_inputs in enumerate(eval_instance_data_loader):
        if num_examples_tested >= num_examples_to_test:
            break
        # Skip when we only want cases of correction prediction but the
        # prediction is incorrect, or vice versa
        prediction_is_correct = misc_utils.is_prediction_correct(
            trainer=trainer,
            model=task_model,
            inputs=test_inputs)
        if mode == "only-correct" and prediction_is_correct is False:
            continue
        if mode == "only-incorrect" and prediction_is_correct is True:
            continue
        # Move all tensor inputs onto the GPU.
        for k, v in test_inputs.items():
            if isinstance(v, torch.Tensor):
                test_inputs[k] = v.to(torch.device("cuda"))
        # with batch-size 128, 1500 iterations is enough
        for num_samples in range(700, 1300 + 1, 100):  # 7 choices
            for batch_size in [1, 2, 4, 8, 16, 32, 64, 128]:  # 8 choices
                for repetition in range(num_repetitions):
                    print(f"Running #{test_index} "
                          f"N={num_samples} "
                          f"B={batch_size} "
                          f"R={repetition} takes ...", end=" ")
                    with Timer() as timer:
                        s_test = one_experiment(
                            model=task_model,
                            train_dataset=train_dataset,
                            test_inputs=test_inputs,
                            batch_size=batch_size,
                            random=True,
                            n_gpu=1,
                            device=torch.device("cuda"),
                            damp=constants.DEFAULT_INFLUENCE_HPARAMS["mnli"]["mnli"]["damp"],
                            scale=constants.DEFAULT_INFLUENCE_HPARAMS["mnli"]["mnli"]["scale"],
                            num_samples=num_samples)
                        time_elapsed = timer.elapsed
                        print(f"{time_elapsed:.2f} seconds")
                    outputs = {
                        "test_index": test_index,
                        "num_samples": num_samples,
                        "batch_size": batch_size,
                        "repetition": repetition,
                        "s_test": s_test,
                        "time_elapsed": time_elapsed,
                        "correct": prediction_is_correct,
                    }
                    output_collections.append(outputs)
                    # Persist each run immediately so partial progress survives.
                    remote_utils.save_and_mirror_scp_to_remote(
                        object_to_save=outputs,
                        file_name=f"stest.{mode}.{num_examples_to_test}."
                                  f"{test_index}.{num_samples}."
                                  f"{batch_size}.{repetition}.pth")
        num_examples_tested += 1
    return output_collections
| [
"transformers.TrainingArguments",
"experiments.misc_utils.is_prediction_correct",
"experiments.misc_utils.create_datasets",
"experiments.remote_utils.save_and_mirror_scp_to_remote",
"numpy.argmax",
"numpy.squeeze",
"experiments.misc_utils.get_dataloader",
"experiments.misc_utils.create_tokenizer_and_m... | [((1520, 1610), 'experiments.misc_utils.get_dataloader', 'misc_utils.get_dataloader', ([], {'dataset': 'train_dataset', 'batch_size': 'batch_size', 'random': 'random'}), '(dataset=train_dataset, batch_size=batch_size,\n random=random)\n', (1545, 1610), False, 'from experiments import misc_utils\n'), ((1646, 1947), 'influence_utils.nn_influence_utils.compute_s_test', 'compute_s_test', ([], {'n_gpu': 'n_gpu', 'device': 'device', 'model': 'model', 'test_inputs': 'test_inputs', 'train_data_loaders': '[batch_train_data_loader]', 'params_filter': 'params_filter', 'weight_decay': 'constants.WEIGHT_DECAY', 'weight_decay_ignores': 'weight_decay_ignores', 'damp': 'damp', 'scale': 'scale', 'num_samples': 'num_samples'}), '(n_gpu=n_gpu, device=device, model=model, test_inputs=\n test_inputs, train_data_loaders=[batch_train_data_loader],\n params_filter=params_filter, weight_decay=constants.WEIGHT_DECAY,\n weight_decay_ignores=weight_decay_ignores, damp=damp, scale=scale,\n num_samples=num_samples)\n', (1660, 1947), False, 'from influence_utils.nn_influence_utils import compute_s_test\n'), ((2321, 2385), 'experiments.misc_utils.create_tokenizer_and_model', 'misc_utils.create_tokenizer_and_model', (['constants.MNLI_MODEL_PATH'], {}), '(constants.MNLI_MODEL_PATH)\n', (2358, 2385), False, 'from experiments import misc_utils\n'), ((2429, 2499), 'experiments.misc_utils.create_datasets', 'misc_utils.create_datasets', ([], {'task_name': '"""mnli"""', 'tokenizer': 'task_tokenizer'}), "(task_name='mnli', tokenizer=task_tokenizer)\n", (2455, 2499), False, 'from experiments import misc_utils\n'), ((2549, 2624), 'experiments.misc_utils.get_dataloader', 'misc_utils.get_dataloader', ([], {'dataset': 'eval_dataset', 'batch_size': '(1)', 'random': '(False)'}), '(dataset=eval_dataset, batch_size=1, random=False)\n', (2574, 2624), False, 'from experiments import misc_utils\n'), ((4064, 4156), 'experiments.misc_utils.is_prediction_correct', 
'misc_utils.is_prediction_correct', ([], {'trainer': 'trainer', 'model': 'task_model', 'inputs': 'test_inputs'}), '(trainer=trainer, model=task_model, inputs=\n test_inputs)\n', (4096, 4156), False, 'from experiments import misc_utils\n'), ((3001, 3052), 'experiments.data_utils.glue_compute_metrics', 'glue_compute_metrics', (['task_name', 'preds', 'p.label_ids'], {}), '(task_name, preds, p.label_ids)\n', (3021, 3052), False, 'from experiments.data_utils import glue_output_modes, glue_compute_metrics\n'), ((3292, 3451), 'transformers.TrainingArguments', 'TrainingArguments', ([], {'output_dir': '"""./tmp-output"""', 'per_device_train_batch_size': '(128)', 'per_device_eval_batch_size': '(128)', 'learning_rate': '(5e-05)', 'logging_steps': '(100)'}), "(output_dir='./tmp-output', per_device_train_batch_size=\n 128, per_device_eval_batch_size=128, learning_rate=5e-05, logging_steps=100\n )\n", (3309, 3451), False, 'from transformers import TrainingArguments\n'), ((2853, 2885), 'numpy.argmax', 'np.argmax', (['p.predictions'], {'axis': '(1)'}), '(p.predictions, axis=1)\n', (2862, 2885), True, 'import numpy as np\n'), ((2956, 2981), 'numpy.squeeze', 'np.squeeze', (['p.predictions'], {}), '(p.predictions)\n', (2966, 2981), True, 'import numpy as np\n'), ((4498, 4518), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4510, 4518), False, 'import torch\n'), ((6261, 6448), 'experiments.remote_utils.save_and_mirror_scp_to_remote', 'remote_utils.save_and_mirror_scp_to_remote', ([], {'object_to_save': 'outputs', 'file_name': 'f"""stest.{mode}.{num_examples_to_test}.{test_index}.{num_samples}.{batch_size}.{repetition}.pth"""'}), "(object_to_save=outputs,\n file_name=\n f'stest.{mode}.{num_examples_to_test}.{test_index}.{num_samples}.{batch_size}.{repetition}.pth'\n )\n", (6303, 6448), False, 'from experiments import remote_utils\n'), ((5009, 5016), 'contexttimer.Timer', 'Timer', ([], {}), '()\n', (5014, 5016), False, 'from contexttimer import Timer\n'), ((5396, 
5416), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5408, 5416), False, 'import torch\n')] |
import numpy as np
# PyTorch stuff
import torch
# ------ MINE-F LOSS FUNCTION ------ #
def minef_loss(x_sample, y_sample, model, device):
    """MINE-f (NWJ) loss: the negated lower bound on mutual information.

    The bound is E_joint[T] - E_marginal[exp(T)] / e, where the marginal
    expectation is approximated by shuffling the y samples.
    """
    # A random permutation of y breaks the pairing, approximating samples
    # from the product of marginals.
    idxs = np.random.choice(
        range(len(y_sample)), size=len(y_sample), replace=False)
    y_marginal = y_sample[idxs]

    # Critic scores on paired (joint) and shuffled (marginal) samples.
    t_joint = model(x_sample, y_sample)
    t_marginal = model(x_sample, y_marginal)

    # Normalizing constant e; with Z = e the added log(Z) - 1 term is ~0.
    Z = torch.tensor(np.exp(1), device=device, dtype=torch.float)
    lower_bound = torch.mean(t_joint) - torch.mean(
        torch.exp(t_marginal) / Z + torch.log(Z) - 1)

    # PyTorch minimizes, so return the negated bound.
    return -lower_bound
def minef_gradients(x_sample, y_sample, ygrads, model, device):
    """Gradient of the MINE-f lower bound with respect to the data y,
    chained through the user-supplied gradients `ygrads` (forward-mode
    style differentiation).  Returns the negated gradient, suitable for
    a minimizer.
    """
    # Marginal samples: randomly permute y (and its gradients) in lockstep.
    perm = np.random.permutation(len(y_sample))
    y_perm = y_sample[perm]
    ygrads_perm = ygrads[perm]

    def _leaf(data):
        # Wrap the data as a fresh leaf tensor so autograd can
        # differentiate with respect to it (y is not a leaf variable in
        # the caller's computation graph).
        return torch.tensor(
            data, dtype=torch.float, device=device, requires_grad=True)

    x_leaf = _leaf(x_sample)
    y_leaf = _leaf(y_sample)
    y_perm_leaf = _leaf(y_perm)

    # Critic outputs on joint and product-of-marginals samples.
    t_joint = model(x_leaf, y_leaf)
    t_marg = model(x_leaf, y_perm_leaf)

    # d(sum T)/dy for each term of the bound.
    grad_joint = torch.autograd.grad(
        t_joint.sum(), (x_leaf, y_leaf), retain_graph=True)[1].data
    grad_marg = torch.autograd.grad(
        t_marg.sum(), (x_leaf, y_perm_leaf), retain_graph=True)[1].data

    # Chain rule through ygrads; the marginal term carries the NWJ
    # exp(T)/e weighting.
    e = torch.tensor(np.exp(1), device=device, dtype=torch.float)
    term_joint = torch.mean(grad_joint * ygrads, axis=0)
    term_marg = torch.mean(
        grad_marg * ygrads_perm * torch.exp(t_marg) / e, axis=0)
    dI = term_joint.reshape(-1, 1) - term_marg.reshape(-1, 1)
    # we want to maximize the lower bound; PyTorch minimizes
    return - dI
| [
"torch.log",
"torch.mean",
"torch.exp",
"numpy.exp",
"torch.tensor"
] | [((1190, 1266), 'torch.tensor', 'torch.tensor', (['x_sample'], {'dtype': 'torch.float', 'device': 'device', 'requires_grad': '(True)'}), '(x_sample, dtype=torch.float, device=device, requires_grad=True)\n', (1202, 1266), False, 'import torch\n'), ((1291, 1367), 'torch.tensor', 'torch.tensor', (['y_sample'], {'dtype': 'torch.float', 'device': 'device', 'requires_grad': '(True)'}), '(y_sample, dtype=torch.float, device=device, requires_grad=True)\n', (1303, 1367), False, 'import torch\n'), ((1393, 1470), 'torch.tensor', 'torch.tensor', (['y_shuffle'], {'dtype': 'torch.float', 'device': 'device', 'requires_grad': '(True)'}), '(y_shuffle, dtype=torch.float, device=device, requires_grad=True)\n', (1405, 1470), False, 'import torch\n'), ((1956, 1995), 'torch.mean', 'torch.mean', (['(dIdy_joint * ygrads)'], {'axis': '(0)'}), '(dIdy_joint * ygrads, axis=0)\n', (1966, 1995), False, 'import torch\n'), ((554, 563), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (560, 563), True, 'import numpy as np\n'), ((611, 633), 'torch.mean', 'torch.mean', (['pred_joint'], {}), '(pred_joint)\n', (621, 633), False, 'import torch\n'), ((2017, 2026), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (2023, 2026), True, 'import numpy as np\n'), ((2121, 2141), 'torch.exp', 'torch.exp', (['pred_marg'], {}), '(pred_marg)\n', (2130, 2141), False, 'import torch\n'), ((683, 695), 'torch.log', 'torch.log', (['Z'], {}), '(Z)\n', (692, 695), False, 'import torch\n'), ((656, 676), 'torch.exp', 'torch.exp', (['pred_marg'], {}), '(pred_marg)\n', (665, 676), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plot
import cPickle
import mscentipede
import argparse
import gzip
def plot_profile(footprint_model, background_model, mlen, protocol):
    """Plot the cleavage profile at bound vs. unbound sites.

    Parameters
    ----------
    footprint_model : multiscale model fit at bound sites; must expose `J`
        (number of scales) and `value[j]` (per-scale split parameters).
    background_model : model with the same structure, fit at unbound sites.
    mlen : int, motif length; marked with a dashed vertical line.
    protocol : 'DNase_seq' or 'ATAC_seq'; controls whether the profile is
        split into two strands and how the x-axis is laid out.

    Returns
    -------
    matplotlib figure containing the profile plot.

    NOTE: Python 2 code (`xrange`; `L/2` relies on integer division).
    """
    # Expand the multiscale parameters into a full-resolution profile:
    # at each scale every position splits in two, weighted by value[j]
    # and its complement 1 - value[j].
    foreground = np.array([1])
    for j in xrange(footprint_model.J):
        foreground = np.array([p for val in foreground for p in [val,val]])
        vals = np.array([i for v in footprint_model.value[j] for i in [v,1-v]])
        foreground = vals*foreground
    # Same multiscale expansion for the background (unbound) model.
    background = np.array([1])
    for j in xrange(background_model.J):
        background = np.array([p for val in background for p in [val,val]])
        vals = np.array([i for v in background_model.value[j] for i in [v,1-v]])
        background = vals*background
    figure = plot.figure()
    subplot = figure.add_axes([0.1,0.1,0.8,0.8])
    L = foreground.size
    if protocol=='DNase_seq':
        # First half of the profile = one strand; second half is plotted
        # below the axis (the -1 factor) as the opposite strand.
        footprint = [foreground[:L/2], -1*foreground[L/2:]]
        footprintbg = [background[:L/2], -1*background[L/2:]]
        xvals = np.arange(-L/4,L/4)
        subplot.plot(xvals, footprint[0], linewidth=1, color='b')
        subplot.plot(xvals, footprint[1], linewidth=1, color='b')
        subplot.plot(xvals, footprintbg[0], linewidth=1, color='#888888')
        subplot.plot(xvals, footprintbg[1], linewidth=1, color='#888888')
        ymin = footprint[1].min()
        ymax = footprint[0].max()
        yticks = [ymin, ymin/2, 0, ymax/2, ymax]
    elif protocol=='ATAC_seq':
        # ATAC-seq profile is not strand-split: plot it as one curve.
        footprint = foreground.copy()
        footprintbg = background.copy()
        xvals = np.arange(-L/2,L/2)
        subplot.plot(xvals, footprint, linewidth=1, color='b')
        subplot.plot(xvals, footprintbg, linewidth=1, color='#888888')
        ymin = 0
        ymax = footprint[0].max()
        yticks = [ymin, ymax/2, ymax]
    # Pad the axis limits slightly beyond the data range.
    subplot.axis([xvals.min()-1, xvals.max()+1, 1.01*ymin, 1.01*ymax])
    # Symmetric integer tick positions around zero (drop the duplicate 0
    # from the left side).
    xticks_right = np.linspace(0,xvals.max()+1,3).astype('int')
    xticks_left = np.linspace(xvals.min(), 0, 3).astype('int')[:-1]
    xticks = [x for x in xticks_left]
    xticks.extend([x for x in xticks_right])
    xticklabels = ['%d'%i for i in xticks]
    subplot.set_xticks(xticks)
    subplot.set_xticklabels(xticklabels, fontsize=8, color='k')
    yticklabels = ['%.2f'%y for y in yticks]
    subplot.set_yticks(yticks)
    subplot.set_yticklabels(yticklabels, fontsize=8, color='k')
    # Dashed guides: motif boundaries (x = 0 and x = mlen) and the zero line.
    subplot.axvline(0, linestyle='--', linewidth=0.2, color='k')
    subplot.axvline(mlen, linestyle='--', linewidth=0.2, color='k')
    subplot.axhline(0, linestyle='--', linewidth=0.2, color='k')
    # In-figure legend (colored text instead of a legend box).
    figure.text(0.12, 0.88, 'profile at bound sites', \
        color='b', fontsize=9, horizontalalignment='left', verticalalignment='top')
    figure.text(0.12, 0.85, 'profile at unbound sites', \
        color='#888888', fontsize=9, horizontalalignment='left', verticalalignment='top')
    return figure
def parse_args():
    """Build the command-line interface and return the parsed options."""
    description = ("plots the cleavage profile, "
                   "constructed from the estimated model parameters")
    arg_parser = argparse.ArgumentParser(description=description)

    protocol_help = "specifies the chromatin accessibility protocol (default:DNase_seq)"
    arg_parser.add_argument("--protocol",
                            choices=("ATAC_seq","DNase_seq"),
                            default="DNase_seq",
                            help=protocol_help)

    model_help = "models differ in how they capture background rate of enzyme cleavage (default:msCentipede)"
    arg_parser.add_argument("--model",
                            choices=("msCentipede", "msCentipede_flexbg", "msCentipede_flexbgmean"),
                            default="msCentipede",
                            help=model_help)

    motif_help = ("name of a gzipped text file containing "
                  " positional information and other attributes for motif instances "
                  " of a transcription factor. columns of the file should be as follows. "
                  " Chromosome Start End Strand PWM_Score [Attribute_1 Attribute_2 ...]. "
                  " additional attributes are optional.")
    arg_parser.add_argument("motif_file",
                            action="store",
                            help=motif_help)

    options = arg_parser.parse_args()

    # if no motif file is provided, throw an error
    if options.motif_file is None:
        arg_parser.error("Need to provide a file of motifs for a transcription factor")

    return options
def main():
    """Entry point: load fitted model parameters, read the motif length
    from the motif file, and save the footprint-profile figure as a PDF.
    """
    options = parse_args()
    # Derive input/output file names from the motif-file prefix and model name.
    model_file = "%s_%s_model_parameters.pkl"%(options.motif_file.split('.')[0], '_'.join(options.model.split('-')))
    figure_file = "%s_%s_footprint_profile.pdf"%(options.motif_file.split('.')[0], '_'.join(options.model.split('-')))
    # load model parameters; pickles must be read in binary mode ('rb' --
    # text mode 'r' corrupts the stream on some platforms), and the `with`
    # blocks guarantee the handles are closed even on error.
    with open(model_file, 'rb') as handle:
        model = cPickle.load(handle)
    footprint_model = model[0]
    background_model = model[2]
    # get motif length from the first motif record (skip the header line)
    with gzip.open(options.motif_file, 'rb') as handle:
        handle.next()
        row = handle.next().strip().split()
    mlen = int(row[2])-int(row[1])
    # create figure
    figure = plot_profile(footprint_model, background_model, mlen, options.protocol)
    # save figure
    figure.savefig(figure_file, dpi=450)

if __name__=="__main__":
    main()
| [
"argparse.ArgumentParser",
"gzip.open",
"numpy.array",
"matplotlib.pyplot.figure",
"cPickle.load",
"numpy.arange"
] | [((202, 215), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (210, 215), True, 'import numpy as np\n'), ((467, 480), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (475, 480), True, 'import numpy as np\n'), ((730, 743), 'matplotlib.pyplot.figure', 'plot.figure', ([], {}), '()\n', (741, 743), True, 'import matplotlib.pyplot as plot\n'), ((2886, 3010), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""plots the cleavage profile, constructed from the estimated model parameters"""'}), "(description=\n 'plots the cleavage profile, constructed from the estimated model parameters'\n )\n", (2909, 3010), False, 'import argparse\n'), ((4635, 4655), 'cPickle.load', 'cPickle.load', (['handle'], {}), '(handle)\n', (4647, 4655), False, 'import cPickle\n'), ((4775, 4810), 'gzip.open', 'gzip.open', (['options.motif_file', '"""rb"""'], {}), "(options.motif_file, 'rb')\n", (4784, 4810), False, 'import gzip\n'), ((277, 332), 'numpy.array', 'np.array', (['[p for val in foreground for p in [val, val]]'], {}), '([p for val in foreground for p in [val, val]])\n', (285, 332), True, 'import numpy as np\n'), ((347, 414), 'numpy.array', 'np.array', (['[i for v in footprint_model.value[j] for i in [v, 1 - v]]'], {}), '([i for v in footprint_model.value[j] for i in [v, 1 - v]])\n', (355, 414), True, 'import numpy as np\n'), ((543, 598), 'numpy.array', 'np.array', (['[p for val in background for p in [val, val]]'], {}), '([p for val in background for p in [val, val]])\n', (551, 598), True, 'import numpy as np\n'), ((613, 681), 'numpy.array', 'np.array', (['[i for v in background_model.value[j] for i in [v, 1 - v]]'], {}), '([i for v in background_model.value[j] for i in [v, 1 - v]])\n', (621, 681), True, 'import numpy as np\n'), ((986, 1010), 'numpy.arange', 'np.arange', (['(-L / 4)', '(L / 4)'], {}), '(-L / 4, L / 4)\n', (995, 1010), True, 'import numpy as np\n'), ((1533, 1557), 'numpy.arange', 'np.arange', (['(-L / 2)', '(L / 2)'], {}), '(-L / 2, L 
/ 2)\n', (1542, 1557), True, 'import numpy as np\n')] |
"""
!git
clone
https: // bitbucket.org / jadslim / german - traffic - signs
!ls
german - traffic - sign
"""
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential
from keras.optimizers import Adam
from keras.layers import Dense
from keras.layers import Flatten, Dropout
from keras.utils.np_utils import to_categorical
from keras.layers.convolutional import Conv2D, MaxPooling2D
import random
import pickle
import pandas as pd
import cv2
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
#% matplotlib inline
# Fix the RNG seed so the random visualisations below are reproducible.
np.random.seed(0)
# TODO: Implement load the data here.
with open('german-traffic-signs/train.p', 'rb') as f:
    train_data = pickle.load(f)
with open('german-traffic-signs/valid.p', 'rb') as f:
    val_data = pickle.load(f)
# TODO: Load test data
with open('german-traffic-signs/test.p', 'rb') as f:
    test_data = pickle.load(f)
# Split out features and labels
X_train, y_train = train_data['features'], train_data['labels']
X_val, y_val = val_data['features'], val_data['labels']
X_test, y_test = test_data['features'], test_data['labels']
# already 4 dimensional
print(X_train.shape)
print(X_test.shape)
print(X_val.shape)
# STOP: Do not change the tests below. Your implementation should pass these tests.
assert (X_train.shape[0] == y_train.shape[0]), "The number of images is not equal to the number of labels."
assert (X_train.shape[1:] == (32, 32, 3)), "The dimensions of the images are not 32 x 32 x 3."
assert (X_val.shape[0] == y_val.shape[0]), "The number of images is not equal to the number of labels."
assert (X_val.shape[1:] == (32, 32, 3)), "The dimensions of the images are not 32 x 32 x 3."
assert (X_test.shape[0] == y_test.shape[0]), "The number of images is not equal to the number of labels."
assert (X_test.shape[1:] == (32, 32, 3)), "The dimensions of the images are not 32 x 32 x 3."
# Class-id -> sign-name lookup table shipped with the dataset.
data = pd.read_csv('german-traffic-signs/signnames.csv')
num_of_samples = []
cols = 5
num_classes = 43
# Grid of `cols` random example images for each of the 43 classes.
fig, axs = plt.subplots(nrows=num_classes, ncols=cols, figsize=(5, 50))
fig.tight_layout()
for i in range(cols):
    for j, row in data.iterrows():
        x_selected = X_train[y_train == j]
        axs[j][i].imshow(x_selected[random.randint(0, (len(x_selected) - 1)), :, :], cmap=plt.get_cmap('gray'))
        axs[j][i].axis("off")
        if i == 2:
            # Title the middle column and record the class size once.
            axs[j][i].set_title(str(j) + " - " + row["SignName"])
            num_of_samples.append(len(x_selected))
print(num_of_samples)
# Histogram of images per class to reveal class imbalance.
plt.figure(figsize=(12, 4))
plt.bar(range(0, num_classes), num_of_samples)
plt.title("Distribution of the train dataset")
plt.xlabel("Class number")
plt.ylabel("Number of images")
plt.show()
import cv2
# Show one raw training image before preprocessing (index 1000 is an
# arbitrary choice for visualisation).
plt.imshow(X_train[1000])
plt.axis("off")
print(X_train[1000].shape)
print(y_train[1000])
def grayscale(img):
    """Convert a BGR color image to a single-channel grayscale image."""
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Visual sanity check: grayscale version of the sample image.
img = grayscale(X_train[1000])
plt.imshow(img)
plt.axis("off")
print(img.shape)
def equalize(img):
    """Apply histogram equalization to normalize image contrast."""
    return cv2.equalizeHist(img)
# Visual sanity check: contrast-equalized version of the sample image.
img = equalize(img)
plt.imshow(img)
plt.axis("off")
print(img.shape)
def preprocess(img):
    """Full preprocessing pipeline for one image:
    grayscale -> histogram equalization -> scale pixel values to [0, 1].
    """
    return equalize(grayscale(img)) / 255
# Apply the preprocessing pipeline to every image in each split.
X_train = np.array(list(map(preprocess, X_train)))
X_test = np.array(list(map(preprocess, X_test)))
X_val = np.array(list(map(preprocess, X_val)))
plt.imshow(X_train[random.randint(0, len(X_train) - 1)])
plt.axis('off')
print(X_train.shape)
# Add an explicit single-channel axis for the Conv2D input layer.
# NOTE(review): the sample counts are hard-coded to this dataset's sizes.
X_train = X_train.reshape(34799, 32, 32, 1)
X_test = X_test.reshape(12630, 32, 32, 1)
X_val = X_val.reshape(4410, 32, 32, 1)
from keras.preprocessing.image import ImageDataGenerator
# On-the-fly augmentation: small shifts, zoom, shear and rotation.
datagen = ImageDataGenerator(width_shift_range=0.1,
                           height_shift_range=0.1,
                           zoom_range=0.2,
                           shear_range=0.1,
                           rotation_range=10.)
datagen.fit(X_train)
# for X_batch, y_batch in
# Preview one augmented batch of 15 images.
batches = datagen.flow(X_train, y_train, batch_size=15)
X_batch, y_batch = next(batches)
fig, axs = plt.subplots(1, 15, figsize=(20, 5))
fig.tight_layout()
for i in range(15):
    axs[i].imshow(X_batch[i].reshape(32, 32))
    axs[i].axis("off")
print(X_batch.shape)
# One-hot encode the labels for the 43-way softmax output.
y_train = to_categorical(y_train, 43)
y_test = to_categorical(y_test, 43)
y_val = to_categorical(y_val, 43)
# create model
def modified_model():
    """Build and compile the traffic-sign CNN.

    Architecture: two 5x5 conv layers (60 filters) + pooling, two 3x3 conv
    layers (30 filters) + pooling, then a 500-unit dense layer with dropout
    and a 43-way softmax output (one unit per sign class).
    """
    layers = [
        Conv2D(60, (5, 5), input_shape=(32, 32, 1), activation='relu'),
        Conv2D(60, (5, 5), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(30, (3, 3), activation='relu'),
        Conv2D(30, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(500, activation='relu'),
        Dropout(0.5),
        Dense(43, activation='softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(Adam(lr=0.001), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
model = modified_model()
print(model.summary())
# Train on augmented batches; validate on the untouched validation split.
history = model.fit_generator(datagen.flow(X_train, y_train, batch_size=50),
                              steps_per_epoch=2000,
                              epochs=10,
                              validation_data=(X_val, y_val), shuffle=1)
# Learning curves: loss then accuracy, training vs. validation.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Loss')
plt.xlabel('epoch')

plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.legend(['training', 'test'])
plt.title('Accuracy')
plt.xlabel('epoch')

# TODO: Evaluate model on test data
score = model.evaluate(X_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])

# predict internet number
# Fetch an out-of-dataset sign image from the web and classify it.
import requests
from PIL import Image
url = 'https://c8.alamy.com/comp/A0RX23/cars-and-automobiles-must-turn-left-ahead-sign-A0RX23.jpg'
r = requests.get(url, stream=True)
img = Image.open(r.raw)
plt.imshow(img, cmap=plt.get_cmap('gray'))
# Resize to the network's 32x32 input and run the same preprocessing
# used on the training data.
img = np.asarray(img)
img = cv2.resize(img, (32, 32))
img = preprocess(img)
plt.imshow(img, cmap=plt.get_cmap('gray'))
print(img.shape)
img = img.reshape(1, 32, 32, 1)
print("predicted sign: " + str(model.predict_classes(img)))
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"keras.preprocessing.image.ImageDataGenerator",
"keras.layers.Dense",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.random.seed",
"matplotlib.pyplot.axis",
"keras.optimizers.Adam",
"keras... | [((574, 591), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (588, 591), True, 'import numpy as np\n'), ((1900, 1949), 'pandas.read_csv', 'pd.read_csv', (['"""german-traffic-signs/signnames.csv"""'], {}), "('german-traffic-signs/signnames.csv')\n", (1911, 1949), True, 'import pandas as pd\n'), ((2010, 2070), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'num_classes', 'ncols': 'cols', 'figsize': '(5, 50)'}), '(nrows=num_classes, ncols=cols, figsize=(5, 50))\n', (2022, 2070), True, 'import matplotlib.pyplot as plt\n'), ((2492, 2519), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (2502, 2519), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2613), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of the train dataset"""'], {}), "('Distribution of the train dataset')\n", (2576, 2613), True, 'import matplotlib.pyplot as plt\n'), ((2614, 2640), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Class number"""'], {}), "('Class number')\n", (2624, 2640), True, 'import matplotlib.pyplot as plt\n'), ((2641, 2671), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of images"""'], {}), "('Number of images')\n", (2651, 2671), True, 'import matplotlib.pyplot as plt\n'), ((2672, 2682), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2680, 2682), True, 'import matplotlib.pyplot as plt\n'), ((2696, 2721), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X_train[1000]'], {}), '(X_train[1000])\n', (2706, 2721), True, 'import matplotlib.pyplot as plt\n'), ((2722, 2737), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2730, 2737), True, 'import matplotlib.pyplot as plt\n'), ((2904, 2919), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2914, 2919), True, 'import matplotlib.pyplot as plt\n'), ((2920, 2935), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2928, 2935), True, 'import matplotlib.pyplot as 
plt\n'), ((3043, 3058), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (3053, 3058), True, 'import matplotlib.pyplot as plt\n'), ((3059, 3074), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3067, 3074), True, 'import matplotlib.pyplot as plt\n'), ((3406, 3421), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3414, 3421), True, 'import matplotlib.pyplot as plt\n'), ((3638, 3761), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'zoom_range': '(0.2)', 'shear_range': '(0.1)', 'rotation_range': '(10.0)'}), '(width_shift_range=0.1, height_shift_range=0.1,\n zoom_range=0.2, shear_range=0.1, rotation_range=10.0)\n', (3656, 3761), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((4024, 4060), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(15)'], {'figsize': '(20, 5)'}), '(1, 15, figsize=(20, 5))\n', (4036, 4060), True, 'import matplotlib.pyplot as plt\n'), ((4203, 4230), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['y_train', '(43)'], {}), '(y_train, 43)\n', (4217, 4230), False, 'from keras.utils.np_utils import to_categorical\n'), ((4240, 4266), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['y_test', '(43)'], {}), '(y_test, 43)\n', (4254, 4266), False, 'from keras.utils.np_utils import to_categorical\n'), ((4275, 4300), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['y_val', '(43)'], {}), '(y_val, 43)\n', (4289, 4300), False, 'from keras.utils.np_utils import to_categorical\n'), ((5244, 5277), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (5252, 5277), True, 'import matplotlib.pyplot as plt\n'), ((5278, 5315), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (5286, 5315), True, 'import matplotlib.pyplot as plt\n'), ((5316, 
5333), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss"""'], {}), "('Loss')\n", (5325, 5333), True, 'import matplotlib.pyplot as plt\n'), ((5334, 5353), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (5344, 5353), True, 'import matplotlib.pyplot as plt\n'), ((5355, 5387), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']"], {}), "(history.history['acc'])\n", (5363, 5387), True, 'import matplotlib.pyplot as plt\n'), ((5388, 5424), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_acc']"], {}), "(history.history['val_acc'])\n", (5396, 5424), True, 'import matplotlib.pyplot as plt\n'), ((5425, 5457), 'matplotlib.pyplot.legend', 'plt.legend', (["['training', 'test']"], {}), "(['training', 'test'])\n", (5435, 5457), True, 'import matplotlib.pyplot as plt\n'), ((5458, 5479), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy"""'], {}), "('Accuracy')\n", (5467, 5479), True, 'import matplotlib.pyplot as plt\n'), ((5480, 5499), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (5490, 5499), True, 'import matplotlib.pyplot as plt\n'), ((5822, 5852), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (5834, 5852), False, 'import requests\n'), ((5859, 5876), 'PIL.Image.open', 'Image.open', (['r.raw'], {}), '(r.raw)\n', (5869, 5876), False, 'from PIL import Image\n'), ((5927, 5942), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (5937, 5942), True, 'import numpy as np\n'), ((5949, 5974), 'cv2.resize', 'cv2.resize', (['img', '(32, 32)'], {}), '(img, (32, 32))\n', (5959, 5974), False, 'import cv2\n'), ((702, 716), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (713, 716), False, 'import pickle\n'), ((786, 800), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (797, 800), False, 'import pickle\n'), ((894, 908), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (905, 908), False, 'import pickle\n'), ((2818, 2855), 'cv2.cvtColor', 
'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2830, 2855), False, 'import cv2\n'), ((2984, 3005), 'cv2.equalizeHist', 'cv2.equalizeHist', (['img'], {}), '(img)\n', (3000, 3005), False, 'import cv2\n'), ((4353, 4365), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4363, 4365), False, 'from keras.models import Sequential\n'), ((4380, 4442), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(60)', '(5, 5)'], {'input_shape': '(32, 32, 1)', 'activation': '"""relu"""'}), "(60, (5, 5), input_shape=(32, 32, 1), activation='relu')\n", (4386, 4442), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((4458, 4495), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(60)', '(5, 5)'], {'activation': '"""relu"""'}), "(60, (5, 5), activation='relu')\n", (4464, 4495), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((4511, 4541), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4523, 4541), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((4558, 4595), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(30)', '(3, 3)'], {'activation': '"""relu"""'}), "(30, (3, 3), activation='relu')\n", (4564, 4595), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((4611, 4648), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(30)', '(3, 3)'], {'activation': '"""relu"""'}), "(30, (3, 3), activation='relu')\n", (4617, 4648), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((4664, 4694), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4676, 4694), False, 'from keras.layers.convolutional import Conv2D, MaxPooling2D\n'), ((4711, 4720), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4718, 4720), False, 'from keras.layers import Flatten, Dropout\n'), ((4736, 4765), 'keras.layers.Dense', 'Dense', 
(['(500)'], {'activation': '"""relu"""'}), "(500, activation='relu')\n", (4741, 4765), False, 'from keras.layers import Dense\n'), ((4781, 4793), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4788, 4793), False, 'from keras.layers import Flatten, Dropout\n'), ((4809, 4840), 'keras.layers.Dense', 'Dense', (['(43)'], {'activation': '"""softmax"""'}), "(43, activation='softmax')\n", (4814, 4840), False, 'from keras.layers import Dense\n'), ((4861, 4875), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (4865, 4875), False, 'from keras.optimizers import Adam\n'), ((5898, 5918), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (5910, 5918), True, 'import matplotlib.pyplot as plt\n'), ((6018, 6038), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (6030, 6038), True, 'import matplotlib.pyplot as plt\n'), ((2281, 2301), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (2293, 2301), True, 'import matplotlib.pyplot as plt\n')] |
import unittest
import numpy as np
from scipy.spatial.distance import cdist
from hmc import mmd
class TestMMD(unittest.TestCase):
    def test_mmd(self):
        """Check mmd() against a direct O(n^2) evaluation of the unbiased
        MMD^2 estimator with a Gaussian kernel."""
        # Random problem sizes.  The lower bounds guard against degenerate
        # draws: the original int(1000*uniform()) could produce m or n < 2
        # (division by zero in m*(m-1) / n*(n-1)) or k == 0 (empty feature
        # vectors), making the test flaky.
        n = int(np.random.randint(2, 1000))
        m = int(np.random.randint(2, 1000))
        k = int(np.random.randint(1, 10))
        x = np.random.normal(size=(m, k))
        y = np.random.normal(size=(n, k))
        bw = np.random.exponential()
        u = mmd(x, y, bw)
        # Gaussian kernel Gram matrices.
        Kxx = np.exp(-0.5*cdist(x, x, 'sqeuclidean') / bw**2)
        Kyy = np.exp(-0.5*cdist(y, y, 'sqeuclidean') / bw**2)
        Kxy = np.exp(-0.5*cdist(x, y, 'sqeuclidean') / bw**2)
        # Unbiased MMD^2: within-sample sums exclude the diagonal.
        a = 2*np.sum(Kxx[np.triu_indices(m, 1)]) / (m*(m-1))
        b = 2*np.sum(Kyy[np.triu_indices(n, 1)]) / (n*(n-1))
        c = -2*np.sum(Kxy) / (m*n)
        v = a + b + c
        self.assertTrue(np.allclose(u, v))
| [
"numpy.random.normal",
"numpy.allclose",
"numpy.triu_indices",
"scipy.spatial.distance.cdist",
"numpy.random.exponential",
"numpy.sum",
"numpy.random.uniform",
"hmc.mmd"
] | [((293, 322), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(m, k)'}), '(size=(m, k))\n', (309, 322), True, 'import numpy as np\n'), ((335, 364), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, k)'}), '(size=(n, k))\n', (351, 364), True, 'import numpy as np\n'), ((378, 401), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (399, 401), True, 'import numpy as np\n'), ((414, 427), 'hmc.mmd', 'mmd', (['x', 'y', 'bw'], {}), '(x, y, bw)\n', (417, 427), False, 'from hmc import mmd\n'), ((818, 835), 'numpy.allclose', 'np.allclose', (['u', 'v'], {}), '(u, v)\n', (829, 835), True, 'import numpy as np\n'), ((178, 197), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (195, 197), True, 'import numpy as np\n'), ((220, 239), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (237, 239), True, 'import numpy as np\n'), ((260, 279), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (277, 279), True, 'import numpy as np\n'), ((752, 763), 'numpy.sum', 'np.sum', (['Kxy'], {}), '(Kxy)\n', (758, 763), True, 'import numpy as np\n'), ((455, 481), 'scipy.spatial.distance.cdist', 'cdist', (['x', 'x', '"""sqeuclidean"""'], {}), "(x, x, 'sqeuclidean')\n", (460, 481), False, 'from scipy.spatial.distance import cdist\n'), ((517, 543), 'scipy.spatial.distance.cdist', 'cdist', (['y', 'y', '"""sqeuclidean"""'], {}), "(y, y, 'sqeuclidean')\n", (522, 543), False, 'from scipy.spatial.distance import cdist\n'), ((579, 605), 'scipy.spatial.distance.cdist', 'cdist', (['x', 'y', '"""sqeuclidean"""'], {}), "(x, y, 'sqeuclidean')\n", (584, 605), False, 'from scipy.spatial.distance import cdist\n'), ((640, 661), 'numpy.triu_indices', 'np.triu_indices', (['m', '(1)'], {}), '(m, 1)\n', (655, 661), True, 'import numpy as np\n'), ((701, 722), 'numpy.triu_indices', 'np.triu_indices', (['n', '(1)'], {}), '(n, 1)\n', (716, 722), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
import os
import shutil
from PIL import Image
import cv2
from scipy.misc import imread
def _pascal_voc_colormap(num_labels=256):
    """Generate the standard PASCAL VOC label colormap as a flat list
    [r0, g0, b0, r1, g1, b1, ...].

    Each label index is mapped to an RGB triple by distributing the bits
    of the index across the three channels, exactly as in the VOCdevkit
    reference implementation.  Channel values are normalised to [0, 1] and
    rounded to four decimals so the result reproduces the hand-written
    table this function replaces (a 768-value literal, which was both
    hard to audit and easy to mistype).
    """
    colormap = []
    for label in range(num_labels):
        r = g = b = 0
        bits = label
        # Spread bit j of (bits, bits>>1, bits>>2) into the high bits of
        # each channel: 8 rounds, 3 label bits consumed per round.
        for shift in range(7, -1, -1):
            r |= (bits & 1) << shift
            g |= ((bits >> 1) & 1) << shift
            b |= ((bits >> 2) & 1) << shift
            bits >>= 3
        # Round to 4 decimals to match the original table's precision.
        colormap.extend(round(channel / 255.0, 4) for channel in (r, g, b))
    return colormap

# Flat RGB palette for 256 PASCAL VOC labels, values in [0, 1].
pascal_colormap = _pascal_voc_colormap()
# Detectron's default visualization palette: 79 RGB colors stored as a flat
# list of floats in [0, 1]; read in consecutive groups of three (r, g, b).
# The last seven entries form a grayscale ramp from black to white.
detectron_colormap = [
    0.000, 0.447, 0.741,
    0.850, 0.325, 0.098,
    0.929, 0.694, 0.125,
    0.494, 0.184, 0.556,
    0.466, 0.674, 0.188,
    0.301, 0.745, 0.933,
    0.635, 0.078, 0.184,
    0.300, 0.300, 0.300,
    0.600, 0.600, 0.600,
    1.000, 0.000, 0.000,
    1.000, 0.500, 0.000,
    0.749, 0.749, 0.000,
    0.000, 1.000, 0.000,
    0.000, 0.000, 1.000,
    0.667, 0.000, 1.000,
    0.333, 0.333, 0.000,
    0.333, 0.667, 0.000,
    0.333, 1.000, 0.000,
    0.667, 0.333, 0.000,
    0.667, 0.667, 0.000,
    0.667, 1.000, 0.000,
    1.000, 0.333, 0.000,
    1.000, 0.667, 0.000,
    1.000, 1.000, 0.000,
    0.000, 0.333, 0.500,
    0.000, 0.667, 0.500,
    0.000, 1.000, 0.500,
    0.333, 0.000, 0.500,
    0.333, 0.333, 0.500,
    0.333, 0.667, 0.500,
    0.333, 1.000, 0.500,
    0.667, 0.000, 0.500,
    0.667, 0.333, 0.500,
    0.667, 0.667, 0.500,
    0.667, 1.000, 0.500,
    1.000, 0.000, 0.500,
    1.000, 0.333, 0.500,
    1.000, 0.667, 0.500,
    1.000, 1.000, 0.500,
    0.000, 0.333, 1.000,
    0.000, 0.667, 1.000,
    0.000, 1.000, 1.000,
    0.333, 0.000, 1.000,
    0.333, 0.333, 1.000,
    0.333, 0.667, 1.000,
    0.333, 1.000, 1.000,
    0.667, 0.000, 1.000,
    0.667, 0.333, 1.000,
    0.667, 0.667, 1.000,
    0.667, 1.000, 1.000,
    1.000, 0.000, 1.000,
    1.000, 0.333, 1.000,
    1.000, 0.667, 1.000,
    0.167, 0.000, 0.000,
    0.333, 0.000, 0.000,
    0.500, 0.000, 0.000,
    0.667, 0.000, 0.000,
    0.833, 0.000, 0.000,
    1.000, 0.000, 0.000,
    0.000, 0.167, 0.000,
    0.000, 0.333, 0.000,
    0.000, 0.500, 0.000,
    0.000, 0.667, 0.000,
    0.000, 0.833, 0.000,
    0.000, 1.000, 0.000,
    0.000, 0.000, 0.167,
    0.000, 0.000, 0.333,
    0.000, 0.000, 0.500,
    0.000, 0.000, 0.667,
    0.000, 0.000, 0.833,
    0.000, 0.000, 1.000,
    0.000, 0.000, 0.000,
    0.143, 0.143, 0.143,
    0.286, 0.286, 0.286,
    0.429, 0.429, 0.429,
    0.571, 0.571, 0.571,
    0.714, 0.714, 0.714,
    0.857, 0.857, 0.857,
    1.000, 1.000, 1.000
]
def draw_mask(im, mask, alpha=0.5, color=None):
    """
    Blends a colored overlay into *im* wherever *mask* is positive.

    :param im: HxWx3 uint8 image (RGB or BGR; channels are blended uniformly)
    :param mask: HxW array; pixels where mask > 0 are tinted
    :param alpha: float, blending weight of the overlay color
    :param color: optional int index into the pascal colormap; when None a
        random color from the detectron palette is used instead
    :return: HxWx3 uint8 image with the mask drawn on top
    """
    colmap = (np.array(pascal_colormap) * 255).round().astype("uint8").reshape(256, 3)
    if color is None:
        # BUGFIX: detectron_colormap is a flat list of floats, so indexing it
        # returned a single (non-subscriptable) float. Reshape into (N, 3)
        # triples, scale to 0-255 to match `im`, and reverse RGB -> BGR to
        # mirror the reversed lookup the original code attempted.
        palette = (np.array(detectron_colormap).reshape(-1, 3) * 255).round().astype("uint8")
        color = palette[np.random.choice(len(palette))][::-1]
    else:
        # Wrap out-of-range indices back into the palette range.
        while color >= 255:
            color = color - 254
        color = colmap[color]
    im = np.where(np.repeat((mask > 0)[:, :, None], 3, axis=2),
                  im * (1 - alpha) + color * alpha, im)
    im = im.astype('uint8')
    return im
def save_jpg(masks, t, image_dir, viz_dir, mask_ids, name=None):
    """
    Overlays the given masks on frame *t*'s image and writes it as a JPG.

    :param masks: iterable of HxW mask arrays
    :param t: int, zero-based frame index (file is named with t + 1)
    :param image_dir: str, path of the source image to read
    :param viz_dir: str, output directory (may contain a %-placeholder for name)
    :param mask_ids: iterable of int color indices, one per mask
    :param name: optional str substituted into viz_dir
    """
    if name is not None:
        viz_dir = viz_dir % name
    if not os.path.exists(viz_dir):
        os.makedirs(viz_dir)
    frame = imread(image_dir)[:, :, :3]
    for mask_id, mask in zip(mask_ids, masks):
        frame = draw_mask(frame, mask, color=mask_id)
    out_path = viz_dir + '/' + str(t + 1).zfill(5) + '.jpg'
    cv2.imwrite(out_path, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
def save_with_pascal_colormap(img_dir, img):
    """
    Saves *img* as a paletted ('P' mode) image using the pascal colormap.

    :param img_dir: str, output file path
    :param img: label image; values are palette indices
    """
    palette_values = (np.array(pascal_colormap) * 255).round().astype("uint8")
    # A tiny dummy image only exists to carry the palette for quantize().
    palette_image = Image.new('P', (16, 16))
    palette_image.putpalette(palette_values)
    label_image = Image.fromarray(np.squeeze(img.astype("uint8")))
    quantized = label_image.quantize(palette=palette_image)
    quantized.save(img_dir)
def visualize_tracklets(tracklets, all_props, image_size, output_directory, name=None):
    """
    Writes one paletted PNG sequence per tracklet.

    Any previous content of *output_directory* is removed first. For every
    time step, each tracklet with a valid proposal (entry != -1) gets its
    proposal mask rendered into an otherwise empty image and saved under
    'tracklet_<index>/<frame>.png'.

    :param tracklets: 2D array, tracklets[i, t] is the proposal index of
        tracklet i at time t, or -1 when absent
    :param all_props: list of per-frame proposal dicts with a "mask" key
    :param image_size: tuple, shape of the output label images
    :param output_directory: str, output root (may contain a %-placeholder)
    :param name: optional str substituted into output_directory
    """
    if name is not None:
        output_directory = output_directory % name
    if os.path.exists(output_directory):
        shutil.rmtree(output_directory)
    png = np.zeros(image_size, dtype=int)
    if len(tracklets) > 0:
        for t, props in enumerate(all_props):
            if len(props) > 0:
                props_to_use = tracklets[:, t]
                active_tracklets = np.where(tracklets[:, t] != -1)[0].tolist()
                for tracklet_index in active_tracklets:
                    png[props["mask"][props_to_use[tracklet_index]].astype("bool")] = 2
                    tracklet_dir = output_directory + 'tracklet_' + str(tracklet_index) + '/'
                    if not os.path.exists(tracklet_dir):
                        os.makedirs(tracklet_dir)
                    save_with_pascal_colormap(tracklet_dir + str(t + 1).zfill(5) + '.png', png)
                    # Reset the canvas for the next tracklet/frame.
                    png = np.zeros(image_size)
def visualize_proposals(proposals, image_size, output_directory, name=None):
    """
    Writes each frame's proposal masks as paletted PNGs under 'time_<t>/'.

    :param proposals: list of per-frame dicts with 'seg' and 'mask' keys
    :param image_size: tuple, shape of the output label images
    :param output_directory: str, output root (may contain a %-placeholder)
    :param name: optional str substituted into output_directory
    """
    png = np.zeros(image_size, dtype=int)
    if name is not None:
        output_directory = output_directory % name
    for t, props in enumerate(proposals):
        directory = output_directory + 'time_' + str(t) + '/'
        if not os.path.exists(directory):
            os.makedirs(directory)
        if len(props['seg']) > 0:
            for i in range(len(props['mask'])):
                png[props["mask"][i].astype("bool")] = 2
                save_with_pascal_colormap(directory + str(i).zfill(5) + '.png', png)
                png = np.zeros_like(props["mask"][0])
        else:
            # BUGFIX: the original referenced the loop variable `i` here,
            # which is undefined on the first frame without segments
            # (NameError) and otherwise a stale leftover from the previous
            # frame. Save a single empty mask under index 0 instead.
            save_with_pascal_colormap(directory + str(0).zfill(5) + '.png', png)
| [
"os.path.exists",
"numpy.repeat",
"os.makedirs",
"numpy.where",
"PIL.Image.new",
"numpy.array",
"scipy.misc.imread",
"numpy.zeros",
"cv2.cvtColor",
"shutil.rmtree",
"numpy.zeros_like"
] | [((9388, 9405), 'scipy.misc.imread', 'imread', (['image_dir'], {}), '(image_dir)\n', (9394, 9405), False, 'from scipy.misc import imread\n'), ((9769, 9793), 'PIL.Image.new', 'Image.new', (['"""P"""', '(16, 16)'], {}), "('P', (16, 16))\n", (9778, 9793), False, 'from PIL import Image\n'), ((10120, 10152), 'os.path.exists', 'os.path.exists', (['output_directory'], {}), '(output_directory)\n', (10134, 10152), False, 'import os\n'), ((10249, 10280), 'numpy.zeros', 'np.zeros', (['image_size'], {'dtype': 'int'}), '(image_size, dtype=int)\n', (10257, 10280), True, 'import numpy as np\n'), ((11088, 11119), 'numpy.zeros', 'np.zeros', (['image_size'], {'dtype': 'int'}), '(image_size, dtype=int)\n', (11096, 11119), True, 'import numpy as np\n'), ((9042, 9086), 'numpy.repeat', 'np.repeat', (['(mask > 0)[:, :, None]', '(3)'], {'axis': '(2)'}), '((mask > 0)[:, :, None], 3, axis=2)\n', (9051, 9086), True, 'import numpy as np\n'), ((9323, 9346), 'os.path.exists', 'os.path.exists', (['viz_dir'], {}), '(viz_dir)\n', (9337, 9346), False, 'import os\n'), ((9356, 9376), 'os.makedirs', 'os.makedirs', (['viz_dir'], {}), '(viz_dir)\n', (9367, 9376), False, 'import os\n'), ((9598, 9634), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (9610, 9634), False, 'import cv2\n'), ((10206, 10237), 'shutil.rmtree', 'shutil.rmtree', (['output_directory'], {}), '(output_directory)\n', (10219, 10237), False, 'import shutil\n'), ((11318, 11343), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (11332, 11343), False, 'import os\n'), ((11357, 11379), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (11368, 11379), False, 'import os\n'), ((11627, 11658), 'numpy.zeros_like', 'np.zeros_like', (["props['mask'][0]"], {}), "(props['mask'][0])\n", (11640, 11658), True, 'import numpy as np\n'), ((10978, 10998), 'numpy.zeros', 'np.zeros', (['image_size'], {}), '(image_size)\n', (10986, 10998), True, 'import numpy as 
np\n'), ((9697, 9722), 'numpy.array', 'np.array', (['pascal_colormap'], {}), '(pascal_colormap)\n', (9705, 9722), True, 'import numpy as np\n'), ((10758, 10792), 'os.path.exists', 'os.path.exists', (['tracklet_directory'], {}), '(tracklet_directory)\n', (10772, 10792), False, 'import os\n'), ((10818, 10849), 'os.makedirs', 'os.makedirs', (['tracklet_directory'], {}), '(tracklet_directory)\n', (10829, 10849), False, 'import os\n'), ((10468, 10499), 'numpy.where', 'np.where', (['(tracklets[:, t] != -1)'], {}), '(tracklets[:, t] != -1)\n', (10476, 10499), True, 'import numpy as np\n'), ((8743, 8768), 'numpy.array', 'np.array', (['pascal_colormap'], {}), '(pascal_colormap)\n', (8751, 8768), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains DCC functionality for 3ds Max
"""
from __future__ import print_function, division, absolute_import
from collections import OrderedDict
from Qt.QtWidgets import QApplication, QMainWindow
import numpy as np
from pymxs import runtime as rt
from tpDcc.core import dcc
from tpDcc.libs.python import decorators, mathlib, path as path_utils
from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils
from tpDcc.dccs.max.core import name as name_utils
# =================================================================================================================
# GENERAL
# =================================================================================================================
def get_name():
    """
    Returns the identifier of this DCC implementation (3ds Max)
    :return: str
    """
    return dcc.Dccs.Max
def get_extensions():
    """
    Returns the scene file extensions this DCC can read and write
    :return: list(str)
    """
    return ['.max']
def get_version():
    """
    Returns the running 3ds Max version as an integer
    :return: int
    """
    max_version = helpers.get_max_version()
    return int(max_version)
def get_version_name():
    """
    Returns the running 3ds Max version as a string
    :return: str
    """
    max_version = helpers.get_max_version()
    return str(max_version)
def is_batch():
    """
    Tells whether the DCC is currently running in batch (non-UI) mode
    :return: bool
    """
    # TODO: 3ds Max does not expose a reliable batch-mode flag yet; assume interactive
    return False
def set_workspace(workspace_path):
    """
    Points the current 3ds Max project at the given folder
    :param workspace_path: str, absolute path of the new project folder
    """
    return rt.pathConfig.setCurrentProjectFolder(workspace_path)
def fit_view(animation=True):
    """
    Frames the current selection in the active viewport
    :param animation: bool, Animated fit is available
    """
    # Triggers the built-in "Zoom Extents Selected" action (table 0, id 310)
    rt.actionMan.executeAction(0, "310")
# =================================================================================================================
# GUI
# =================================================================================================================
def get_dpi(value=1):
    """
    Returns the current device pixel ratio multiplied by the given value
    :param value: float
    :return: float
    """
    if is_batch():
        pixel_ratio = QApplication.devicePixelRatio()
    else:
        pixel_ratio = QMainWindow().devicePixelRatio()
    return pixel_ratio * value
def get_dpi_scale(value):
    """
    Returns current DPI scale used by DCC
    :param value: float, unused for now
    :return: float
    """
    # NOTE: 3ds Max exposes no DPI scale query, so no scaling is assumed
    return 1.0
def get_main_window():
    """
    Returns the Qt widget that wraps the main 3ds Max window
    :return: QMainWindow
    """
    return gui.get_max_window()
def get_main_menubar():
    """
    Returns the menu bar widget of the main 3ds Max window
    :return: QMenuBar
    """
    main_window = get_main_window()
    return main_window.menuBar()
def select_file_dialog(title, start_directory=None, pattern=None):
    """
    Shows an open-file dialog and returns the selected path
    :param title: str, dialog caption
    :param start_directory: str, initial folder
    :param pattern: str, file filter pattern
    :return: str
    """
    return directory.open_file_dialog(caption=title, start_directory=start_directory, filters=pattern)
def save_file_dialog(title, start_directory=None, pattern=None):
    """
    Shows a save-file dialog and returns the selected path
    :param title: str, dialog caption
    :param start_directory: str, initial folder
    :param pattern: str, file filter pattern
    :return: str
    """
    return directory.save_file_dialog(caption=title, start_directory=start_directory, filters=pattern)
# =================================================================================================================
# OBJECTS / NODES
# =================================================================================================================
def node_types():
    """
    Returns dictionary that provides a mapping between tpDcc object types and DCC specific node types
    Can be the situation where a tpDcc object maps to more than one MFn object
    None values are ignored. This is because either do not exists or there is not equivalent type in Maya
    :return: dict
    """
    # No mapping has been defined for 3ds Max yet
    return OrderedDict()
def dcc_to_tpdcc_types():
    """
    Returns a dictionary that provides a mapping between Dcc object types and tpDcc object types
    :return: OrderedDict
    """
    dcc_to_abstract_types = OrderedDict()
    for abstract_type, dcc_type in node_types().items():
        # A tpDcc type may map to several DCC types; invert each of them
        if isinstance(dcc_type[0], (tuple, list)):
            for item in dcc_type[0]:
                dcc_to_abstract_types[item] = abstract_type
        else:
            dcc_to_abstract_types[dcc_type[0]] = abstract_type
    # BUGFIX: the original built the mapping but never returned it
    return dcc_to_abstract_types
def rename_node(node, new_name, **kwargs):
    """
    Renames given node with new given name
    :param node: str
    :param new_name: str
    :return: str, the name the node ended up with
    """
    max_node = node_utils.get_pymxs_node(node)
    max_node.name = new_name
    return max_node.name
# =================================================================================================================
# NAMING
# =================================================================================================================
def find_unique_name(
        obj_names=None, filter_type=None, include_last_number=True, do_rename=False,
        search_hierarchy=False, selection_only=True, **kwargs):
    """
    Returns a unique node name by adding a number to the end of the node name
    :param obj_names: str, name or list of names to find unique name from
    :param filter_type: str, find unique name on nodes that matches given filter criteria
    :param include_last_number: bool
    :param do_rename: bool
    :param search_hierarchy: bool, Whether to search objects in hierarchies
    :param selection_only: bool, Whether to search only selected objects or all scene object
    :return: str
    """
    # Only obj_names is honored by the Max implementation for now
    return name_utils.find_unique_name(obj_names=obj_names)
def add_name_prefix(
        prefix, obj_names=None, filter_type=None, add_underscore=False, search_hierarchy=False,
        selection_only=True, **kwargs):
    """
    Add prefix to node name
    :param prefix: str, string to add to the start of the current node
    :param obj_names: str or list(str), name of list of node names to rename
    :param filter_type: str, name of object type to filter the objects to apply changes ('Group, 'Joint', etc)
    :param add_underscore: bool, Whether or not to add underscore before the suffix
    :param search_hierarchy: bool, Whether to search objects in hierarchies
    :param selection_only: bool, Whether to search only selected objects or all scene objects
    :param kwargs:
    """
    selected_nodes, _ = scene.get_selected_nodes()
    if not selected_nodes:
        return
    # BUGFIX: add_underscore was documented but ignored; honor it
    # (default False preserves the previous behavior)
    separator = '_' if add_underscore else ''
    for node in selected_nodes:
        new_name = '{}{}{}'.format(prefix, separator, node.name)
        rename_node(node, new_name)
# =================================================================================================================
# SCENE
# =================================================================================================================
def new_scene(force=True, do_save=True):
    """
    Creates a new DCC scene
    :param force: bool, True to skip any confirmation prompt
    :param do_save: bool, True to save the current scene first
    :return:
    """
    return scene.new_scene(force=force, do_save=do_save)
def node_exists(node):
    """
    Returns whether given object exists or not
    :param node: str
    :return: bool
    """
    max_node = node_utils.get_pymxs_node(node)
    return rt.isValidNode(max_node)
def select_node(node, replace_selection=True, **kwargs):
    """
    Selects given object in the current scene
    :param node: str
    :param replace_selection: bool, replace current selection instead of extending it
    """
    max_node = node_utils.get_pymxs_node(node)
    if not replace_selection:
        return rt.selectMore(max_node)
    return rt.select(max_node)
def deselect_node(node):
    """
    Deselects given node from current selection
    :param node: str
    """
    max_node = node_utils.get_pymxs_node(node)
    return rt.deselect(max_node)
def clear_selection():
    """
    Empties the current scene selection
    """
    return rt.clearSelection()
def selected_nodes(full_path=True, **kwargs):
    """
    Returns a list of selected nodes
    :param full_path: bool
    :return: list(str)
    """
    # By default selected nodes are returned as Max handles
    as_handle = kwargs.get('as_handle', True)
    selection = rt.getCurrentSelection()
    if as_handle:
        return [max_node.handle for max_node in selection]
    return list(selection)
# =================================================================================================================
# ATTRIBUTES
# =================================================================================================================
def is_attribute_locked(node, attribute_name):
    """
    Returns whether given attribute is locked or not
    :param node: str
    :param attribute_name: str, transform attribute ('position', 'rotationX', ...)
    :return: bool
    """
    node = node_utils.get_pymxs_node(node)
    # 9 lock flags: XYZ position, XYZ rotation, XYZ scale
    lock_flags = list(rt.getTransformLockFlags(node))
    xform_attrs = [
        max_constants.TRANSLATION_ATTR_NAME, max_constants.ROTATION_ATTR_NAME, max_constants.SCALE_ATTR_NAME]
    for name, flags_list in zip(xform_attrs, [[0, 1, 2], [3, 4, 5], [6, 7, 8]]):
        if name not in attribute_name:
            continue
        if attribute_name == name:
            # The bare transform name counts as locked only when all three axes are locked
            return all(lock_flags[flag_index] for flag_index in flags_list)
        # Per-axis query, e.g. 'positionX'
        for axis, flag_index in zip('XYZ', flags_list):
            if attribute_name == '{}{}'.format(name, axis) and lock_flags[flag_index]:
                return True
    return False
def lock_translate_attributes(node):
    """
    Locks all translate transform attributes of the given node
    :param node: str
    """
    max_node = node_utils.get_pymxs_node(node)
    lock_flags = list(rt.getTransformLockFlags(max_node))
    # Flags 0-2 are the X, Y and Z position locks
    for flag_index in (0, 1, 2):
        lock_flags[flag_index] = True
    enabled_bits = [index + 1 for index, locked in enumerate(lock_flags) if locked]
    ms_array = helpers.convert_python_list_to_maxscript_bit_array(enabled_bits)
    return rt.setTransformLockFlags(max_node, ms_array)
def unlock_translate_attributes(node):
    """
    Unlocks all translate transform attributes of the given node
    :param node: str
    """
    max_node = node_utils.get_pymxs_node(node)
    lock_flags = list(rt.getTransformLockFlags(max_node))
    # Flags 0-2 are the X, Y and Z position locks
    for flag_index in (0, 1, 2):
        lock_flags[flag_index] = False
    enabled_bits = [index + 1 for index, locked in enumerate(lock_flags) if locked]
    ms_array = helpers.convert_python_list_to_maxscript_bit_array(enabled_bits)
    return rt.setTransformLockFlags(max_node, ms_array)
def lock_rotate_attributes(node):
    """
    Locks all rotate transform attributes of the given node
    :param node: str
    """
    max_node = node_utils.get_pymxs_node(node)
    lock_flags = list(rt.getTransformLockFlags(max_node))
    # Flags 3-5 are the X, Y and Z rotation locks
    for flag_index in (3, 4, 5):
        lock_flags[flag_index] = True
    enabled_bits = [index + 1 for index, locked in enumerate(lock_flags) if locked]
    ms_array = helpers.convert_python_list_to_maxscript_bit_array(enabled_bits)
    return rt.setTransformLockFlags(max_node, ms_array)
def unlock_rotate_attributes(node):
    """
    Unlocks all rotate transform attributes of the given node
    :param node: str
    """
    max_node = node_utils.get_pymxs_node(node)
    lock_flags = list(rt.getTransformLockFlags(max_node))
    # Flags 3-5 are the X, Y and Z rotation locks
    for flag_index in (3, 4, 5):
        lock_flags[flag_index] = False
    enabled_bits = [index + 1 for index, locked in enumerate(lock_flags) if locked]
    ms_array = helpers.convert_python_list_to_maxscript_bit_array(enabled_bits)
    return rt.setTransformLockFlags(max_node, ms_array)
def lock_scale_attributes(node):
    """
    Locks all scale transform attributes of the given node
    :param node: str
    """
    max_node = node_utils.get_pymxs_node(node)
    lock_flags = list(rt.getTransformLockFlags(max_node))
    # Flags 6-8 are the X, Y and Z scale locks
    for flag_index in (6, 7, 8):
        lock_flags[flag_index] = True
    enabled_bits = [index + 1 for index, locked in enumerate(lock_flags) if locked]
    ms_array = helpers.convert_python_list_to_maxscript_bit_array(enabled_bits)
    return rt.setTransformLockFlags(max_node, ms_array)
def unlock_scale_attributes(node):
    """
    Unlocks all scale transform attributes of the given node
    :param node: str
    """
    max_node = node_utils.get_pymxs_node(node)
    lock_flags = list(rt.getTransformLockFlags(max_node))
    # Flags 6-8 are the X, Y and Z scale locks
    for flag_index in (6, 7, 8):
        lock_flags[flag_index] = False
    enabled_bits = [index + 1 for index, locked in enumerate(lock_flags) if locked]
    ms_array = helpers.convert_python_list_to_maxscript_bit_array(enabled_bits)
    return rt.setTransformLockFlags(max_node, ms_array)
def get_attribute_value(node, attribute_name):
    """
    Returns the value of the given attribute in the given node
    :param node: str
    :param attribute_name: str
    :return: variant, None when the property does not exist
    """
    max_node = node_utils.get_pymxs_node(node)
    try:
        return rt.getProperty(max_node, attribute_name)
    except Exception:
        # Property is missing on this node
        return None
def set_integer_attribute_value(node, attribute_name, attribute_value, clamp=False):
    """
    Sets the integer value of the given attribute in the given node
    :param node: str
    :param attribute_name: str
    :param attribute_value: int
    :param clamp: bool, currently unused
    :return:
    """
    pymxs_node = node_utils.get_pymxs_node(node)
    transform_names = (
        max_constants.TRANSLATION_ATTR_NAME, max_constants.ROTATION_ATTR_NAME, max_constants.SCALE_ATTR_NAME)
    for transform_name in transform_names:
        if transform_name not in attribute_name:
            continue
        channel_name = attribute_name[:-1]
        axis_name = attribute_name[-1].lower()
        if axis_name not in max_constants.AXES:
            continue
        axis_index = max_constants.AXES.index(axis_name)
        channel_controller = rt.getPropertyController(pymxs_node.controller, channel_name)
        # TODO: For now we only support default transform controllers (Bezier Float for translation,
        # TODO: Euler_XYZ for rotation and Bezier Scale for scale). Support other controller types.
        if channel_name in (max_constants.TRANSLATION_ATTR_NAME, max_constants.ROTATION_ATTR_NAME):
            axis_controller = rt.getPropertyController(channel_controller, '{} {}'.format(axis_name, channel_name))
            axis_controller.value = attribute_value
            return
        if channel_name == max_constants.SCALE_ATTR_NAME:
            scale_value = channel_controller.value
            scale_value[axis_index] = attribute_value
            channel_controller.value = scale_value
            return
    try:
        return rt.setProperty(pymxs_node, attribute_name, attribute_value)
    except Exception:
        pass
def set_float_attribute_value(node, attribute_name, attribute_value, clamp=False):
    """
    Sets the float value of the given attribute in the given node
    :param node: str
    :param attribute_name: str
    :param attribute_value: float
    :param clamp: bool, currently unused
    :return:
    """
    pymxs_node = node_utils.get_pymxs_node(node)
    transform_names = (
        max_constants.TRANSLATION_ATTR_NAME, max_constants.ROTATION_ATTR_NAME, max_constants.SCALE_ATTR_NAME)
    for transform_name in transform_names:
        if transform_name not in attribute_name:
            continue
        channel_name = attribute_name[:-1]
        axis_name = attribute_name[-1].lower()
        if axis_name not in max_constants.AXES:
            continue
        axis_index = max_constants.AXES.index(axis_name)
        channel_controller = rt.getPropertyController(pymxs_node.controller, channel_name)
        # TODO: For now we only support default transform controllers (Bezier Float for translation,
        # TODO: Euler_XYZ for rotation and Bezier Scale for scale). Support other controller types.
        if channel_name in (max_constants.TRANSLATION_ATTR_NAME, max_constants.ROTATION_ATTR_NAME):
            axis_controller = rt.getPropertyController(channel_controller, '{} {}'.format(axis_name, channel_name))
            axis_controller.value = attribute_value
            return
        if channel_name == max_constants.SCALE_ATTR_NAME:
            scale_value = channel_controller.value
            scale_value[axis_index] = attribute_value
            channel_controller.value = scale_value
            return
    try:
        return rt.setProperty(pymxs_node, attribute_name, attribute_value)
    except Exception:
        pass
def new_file(force=True):
    """
    Creates a new empty scene file
    :param force: bool, True to skip any confirmation prompt
    """
    scene.new_scene(force=force)
def open_file(file_path, force=True):
    """
    Opens the scene file at the given path
    :param file_path: str
    :param force: bool, True to discard unsaved changes without asking
    """
    if force:
        return rt.loadMaxFile(file_path)
    # Nothing to save, so loading is safe without prompting
    if not rt.getSaveRequired():
        return rt.loadMaxFile(file_path)
    # Ask the user to save; only continue when the prompt was not cancelled
    if rt.checkForSave():
        return rt.loadMaxFile(file_path, quiet=True)
    return None
def import_file(file_path, force=True, **kwargs):
    """
    Imports given file into current DCC scene
    :param file_path: str
    :param force: bool, True to suppress import prompts
    :return:
    """
    return rt.importFile(file_path, noPrompt=force)
def merge_file(file_path, force=True, **kwargs):
    """
    Merges given file into current DCC scene
    :param file_path: str
    :param force: bool, currently unused by the Max implementation
    :return:
    """
    return rt.mergeMAXFile(file_path)
# def reference_file(file_path, force=True, **kwargs):
# """
# References given file into current DCC scene
# :param file_path: str
# :param force: bool
# :param kwargs: keyword arguments
# :return:
# """
#
# pass
def import_obj_file(file_path, force=True, **kwargs):
    """
    Imports OBJ file into current DCC scene
    :param file_path: str
    :param force: bool, True to suppress import prompts
    :param kwargs: keyword arguments
    :return:
    """
    if not force:
        return rt.importFile(file_path, using='OBJIMP')
    no_prompt = rt.readValue(rt.StringStream('#noPrompt'))
    return rt.importFile(file_path, no_prompt, using='OBJIMP')
def import_fbx_file(file_path, force=True, **kwargs):
    """
    Imports FBX file into current DCC scene
    :param file_path: str
    :param force: bool, True to suppress import prompts
    :param kwargs: keyword arguments ('skin' and 'animation' are read but
        currently not forwarded to the FBX importer)
    :return:
    """
    skin = kwargs.get('skin', True)
    animation = kwargs.get('animation', True)
    rt.FBXExporterSetParam("Mode", rt.readvalue(rt.StringStream('#create')))
    # rt.FBXExporterSetParam("Skin", skin)
    # rt.FBXExporterSetParam("Animation", animation)
    if not force:
        return rt.importFile(file_path)
    return rt.importFile(file_path, rt.readValue(rt.StringStream('#noPrompt')))
def scene_name():
    """
    Returns the name of the currently opened scene
    :return: str
    """
    return scene.get_scene_name()
def save_current_scene(force=True, **kwargs):
    """
    Saves current scene
    :param force: bool, True to save without prompting the user
    :param kwargs: optional 'path_to_save', 'name_to_save' and 'extension_to_save'
    """
    path_to_save = kwargs.get('path_to_save', None)
    name_to_save = kwargs.get('name_to_save', None)
    extension_to_save = kwargs.get('extension_to_save', get_extensions()[0])
    if not extension_to_save.startswith('.'):
        extension_to_save = '.{}'.format(extension_to_save)
    # Fall back to the currently opened scene name when no name was given
    name_to_save = name_to_save or rt.maxFileName
    if not name_to_save:
        return
    file_to_save = path_utils.join_path(path_to_save, name_to_save)
    if not file_to_save.endswith(extension_to_save):
        file_to_save = '{}{}'.format(file_to_save, extension_to_save)
    if force:
        return rt.saveMaxFile(file_to_save, quiet=True)
    if not rt.getSaveRequired():
        return rt.saveMaxFile(file_to_save)
    if rt.checkForSave():
        return rt.saveMaxFile(file_to_save)
def refresh_viewport():
    """
    Forces a redraw of the current DCC viewport
    """
    viewport.force_redraw()
def enable_undo():
    """
    Enables undo functionality
    :return: bool, False because undo management is not implemented for 3ds Max
    """
    return False
def disable_undo():
    """
    Disables undo functionality
    :return: bool, False because undo management is not implemented for 3ds Max
    """
    return False
def get_all_fonts():
    """
    Returns all fonts available in DCC
    :return: list(str), empty because font listing is not supported in 3ds Max yet
    """
    return []
def get_control_colors():
    """
    Returns control colors available in DCC
    :return: list(tuple(float, float, float)), empty because control colors
        are not supported in 3ds Max yet
    """
    return []
# =================================================================================================================
# TRANSFORMS
# =================================================================================================================
def convert_translation(translation):
    """
    Converts a 3ds Max (Z-up) translation into tpDcc's Y-up convention
    :param translation: list(float, float, float)
    :return: tuple(float, float, float)
    """
    tx, ty, tz = translation[0], translation[1], translation[2]
    # Z-up -> Y-up: (x, y, z) becomes (x, -z, y)
    return tx, -tz, ty
def convert_dcc_translation(translation):
    """
    Converts a tpDcc (Y-up) translation into 3ds Max's Z-up convention
    :param translation: list(float, float, float)
    :return: tuple(float, float, float)
    """
    tx, ty, tz = translation[0], translation[1], translation[2]
    # Y-up -> Z-up: (x, y, z) becomes (x, z, -y)
    return tx, tz, -ty
def convert_rotation(rotation):
    """
    Converts a 3ds Max (Z-up) XYZ Euler rotation into tpDcc's Y-up convention
    :param rotation: tuple(float, float, float)
    :return: list(float, float, float)
    """
    source_matrix = np.array(mathlib.rotation_matrix_xyz(rotation))
    change_of_basis = np.array(mathlib.rotation_matrix_xyz([-90, 0, 0]))
    # Conjugate the rotation by the axis-change matrix: B * R * B^-1
    converted_matrix = change_of_basis.dot(source_matrix).dot(np.linalg.inv(change_of_basis))
    return list(mathlib.rotation_matrix_to_xyz_euler(converted_matrix))
def convert_dcc_rotation(rotation):
    """
    Converts a tpDcc (Y-up) XYZ Euler rotation into 3ds Max's Z-up convention
    :param rotation: list(float, float, float)
    :return: list(float, float, float)
    """
    source_matrix = np.array(mathlib.rotation_matrix_xyz(rotation))
    change_of_basis = np.array(mathlib.rotation_matrix_xyz([90, 0, 0]))
    # Conjugate the rotation by the axis-change matrix: B * R * B^-1
    converted_matrix = change_of_basis.dot(source_matrix).dot(np.linalg.inv(change_of_basis))
    return list(mathlib.rotation_matrix_to_xyz_euler(converted_matrix))
def convert_scale(scale):
    """
    Converts a 3ds Max (Z-up) scale into tpDcc's Y-up convention
    :param scale: tuple(float, float, float)
    :return: tuple(float, float, float)
    """
    sx, sy, sz = scale[0], scale[1], scale[2]
    # Swap the Y and Z components
    return sx, sz, sy
def convert_dcc_scale(scale):
    """
    Converts a tpDcc (Y-up) scale into 3ds Max's Z-up convention
    :param scale: list(float, float, float)
    :return: tuple(float, float, float)
    """
    sx, sy, sz = scale[0], scale[1], scale[2]
    # Swap the Y and Z components
    return sx, sz, sy
# =================================================================================================================
# DECORATORS
# =================================================================================================================
def undo_decorator():
    """
    Returns undo decorator for current DCC
    :return: callable
    """
    # 3ds Max has no dedicated undo-chunk decorator; return a pass-through
    return decorators.empty_decorator
def repeat_last_decorator(command_name=None):
    """
    Returns repeat last decorator for current DCC
    :param command_name: str, unused by the Max implementation
    :return: callable
    """
    # Repeat-last is not supported in 3ds Max; return a pass-through
    return decorators.empty_decorator
def suspend_refresh_decorator():
    """
    Returns decorator that suspends viewport refresh while the decorated function runs
    :return: callable
    """
    # Refresh suspension is not implemented for 3ds Max; return a pass-through
    return decorators.empty_decorator
def restore_selection_decorator():
    """
    Returns decorator that restores the previous selection after the decorated function runs
    :return: callable
    """
    # Selection restore is not implemented for 3ds Max; return a pass-through
    return decorators.empty_decorator
| [
"pymxs.runtime.isValidNode",
"pymxs.runtime.actionMan.executeAction",
"pymxs.runtime.select",
"tpDcc.dccs.max.core.scene.get_selected_nodes",
"pymxs.runtime.getCurrentSelection",
"tpDcc.dccs.max.core.scene.new_scene",
"pymxs.runtime.getTransformLockFlags",
"tpDcc.dccs.max.core.helpers.get_max_version"... | [((1682, 1735), 'pymxs.runtime.pathConfig.setCurrentProjectFolder', 'rt.pathConfig.setCurrentProjectFolder', (['workspace_path'], {}), '(workspace_path)\n', (1719, 1735), True, 'from pymxs import runtime as rt\n'), ((1925, 1961), 'pymxs.runtime.actionMan.executeAction', 'rt.actionMan.executeAction', (['(0)', '"""310"""'], {}), "(0, '310')\n", (1951, 1961), True, 'from pymxs import runtime as rt\n'), ((2771, 2791), 'tpDcc.dccs.max.core.gui.get_max_window', 'gui.get_max_window', ([], {}), '()\n', (2789, 2791), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((3209, 3304), 'tpDcc.dccs.max.core.directory.open_file_dialog', 'directory.open_file_dialog', ([], {'caption': 'title', 'start_directory': 'start_directory', 'filters': 'pattern'}), '(caption=title, start_directory=start_directory,\n filters=pattern)\n', (3235, 3304), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((3518, 3613), 'tpDcc.dccs.max.core.directory.save_file_dialog', 'directory.save_file_dialog', ([], {'caption': 'title', 'start_directory': 'start_directory', 'filters': 'pattern'}), '(caption=title, start_directory=start_directory,\n filters=pattern)\n', (3544, 3613), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((4220, 4233), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4231, 4233), False, 'from collections import OrderedDict\n'), ((4417, 4430), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4428, 4430), False, 'from collections import OrderedDict\n'), ((4898, 4929), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (4923, 4929), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, 
constants as max_constants, node as node_utils\n'), ((5923, 5971), 'tpDcc.dccs.max.core.name.find_unique_name', 'name_utils.find_unique_name', ([], {'obj_names': 'obj_names'}), '(obj_names=obj_names)\n', (5950, 5971), True, 'from tpDcc.dccs.max.core import name as name_utils\n'), ((6732, 6758), 'tpDcc.dccs.max.core.scene.get_selected_nodes', 'scene.get_selected_nodes', ([], {}), '()\n', (6756, 6758), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((7454, 7499), 'tpDcc.dccs.max.core.scene.new_scene', 'scene.new_scene', ([], {'force': 'force', 'do_save': 'do_save'}), '(force=force, do_save=do_save)\n', (7469, 7499), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((7618, 7649), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (7643, 7649), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((7661, 7681), 'pymxs.runtime.isValidNode', 'rt.isValidNode', (['node'], {}), '(node)\n', (7675, 7681), True, 'from pymxs import runtime as rt\n'), ((7871, 7902), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (7896, 7902), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((8130, 8161), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (8155, 8161), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((8173, 8190), 'pymxs.runtime.deselect', 'rt.deselect', (['node'], {}), '(node)\n', (8184, 8190), True, 'from pymxs import runtime as rt\n'), ((8279, 8298), 'pymxs.runtime.clearSelection', 'rt.clearSelection', ([], {}), 
'()\n', (8296, 8298), True, 'from pymxs import runtime as rt\n'), ((8582, 8606), 'pymxs.runtime.getCurrentSelection', 'rt.getCurrentSelection', ([], {}), '()\n', (8604, 8606), True, 'from pymxs import runtime as rt\n'), ((9178, 9209), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (9203, 9209), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((10157, 10188), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (10182, 10188), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((10457, 10521), 'tpDcc.dccs.max.core.helpers.convert_python_list_to_maxscript_bit_array', 'helpers.convert_python_list_to_maxscript_bit_array', (['to_bit_array'], {}), '(to_bit_array)\n', (10507, 10521), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((10534, 10574), 'pymxs.runtime.setTransformLockFlags', 'rt.setTransformLockFlags', (['node', 'ms_array'], {}), '(node, ms_array)\n', (10558, 10574), True, 'from pymxs import runtime as rt\n'), ((10730, 10761), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (10755, 10761), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((11033, 11097), 'tpDcc.dccs.max.core.helpers.convert_python_list_to_maxscript_bit_array', 'helpers.convert_python_list_to_maxscript_bit_array', (['to_bit_array'], {}), '(to_bit_array)\n', (11083, 11097), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((11110, 11150), 'pymxs.runtime.setTransformLockFlags', 'rt.setTransformLockFlags', (['node', 'ms_array'], {}), 
'(node, ms_array)\n', (11134, 11150), True, 'from pymxs import runtime as rt\n'), ((11296, 11327), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (11321, 11327), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((11596, 11660), 'tpDcc.dccs.max.core.helpers.convert_python_list_to_maxscript_bit_array', 'helpers.convert_python_list_to_maxscript_bit_array', (['to_bit_array'], {}), '(to_bit_array)\n', (11646, 11660), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((11673, 11713), 'pymxs.runtime.setTransformLockFlags', 'rt.setTransformLockFlags', (['node', 'ms_array'], {}), '(node, ms_array)\n', (11697, 11713), True, 'from pymxs import runtime as rt\n'), ((11863, 11894), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (11888, 11894), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((12166, 12230), 'tpDcc.dccs.max.core.helpers.convert_python_list_to_maxscript_bit_array', 'helpers.convert_python_list_to_maxscript_bit_array', (['to_bit_array'], {}), '(to_bit_array)\n', (12216, 12230), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((12243, 12283), 'pymxs.runtime.setTransformLockFlags', 'rt.setTransformLockFlags', (['node', 'ms_array'], {}), '(node, ms_array)\n', (12267, 12283), True, 'from pymxs import runtime as rt\n'), ((12427, 12458), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (12452, 12458), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((12727, 12791), 
'tpDcc.dccs.max.core.helpers.convert_python_list_to_maxscript_bit_array', 'helpers.convert_python_list_to_maxscript_bit_array', (['to_bit_array'], {}), '(to_bit_array)\n', (12777, 12791), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((12804, 12844), 'pymxs.runtime.setTransformLockFlags', 'rt.setTransformLockFlags', (['node', 'ms_array'], {}), '(node, ms_array)\n', (12828, 12844), True, 'from pymxs import runtime as rt\n'), ((12992, 13023), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (13017, 13023), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((13295, 13359), 'tpDcc.dccs.max.core.helpers.convert_python_list_to_maxscript_bit_array', 'helpers.convert_python_list_to_maxscript_bit_array', (['to_bit_array'], {}), '(to_bit_array)\n', (13345, 13359), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((13372, 13412), 'pymxs.runtime.setTransformLockFlags', 'rt.setTransformLockFlags', (['node', 'ms_array'], {}), '(node, ms_array)\n', (13396, 13412), True, 'from pymxs import runtime as rt\n'), ((13626, 13657), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (13651, 13657), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((14065, 14096), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (14090, 14096), True, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((15776, 15807), 'tpDcc.dccs.max.core.node.get_pymxs_node', 'node_utils.get_pymxs_node', (['node'], {}), '(node)\n', (15801, 15807), True, 'from 
tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((17281, 17309), 'tpDcc.dccs.max.core.scene.new_scene', 'scene.new_scene', ([], {'force': 'force'}), '(force=force)\n', (17296, 17309), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((17523, 17543), 'pymxs.runtime.getSaveRequired', 'rt.getSaveRequired', ([], {}), '()\n', (17541, 17543), True, 'from pymxs import runtime as rt\n'), ((17622, 17639), 'pymxs.runtime.checkForSave', 'rt.checkForSave', ([], {}), '()\n', (17637, 17639), True, 'from pymxs import runtime as rt\n'), ((17899, 17939), 'pymxs.runtime.importFile', 'rt.importFile', (['file_path'], {'noPrompt': 'force'}), '(file_path, noPrompt=force)\n', (17912, 17939), True, 'from pymxs import runtime as rt\n'), ((18126, 18152), 'pymxs.runtime.mergeMAXFile', 'rt.mergeMAXFile', (['file_path'], {}), '(file_path)\n', (18141, 18152), True, 'from pymxs import runtime as rt\n'), ((19524, 19546), 'tpDcc.dccs.max.core.scene.get_scene_name', 'scene.get_scene_name', ([], {}), '()\n', (19544, 19546), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((20101, 20149), 'tpDcc.libs.python.path.join_path', 'path_utils.join_path', (['path_to_save', 'name_to_save'], {}), '(path_to_save, name_to_save)\n', (20121, 20149), True, 'from tpDcc.libs.python import decorators, mathlib, path as path_utils\n'), ((20641, 20664), 'tpDcc.dccs.max.core.viewport.force_redraw', 'viewport.force_redraw', ([], {}), '()\n', (20662, 20664), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((1166, 1191), 'tpDcc.dccs.max.core.helpers.get_max_version', 'helpers.get_max_version', ([], {}), '()\n', (1189, 1191), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, 
viewport, constants as max_constants, node as node_utils\n'), ((1299, 1324), 'tpDcc.dccs.max.core.helpers.get_max_version', 'helpers.get_max_version', ([], {}), '()\n', (1322, 1324), False, 'from tpDcc.dccs.max.core import gui, helpers, scene, directory, viewport, constants as max_constants, node as node_utils\n'), ((2334, 2365), 'Qt.QtWidgets.QApplication.devicePixelRatio', 'QApplication.devicePixelRatio', ([], {}), '()\n', (2363, 2365), False, 'from Qt.QtWidgets import QApplication, QMainWindow\n'), ((7945, 7960), 'pymxs.runtime.select', 'rt.select', (['node'], {}), '(node)\n', (7954, 7960), True, 'from pymxs import runtime as rt\n'), ((7986, 8005), 'pymxs.runtime.selectMore', 'rt.selectMore', (['node'], {}), '(node)\n', (7999, 8005), True, 'from pymxs import runtime as rt\n'), ((9232, 9262), 'pymxs.runtime.getTransformLockFlags', 'rt.getTransformLockFlags', (['node'], {}), '(node)\n', (9256, 9262), True, 'from pymxs import runtime as rt\n'), ((10211, 10241), 'pymxs.runtime.getTransformLockFlags', 'rt.getTransformLockFlags', (['node'], {}), '(node)\n', (10235, 10241), True, 'from pymxs import runtime as rt\n'), ((10784, 10814), 'pymxs.runtime.getTransformLockFlags', 'rt.getTransformLockFlags', (['node'], {}), '(node)\n', (10808, 10814), True, 'from pymxs import runtime as rt\n'), ((11350, 11380), 'pymxs.runtime.getTransformLockFlags', 'rt.getTransformLockFlags', (['node'], {}), '(node)\n', (11374, 11380), True, 'from pymxs import runtime as rt\n'), ((11917, 11947), 'pymxs.runtime.getTransformLockFlags', 'rt.getTransformLockFlags', (['node'], {}), '(node)\n', (11941, 11947), True, 'from pymxs import runtime as rt\n'), ((12481, 12511), 'pymxs.runtime.getTransformLockFlags', 'rt.getTransformLockFlags', (['node'], {}), '(node)\n', (12505, 12511), True, 'from pymxs import runtime as rt\n'), ((13046, 13076), 'pymxs.runtime.getTransformLockFlags', 'rt.getTransformLockFlags', (['node'], {}), '(node)\n', (13070, 13076), True, 'from pymxs import runtime as rt\n'), ((13683, 
13719), 'pymxs.runtime.getProperty', 'rt.getProperty', (['node', 'attribute_name'], {}), '(node, attribute_name)\n', (13697, 13719), True, 'from pymxs import runtime as rt\n'), ((15386, 15439), 'pymxs.runtime.setProperty', 'rt.setProperty', (['node', 'attribute_name', 'attribute_value'], {}), '(node, attribute_name, attribute_value)\n', (15400, 15439), True, 'from pymxs import runtime as rt\n'), ((17097, 17150), 'pymxs.runtime.setProperty', 'rt.setProperty', (['node', 'attribute_name', 'attribute_value'], {}), '(node, attribute_name, attribute_value)\n', (17111, 17150), True, 'from pymxs import runtime as rt\n'), ((17473, 17498), 'pymxs.runtime.loadMaxFile', 'rt.loadMaxFile', (['file_path'], {}), '(file_path)\n', (17487, 17498), True, 'from pymxs import runtime as rt\n'), ((17588, 17613), 'pymxs.runtime.loadMaxFile', 'rt.loadMaxFile', (['file_path'], {}), '(file_path)\n', (17602, 17613), True, 'from pymxs import runtime as rt\n'), ((17656, 17693), 'pymxs.runtime.loadMaxFile', 'rt.loadMaxFile', (['file_path'], {'quiet': '(True)'}), '(file_path, quiet=True)\n', (17670, 17693), True, 'from pymxs import runtime as rt\n'), ((18755, 18795), 'pymxs.runtime.importFile', 'rt.importFile', (['file_path'], {'using': '"""OBJIMP"""'}), "(file_path, using='OBJIMP')\n", (18768, 18795), True, 'from pymxs import runtime as rt\n'), ((19392, 19416), 'pymxs.runtime.importFile', 'rt.importFile', (['file_path'], {}), '(file_path)\n', (19405, 19416), True, 'from pymxs import runtime as rt\n'), ((20303, 20343), 'pymxs.runtime.saveMaxFile', 'rt.saveMaxFile', (['file_to_save'], {'quiet': '(True)'}), '(file_to_save, quiet=True)\n', (20317, 20343), True, 'from pymxs import runtime as rt\n'), ((20381, 20401), 'pymxs.runtime.getSaveRequired', 'rt.getSaveRequired', ([], {}), '()\n', (20399, 20401), True, 'from pymxs import runtime as rt\n'), ((20494, 20511), 'pymxs.runtime.checkForSave', 'rt.checkForSave', ([], {}), '()\n', (20509, 20511), True, 'from pymxs import runtime as rt\n'), ((22510, 
22547), 'tpDcc.libs.python.mathlib.rotation_matrix_xyz', 'mathlib.rotation_matrix_xyz', (['rotation'], {}), '(rotation)\n', (22537, 22547), False, 'from tpDcc.libs.python import decorators, mathlib, path as path_utils\n'), ((22581, 22621), 'tpDcc.libs.python.mathlib.rotation_matrix_xyz', 'mathlib.rotation_matrix_xyz', (['[-90, 0, 0]'], {}), '([-90, 0, 0])\n', (22608, 22621), False, 'from tpDcc.libs.python import decorators, mathlib, path as path_utils\n'), ((23110, 23147), 'tpDcc.libs.python.mathlib.rotation_matrix_xyz', 'mathlib.rotation_matrix_xyz', (['rotation'], {}), '(rotation)\n', (23137, 23147), False, 'from tpDcc.libs.python import decorators, mathlib, path as path_utils\n'), ((23181, 23220), 'tpDcc.libs.python.mathlib.rotation_matrix_xyz', 'mathlib.rotation_matrix_xyz', (['[90, 0, 0]'], {}), '([90, 0, 0])\n', (23208, 23220), False, 'from tpDcc.libs.python import decorators, mathlib, path as path_utils\n'), ((14557, 14605), 'pymxs.runtime.getPropertyController', 'rt.getPropertyController', (['node.controller', 'xform'], {}), '(node.controller, xform)\n', (14581, 14605), True, 'from pymxs import runtime as rt\n'), ((16268, 16316), 'pymxs.runtime.getPropertyController', 'rt.getPropertyController', (['node.controller', 'xform'], {}), '(node.controller, xform)\n', (16292, 16316), True, 'from pymxs import runtime as rt\n'), ((19143, 19169), 'pymxs.runtime.StringStream', 'rt.StringStream', (['"""#create"""'], {}), "('#create')\n", (19158, 19169), True, 'from pymxs import runtime as rt\n'), ((20454, 20482), 'pymxs.runtime.saveMaxFile', 'rt.saveMaxFile', (['file_to_save'], {}), '(file_to_save)\n', (20468, 20482), True, 'from pymxs import runtime as rt\n'), ((20532, 20560), 'pymxs.runtime.saveMaxFile', 'rt.saveMaxFile', (['file_to_save'], {}), '(file_to_save)\n', (20546, 20560), True, 'from pymxs import runtime as rt\n'), ((22735, 22766), 'numpy.linalg.inv', 'np.linalg.inv', (['rotation_matrix2'], {}), '(rotation_matrix2)\n', (22748, 22766), True, 'import numpy as 
np\n'), ((23334, 23365), 'numpy.linalg.inv', 'np.linalg.inv', (['rotation_matrix2'], {}), '(rotation_matrix2)\n', (23347, 23365), True, 'import numpy as np\n'), ((2385, 2398), 'Qt.QtWidgets.QMainWindow', 'QMainWindow', ([], {}), '()\n', (2396, 2398), False, 'from Qt.QtWidgets import QApplication, QMainWindow\n'), ((18683, 18711), 'pymxs.runtime.StringStream', 'rt.StringStream', (['"""#noPrompt"""'], {}), "('#noPrompt')\n", (18698, 18711), True, 'from pymxs import runtime as rt\n'), ((19336, 19364), 'pymxs.runtime.StringStream', 'rt.StringStream', (['"""#noPrompt"""'], {}), "('#noPrompt')\n", (19351, 19364), True, 'from pymxs import runtime as rt\n')] |
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.core.protobuf import saver_pb2
import numpy as np
import cv2
import matplotlib.pyplot as plt
import os
from os.path import join as pjoin
import sys
import copy
import detect_face
import nn4 as network
import random
import sklearn
from sklearn.externals import joblib
# face detection parameters
minsize = 20  # minimum size of face (pixels)
threshold = [0.6, 0.7, 0.7]  # three steps's threshold (P-Net, R-Net, O-Net)
factor = 0.709  # scale factor for the image pyramid
# facenet embedding parameters
model_dir = './model_check_point/model.ckpt-500000'  # "Directory containing the graph definition and checkpoint files."
image_size = 96  # "Image size (height, width) in pixels."
pool_type = 'MAX'  # "The type of pooling to use for some of the inception layers {'MAX', 'L2'}."
use_lrn = False  # "Enables Local Response Normalization after the first layers of the inception network."
# BUG FIX: the original `seed=42,` had a trailing comma, which made `seed`
# the one-element tuple (42,) rather than the integer 42.
seed = 42  # "Random seed."
batch_size = None  # "Number of images to process in a batch."
def to_rgb(img):
    """Replicate a single-channel (grayscale) image into a 3-channel
    uint8 image whose R, G and B planes are identical copies of `img`."""
    height, width = img.shape
    stacked = np.empty((height, width, 3), dtype=np.uint8)
    for channel in range(3):
        stacked[:, :, channel] = img
    return stacked
if __name__ == '__main__':
    # Restore the pre-trained MTCNN face-detection networks.
    print('Creating networks and loading parameters')
    gpu_memory_fraction = 1.0
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, './model_check_point/')

    # Load the input image (path given on the command line) and grayscale it.
    image = cv2.imread(sys.argv[1])
    find_results = []
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if gray.ndim == 2:
        img = to_rgb(gray)

    # Detect faces and their five facial landmark points.
    bounding_boxes, points = detect_face.detect_face(
        img, minsize, pnet, rnet, onet, threshold, factor)
    nrof_faces = bounding_boxes.shape[0]  # number of faces

    for num, face_position in enumerate(bounding_boxes):
        box = face_position.astype(int)
        # draw face bounding box
        cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 5)
        # draw the five landmark points (x in points[0:5], y in points[5:10])
        for landmark in range(5):
            cv2.circle(image, (points[landmark][num], points[landmark + 5][num]),
                       5, (0, 255, 0), -1)

    # show result
    image = cv2.resize(image, (640, 480), interpolation=cv2.INTER_CUBIC)
    cv2.imshow("Show Result", image)
    cv2.waitKey(0)
| [
"cv2.rectangle",
"tensorflow.Graph",
"detect_face.create_mtcnn",
"tensorflow.ConfigProto",
"cv2.imshow",
"detect_face.detect_face",
"cv2.waitKey",
"numpy.empty",
"cv2.circle",
"cv2.cvtColor",
"tensorflow.GPUOptions",
"cv2.resize",
"cv2.imread"
] | [((1019, 1054), 'numpy.empty', 'np.empty', (['(w, h, 3)'], {'dtype': 'np.uint8'}), '((w, h, 3), dtype=np.uint8)\n', (1027, 1054), True, 'import numpy as np\n'), ((1613, 1636), 'cv2.imread', 'cv2.imread', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (1623, 1636), False, 'import cv2\n'), ((1669, 1708), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1681, 1708), False, 'import cv2\n'), ((1808, 1882), 'detect_face.detect_face', 'detect_face.detect_face', (['img', 'minsize', 'pnet', 'rnet', 'onet', 'threshold', 'factor'], {}), '(img, minsize, pnet, rnet, onet, threshold, factor)\n', (1831, 1882), False, 'import detect_face\n'), ((2662, 2722), 'cv2.resize', 'cv2.resize', (['image', '(640, 480)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image, (640, 480), interpolation=cv2.INTER_CUBIC)\n', (2672, 2722), False, 'import cv2\n'), ((2727, 2759), 'cv2.imshow', 'cv2.imshow', (['"""Show Result"""', 'image'], {}), "('Show Result', image)\n", (2737, 2759), False, 'import cv2\n'), ((2763, 2777), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2774, 2777), False, 'import cv2\n'), ((1311, 1377), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'gpu_memory_fraction'}), '(per_process_gpu_memory_fraction=gpu_memory_fraction)\n', (1324, 1377), True, 'import tensorflow as tf\n'), ((2114, 2231), 'cv2.rectangle', 'cv2.rectangle', (['image', '(face_position[0], face_position[1])', '(face_position[2], face_position[3])', '(0, 255, 0)', '(5)'], {}), '(image, (face_position[0], face_position[1]), (face_position[2\n ], face_position[3]), (0, 255, 0), 5)\n', (2127, 2231), False, 'import cv2\n'), ((2264, 2335), 'cv2.circle', 'cv2.circle', (['image', '(points[0][num], points[5][num])', '(5)', '(0, 255, 0)', '(-1)'], {}), '(image, (points[0][num], points[5][num]), 5, (0, 255, 0), -1)\n', (2274, 2335), False, 'import cv2\n'), ((2339, 2410), 'cv2.circle', 'cv2.circle', (['image', '(points[1][num], 
points[6][num])', '(5)', '(0, 255, 0)', '(-1)'], {}), '(image, (points[1][num], points[6][num]), 5, (0, 255, 0), -1)\n', (2349, 2410), False, 'import cv2\n'), ((2414, 2485), 'cv2.circle', 'cv2.circle', (['image', '(points[2][num], points[7][num])', '(5)', '(0, 255, 0)', '(-1)'], {}), '(image, (points[2][num], points[7][num]), 5, (0, 255, 0), -1)\n', (2424, 2485), False, 'import cv2\n'), ((2489, 2560), 'cv2.circle', 'cv2.circle', (['image', '(points[3][num], points[8][num])', '(5)', '(0, 255, 0)', '(-1)'], {}), '(image, (points[3][num], points[8][num]), 5, (0, 255, 0), -1)\n', (2499, 2560), False, 'import cv2\n'), ((2564, 2635), 'cv2.circle', 'cv2.circle', (['image', '(points[4][num], points[9][num])', '(5)', '(0, 255, 0)', '(-1)'], {}), '(image, (points[4][num], points[9][num]), 5, (0, 255, 0), -1)\n', (2574, 2635), False, 'import cv2\n'), ((1543, 1597), 'detect_face.create_mtcnn', 'detect_face.create_mtcnn', (['sess', '"""./model_check_point/"""'], {}), "(sess, './model_check_point/')\n", (1567, 1597), False, 'import detect_face\n'), ((1264, 1274), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1272, 1274), True, 'import tensorflow as tf\n'), ((1411, 1478), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'log_device_placement': '(False)'}), '(gpu_options=gpu_options, log_device_placement=False)\n', (1425, 1478), True, 'import tensorflow as tf\n')] |
from __future__ import print_function
import tensorflow as tf
import keras
from tensorflow.keras.models import load_model
from keras import backend as K
from keras.layers import Input
import numpy as np
import subprocess
from tensorloader import TensorLoader as tl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from sklearn import preprocessing
from sklearn.metrics import accuracy_score, roc_curve, auc, precision_recall_curve,average_precision_score, confusion_matrix
import pandas as pd
from sklearn import impute
import argparse
import os
import time
# Step 0: process command-line arguments.
parser = argparse.ArgumentParser(description='CoRE-ATAC Prediction Tool')
parser.add_argument("datadirectory")
parser.add_argument("basename")
parser.add_argument("model")
parser.add_argument("outputfile")
# BUG FIX: both optional-argument help strings ended with a stray ')' that
# was shown verbatim in `--help` output.
parser.add_argument('--pf', dest='pf', type=str, default="",
                    help='Destination of PEAS features')
parser.add_argument('--le', dest='le', type=str, default="",
                    help='Destination of LabelEncoder.')
parser.add_argument('--swapchannels', default=False, action='store_true', dest='swap')
args = parser.parse_args()

# Unpack into the module-level names consumed by predict() below.
datadirectory = args.datadirectory
basename = args.basename
model = args.model
outputfile = args.outputfile
featurefile = args.pf
labelencoder = args.le
swapchannels = args.swap
def predict(datadirectory, basename, model, outputfile, featurefile, labelencoder, swapchannels):
    """Run CoRE-ATAC predictions for one sample and write them to `outputfile`.

    Loads the trained Keras model, builds the sequence/signal tensor and the
    PEAS feature matrix for `basename`, predicts the four class probabilities
    for every peak, and writes a tab-separated table with one row per peak.
    """
    net = load_model(model)
    # Fall back to the bundled PEAS metadata when no paths were supplied.
    if featurefile == "":
        featurefile = "/CoRE-ATAC/PEAS/features.txt"
    if labelencoder == "":
        labelencoder = "/CoRE-ATAC/PEAS/labelencoder.txt"

    # Step 1: load the data.
    load_start = time.time()
    seqdata, sigdata, annot, summitpeaks, peaks = tl.readTensors(
        basename, datadirectory, 600, sequence=True, signal=True)
    peasfeatures = tl.getPEASFeatures(
        datadirectory + "/peak_features/" + basename + "_features.txt",
        featurefile, labelencoder, peaks)
    peasfeatures = np.expand_dims(peasfeatures, axis=2)
    sigseqdata = tl.getSeqSigTensor(seqdata, sigdata)
    print("--- Data loaded in %s seconds ---" % (time.time() - load_start))

    x_test_sigseq = sigseqdata
    if swapchannels == False:
        # Originally had channels first, but CPU tensorflow requires channels last.
        x_test_sigseq = np.moveaxis(x_test_sigseq, 1, -1)
    x_test_peas = peasfeatures

    # Step 2: make predictions.
    predict_start = time.time()
    sig_predictions, peas_predictions, predictions = net.predict(
        [x_test_sigseq, x_test_peas])
    print("--- Data predicted in %s seconds ---" % (time.time() - predict_start))

    # Step 3: write the output file (peak coordinates + class probabilities).
    columns = ["Chr", "Start", "End", "Promoter Probability",
               "Enhancer Probability", "Insulator Probability",
               "Other Probability"]
    pd.DataFrame(np.concatenate((peaks, predictions), axis=1),
                 columns=columns).to_csv(outputfile, header=None, index=None, sep="\t")
predict(datadirectory, basename, model, outputfile, featurefile, labelencoder, swapchannels)
| [
"argparse.ArgumentParser",
"tensorloader.TensorLoader.readTensors",
"tensorloader.TensorLoader.getSeqSigTensor",
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"numpy.concatenate",
"numpy.moveaxis",
"time.time",
"tensorloader.TensorLoader.getPEASFeatures"
] | [((632, 696), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CoRE-ATAC Prediction Tool"""'}), "(description='CoRE-ATAC Prediction Tool')\n", (655, 696), False, 'import argparse\n'), ((1478, 1495), 'tensorflow.keras.models.load_model', 'load_model', (['model'], {}), '(model)\n', (1488, 1495), False, 'from tensorflow.keras.models import load_model\n'), ((1711, 1722), 'time.time', 'time.time', ([], {}), '()\n', (1720, 1722), False, 'import time\n'), ((1769, 1841), 'tensorloader.TensorLoader.readTensors', 'tl.readTensors', (['basename', 'datadirectory', '(600)'], {'sequence': '(True)', 'signal': '(True)'}), '(basename, datadirectory, 600, sequence=True, signal=True)\n', (1783, 1841), True, 'from tensorloader import TensorLoader as tl\n'), ((1861, 1981), 'tensorloader.TensorLoader.getPEASFeatures', 'tl.getPEASFeatures', (["(datadirectory + '/peak_features/' + basename + '_features.txt')", 'featurefile', 'labelencoder', 'peaks'], {}), "(datadirectory + '/peak_features/' + basename +\n '_features.txt', featurefile, labelencoder, peaks)\n", (1879, 1981), True, 'from tensorloader import TensorLoader as tl\n'), ((2012, 2048), 'numpy.expand_dims', 'np.expand_dims', (['peasfeatures'], {'axis': '(2)'}), '(peasfeatures, axis=2)\n', (2026, 2048), True, 'import numpy as np\n'), ((2066, 2102), 'tensorloader.TensorLoader.getSeqSigTensor', 'tl.getSeqSigTensor', (['seqdata', 'sigdata'], {}), '(seqdata, sigdata)\n', (2084, 2102), True, 'from tensorloader import TensorLoader as tl\n'), ((2453, 2464), 'time.time', 'time.time', ([], {}), '()\n', (2462, 2464), False, 'import time\n'), ((2266, 2299), 'numpy.moveaxis', 'np.moveaxis', (['x_test_sigseq', '(1)', '(-1)'], {}), '(x_test_sigseq, 1, -1)\n', (2277, 2299), True, 'import numpy as np\n'), ((2153, 2164), 'time.time', 'time.time', ([], {}), '()\n', (2162, 2164), False, 'import time\n'), ((2615, 2626), 'time.time', 'time.time', ([], {}), '()\n', (2624, 2626), False, 'import time\n'), ((2820, 2864), 
'numpy.concatenate', 'np.concatenate', (['(peaks, predictions)'], {'axis': '(1)'}), '((peaks, predictions), axis=1)\n', (2834, 2864), True, 'import numpy as np\n')] |
import time
import cv2
import numpy as np
from numba import njit
from scipy.ndimage import correlate
from sklearn.linear_model import Ridge
def compute_image_grads(image):
kernel_hor = np.array([-1, 0, 1], dtype=np.float32).reshape(1, 3)
kernel_ver = kernel_hor.T
grad_hor = correlate(image.astype(np.float32), kernel_hor)
grad_ver = correlate(image.astype(np.float32), kernel_ver)
grads = np.maximum(grad_hor, grad_ver)
return grads
def compute_gradient_sensivity(image):
height, width = image.shape
lapl_diff = np.array([
[ 1, -2, 1],
[-2, 4, -2],
[ 1, -2, 1]
], dtype=np.float32)
convolved = correlate(image.astype(np.float32), lapl_diff)
factor = np.sqrt(np.pi / 2) / (6 * (height - 2) * (width - 2))
gradient_sens = np.abs(convolved.ravel()).sum()
gradient_sens = gradient_sens * factor
return gradient_sens
def get_centroids_intensities(image, num_centroids=15):
    """Return the (up to) `num_centroids` most frequent intensity values.

    Parameters
    ----------
    image : ndarray of non-negative integers
        intensity image (any shape; it is flattened)
    num_centroids : int
        maximum number of intensity centroids to select

    Returns
    -------
    ndarray of float32, shape [k]
        the k = min(num_centroids, number of intensity bins) most common
        intensity values, in arbitrary order.
    """
    counts = np.bincount(image.ravel())
    # BUG FIX: images whose maximum intensity is small have fewer bincount
    # bins than num_centroids; the original unconditional
    # argpartition(counts, -num_centroids) raised "kth out of bounds".
    k = min(num_centroids, counts.size)
    top_bins = np.argpartition(counts, -k)[-k:]
    return top_bins.astype(np.float32)
@njit
def __compute_inten_weights(image, grad_weights, centroids, sensivity):
    """Gaussian intensity-affinity weights between each centroid and each
    pixel, modulated by the precomputed gradient weights (numba-compiled)."""
    diff = centroids - image
    return np.exp(-(diff * diff) / sensivity) * grad_weights
def compute_weights(image, grads_square, sensivity=80, num_centroids=15):
    """Per-centroid pixel weights.

    returns: ndarray of shape [Nc, H, W], where Nc is number of centroids
    """
    inten_centroids = get_centroids_intensities(image, num_centroids)
    grad_sensivity = compute_gradient_sensivity(image)
    grad_term = np.exp(-grads_square / grad_sensivity)
    # Broadcast everything to [Nc, H, W]; contiguous copies are made for
    # the numba kernel.
    img_b = image[None, :, :].copy()
    cent_b = inten_centroids[:, None, None].copy()
    grad_b = grad_term[None, :, :].copy()
    return __compute_inten_weights(img_b, grad_b, cent_b, sensivity)
def __sum_spatial_axes(image):
    """Collapse the trailing two (spatial) axes of `image` by summation."""
    row_sums = image.sum(axis=-1)
    return row_sums.sum(axis=-1)
def __compute_est_variance(weights, grads, grads_square, denominator):
    """Per-centroid gradient variance estimate: half of the weighted second
    moment minus the squared weighted first moment of the gradients."""
    second_moment = __sum_spatial_axes(weights * grads_square) / denominator
    first_moment = __sum_spatial_axes(weights * grads) / denominator
    return (second_moment - np.square(first_moment)) * 0.5
def compute_estimates(image):
    """Compute per-centroid (variance, intensity) estimate pairs for `image`.

    Returns
    -------
    (est_variance, est_intensity): two 1-D arrays of length Nc (the number
    of intensity centroids, see compute_weights), giving the weighted
    gradient variance and the weighted mean intensity for each centroid.
    Timing information for each stage is printed to stdout.
    """
    grads = compute_image_grads(image)
    grads_square = np.square(grads)
    start = time.time()
    weights = compute_weights(image, grads_square)  # shape [Nc, H, W]
    end = time.time()
    print("Weights computing took: {}".format(end - start))
    start = time.time()
    # Per-centroid normalisation constant: total weight over spatial axes.
    denominator = __sum_spatial_axes(weights)
    end = time.time()
    print("Denominator took: {}".format(end - start))
    start = time.time()
    est_variance = __compute_est_variance(
        weights, grads, grads_square, denominator)
    end = time.time()
    print("Est variance took: {}".format(end - start))
    print("Est variance shape: {}".format(est_variance.shape))
    start = time.time()
    # Weighted mean intensity per centroid.
    est_intensity = __sum_spatial_axes(weights * image) / denominator
    end = time.time()
    print("Intensity took: {}".format(end - start))
    print("Est image shape: {}".format(est_intensity.shape))
    return est_variance, est_intensity
def noise_estimation(image):
    """Estimate a per-pixel noise-variance map for `image` by fitting a
    linear intensity -> variance model to the per-centroid estimates and
    evaluating it at every pixel's intensity."""
    est_variance, est_intensity = compute_estimates(image)
    regressor = Ridge()
    regressor.fit(est_intensity[:, None], est_variance)
    per_pixel = regressor.predict(image.ravel()[:, None])
    return per_pixel.reshape(image.shape)
| [
"numpy.reshape",
"numpy.sqrt",
"numpy.argpartition",
"sklearn.linear_model.Ridge",
"numpy.square",
"numpy.exp",
"numpy.array",
"numpy.maximum",
"time.time"
] | [((425, 455), 'numpy.maximum', 'np.maximum', (['grad_hor', 'grad_ver'], {}), '(grad_hor, grad_ver)\n', (435, 455), True, 'import numpy as np\n'), ((568, 633), 'numpy.array', 'np.array', (['[[1, -2, 1], [-2, 4, -2], [1, -2, 1]]'], {'dtype': 'np.float32'}), '([[1, -2, 1], [-2, 4, -2], [1, -2, 1]], dtype=np.float32)\n', (576, 633), True, 'import numpy as np\n'), ((1650, 1687), 'numpy.exp', 'np.exp', (['(-grads_square / gradient_sens)'], {}), '(-grads_square / gradient_sens)\n', (1656, 1687), True, 'import numpy as np\n'), ((2409, 2425), 'numpy.square', 'np.square', (['grads'], {}), '(grads)\n', (2418, 2425), True, 'import numpy as np\n'), ((2439, 2450), 'time.time', 'time.time', ([], {}), '()\n', (2448, 2450), False, 'import time\n'), ((2512, 2523), 'time.time', 'time.time', ([], {}), '()\n', (2521, 2523), False, 'import time\n'), ((2598, 2609), 'time.time', 'time.time', ([], {}), '()\n', (2607, 2609), False, 'import time\n'), ((2666, 2677), 'time.time', 'time.time', ([], {}), '()\n', (2675, 2677), False, 'import time\n'), ((2746, 2757), 'time.time', 'time.time', ([], {}), '()\n', (2755, 2757), False, 'import time\n'), ((2862, 2873), 'time.time', 'time.time', ([], {}), '()\n', (2871, 2873), False, 'import time\n'), ((3007, 3018), 'time.time', 'time.time', ([], {}), '()\n', (3016, 3018), False, 'import time\n'), ((3099, 3110), 'time.time', 'time.time', ([], {}), '()\n', (3108, 3110), False, 'import time\n'), ((3367, 3374), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (3372, 3374), False, 'from sklearn.linear_model import Ridge\n'), ((3492, 3525), 'numpy.reshape', 'np.reshape', (['variance', 'image.shape'], {}), '(variance, image.shape)\n', (3502, 3525), True, 'import numpy as np\n'), ((747, 765), 'numpy.sqrt', 'np.sqrt', (['(np.pi / 2)'], {}), '(np.pi / 2)\n', (754, 765), True, 'import numpy as np\n'), ((194, 232), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {'dtype': 'np.float32'}), '([-1, 0, 1], dtype=np.float32)\n', (202, 232), True, 'import numpy as 
np\n'), ((1038, 1077), 'numpy.argpartition', 'np.argpartition', (['counts', '(-num_centroids)'], {}), '(counts, -num_centroids)\n', (1053, 1077), True, 'import numpy as np\n'), ((1244, 1272), 'numpy.square', 'np.square', (['(centroids - image)'], {}), '(centroids - image)\n', (1253, 1272), True, 'import numpy as np\n')] |
"""
Met Grid
--------
Grids for meterological stuff
"""
import os
import numpy as np
from multigrids import TemporalGrid, common
# try:
from atm.tools import stack_rasters
# except ImportError:
# from ..tools import stack_rasters
class MetGridShapeError(Exception):
    """Raised when met-grid input data does not have the expected shape."""
# Degree-day grids are plain TemporalGrids; keep an alias so the rest of
# this module can swap the base class in one place.
MetGridBase = TemporalGrid
def load_degree_days(*args, **kwargs):
    """Create a degree-day grid, optionally memory-mapping raw data.

    Parameters
    ----------
    args: tuple
        either (path_to_saved_grid,) to reload a saved TemporalGrid, or
        (rows, cols, timesteps, data_file) to build one from raw data
    kwargs: dict
        forwarded to MetGridBase; 'data_type' (default 'float32')
        selects the dtype used when memory-mapping data_file

    Returns
    -------
    MetGridBase (TemporalGrid)
    """
    degree_days = MetGridBase(*args, **kwargs)
    # Only the (rows, cols, timesteps, data_file) calling form carries a
    # raw data file.  The old guard (`len(args) > 1`) raised IndexError on
    # args[3] when called with two or three positional arguments.
    if len(args) >= 4:
        dt = kwargs.get('data_type', 'float32')
        mm = np.memmap(args[3], dtype=dt, mode='r')
        degree_days.grids = mm.reshape(degree_days.memory_shape)
    return degree_days
class DegreeDayGrids (object):
    """Paired freezing/thawing degree-day grids.

    Parameters
    ----------
    config: dict or atm.control
        configuration for object
    """
    def __init__ (self, *args, **kwargs):
        """Initialize from raw data files or previously saved grids.

        Parameters
        ----------
        args: tuple
            must be (rows, cols, timesteps, fdd_data, tdd_data) or
            (saved_fdd_temporal_grid, saved_tdd_temporal_grid)
        """
        if len(args) == 5:
            # raw-data form: shared (rows, cols, timesteps) header,
            # then the fdd file (args[3]) and tdd file (args[4])
            shape = args[:3]
            self.thawing = load_degree_days(
                shape[0], shape[1], shape[2], args[4], **kwargs
            )
            self.freezing = load_degree_days(
                shape[0], shape[1], shape[2], args[3], **kwargs
            )
        else:
            # reload form: two previously saved TemporalGrid files
            self.thawing = MetGridBase(args[1], **kwargs)
            self.freezing = MetGridBase(args[0], **kwargs)
    def save(self, path, filename_start):
        """Save the fdd and tdd grids in the temporal_grid output format.

        Parameters
        ----------
        path: path
            path to save at
        filename_start: str
            used to create file names, (i.e. 'test' becomes 'test_fdd.*',
            and 'test_tdd.*')
        """
        pairs = (
            (self.freezing, '_fdd.yaml'),
            (self.thawing, '_tdd.yaml'),
        )
        for grid, suffix in pairs:
            grid.save(os.path.join(path, filename_start + suffix))
    def __getitem__ (self, key):
        """Get the grid for thawing or freezing degree-days for one year.

        Parameters
        ----------
        key: tuple
            (str, year). str should be freeze, fdd, thaw, tdd, or heating.
            the year is an int

        Raises
        ------
        KeyError

        Returns
        -------
        np.array
            thawing or freezing grid for a year
        """
        mode = key[0].lower()
        year = key[1]
        if mode in ('freeze', 'fdd'):
            return self.freezing[year]
        if mode in ('thaw', 'tdd', 'heating'):
            return self.thawing[year]
        raise KeyError("key did not match tdd or fdd")
| [
"numpy.memmap",
"os.path.join"
] | [((608, 646), 'numpy.memmap', 'np.memmap', (['args[3]'], {'dtype': 'dt', 'mode': '"""r"""'}), "(args[3], dtype=dt, mode='r')\n", (617, 646), True, 'import numpy as np\n'), ((2028, 2076), 'os.path.join', 'os.path.join', (['path', "(filename_start + '_fdd.yaml')"], {}), "(path, filename_start + '_fdd.yaml')\n", (2040, 2076), False, 'import os\n'), ((2104, 2152), 'os.path.join', 'os.path.join', (['path', "(filename_start + '_tdd.yaml')"], {}), "(path, filename_start + '_tdd.yaml')\n", (2116, 2152), False, 'import os\n')] |
"""A plot of the deltas for erosion between scenarios."""
import os
import sys
from pyiem.dep import read_env
from pyiem.util import logger, get_dbconn
from tqdm import tqdm
import numpy as np
LOG = logger()
def readfile(fn, lengths):
    """Read one WEPP env file and add a per-length sediment delivery column.

    Aborts the whole program (via sys.exit) if the file cannot be parsed.
    """
    try:
        df = read_env(fn)
    except Exception as exp:
        LOG.info("ABORT: Attempting to read: %s resulted in: %s", fn, exp)
        sys.exit()
    # Keep only years through 2020.
    df = df[df["year"] < 2021]
    # Flowpath id is the middle token of e.g. ".../fp_123.env".
    basename = fn.split("/")[-1]
    fpath_id = int(basename.split(".")[0].split("_")[1])
    df["delivery"] = df["sed_del"] / lengths[fpath_id]
    return df
def load_lengths(hucs):
    """Map each HUC12 to a {flowpath_id: length} lookup from the database."""
    cursor = get_dbconn("idep").cursor()
    cursor.execute(
        "SELECT huc_12, fpath, ST_Length(geom) from flowpaths where "
        "scenario = 0 and huc_12 in %s",
        (tuple(hucs),),
    )
    lengths = {}
    for huc_12, fpath, length in cursor:
        lengths.setdefault(huc_12, {})[fpath] = length
    return lengths
def main():
    """Compute per-scenario erosion-delivery deltas relative to scenario 0.

    Reads the HUC12 list from ``myhucs.txt``, sums sediment delivery over
    every env file in each scenario/HUC12 directory, and prints the
    fractional change of scenarios 130-139 versus the scenario-0 baseline.
    """
    # Context manager so the file handle is closed (the old bare
    # open().read() leaked the descriptor).
    with open("myhucs.txt") as fh:
        hucs = fh.read().strip().split("\n")
    scenarios = [0]
    # extend() accepts any iterable; no need to wrap range() in list()
    scenarios.extend(range(130, 140))
    deltas = []
    baseline = None  # set by the scenario-0 pass, which runs first
    lengths = load_lengths(hucs)
    progress = tqdm(scenarios)
    for scenario in progress:
        vals = []
        for huc12 in hucs:
            progress.set_description(f"{scenario} {huc12}")
            mydir = f"/i/{scenario}/env/{huc12[:8]}/{huc12[8:]}"
            data = []
            for fn in os.listdir(mydir):
                res = readfile(os.path.join(mydir, fn), lengths[huc12])
                data.append(res["delivery"].sum())
            if not data:
                LOG.info("no data scenario: %s huc12: %s", scenario, huc12)
                sys.exit()
            vals.append(np.average(data))
        if scenario > 0:
            # fractional change vs the scenario-0 baseline
            deltas.append((np.average(vals) - baseline) / baseline)
        else:
            baseline = np.average(vals)
    print(deltas)


if __name__ == "__main__":
    main()
| [
"os.listdir",
"numpy.average",
"tqdm.tqdm",
"os.path.join",
"pyiem.util.get_dbconn",
"pyiem.util.logger",
"sys.exit",
"pyiem.dep.read_env"
] | [((201, 209), 'pyiem.util.logger', 'logger', ([], {}), '()\n', (207, 209), False, 'from pyiem.util import logger, get_dbconn\n'), ((698, 716), 'pyiem.util.get_dbconn', 'get_dbconn', (['"""idep"""'], {}), "('idep')\n", (708, 716), False, 'from pyiem.util import logger, get_dbconn\n'), ((1266, 1281), 'tqdm.tqdm', 'tqdm', (['scenarios'], {}), '(scenarios)\n', (1270, 1281), False, 'from tqdm import tqdm\n'), ((287, 299), 'pyiem.dep.read_env', 'read_env', (['fn'], {}), '(fn)\n', (295, 299), False, 'from pyiem.dep import read_env\n'), ((412, 422), 'sys.exit', 'sys.exit', ([], {}), '()\n', (420, 422), False, 'import sys\n'), ((1526, 1543), 'os.listdir', 'os.listdir', (['mydir'], {}), '(mydir)\n', (1536, 1543), False, 'import os\n'), ((1968, 1984), 'numpy.average', 'np.average', (['vals'], {}), '(vals)\n', (1978, 1984), True, 'import numpy as np\n'), ((1785, 1795), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1793, 1795), False, 'import sys\n'), ((1820, 1836), 'numpy.average', 'np.average', (['data'], {}), '(data)\n', (1830, 1836), True, 'import numpy as np\n'), ((1576, 1599), 'os.path.join', 'os.path.join', (['mydir', 'fn'], {}), '(mydir, fn)\n', (1588, 1599), False, 'import os\n'), ((1890, 1906), 'numpy.average', 'np.average', (['vals'], {}), '(vals)\n', (1900, 1906), True, 'import numpy as np\n')] |
#%%
import pandas as pd
import numpy as np
#%%
# Load the April 2021 Smashwords scrape.
df21 = pd.read_csv('data/Smashwords21/smashwords_april_2021.csv')
# %%
# check total rows vs. total UNIQUE links (expect some duplicates)
len(df21), len(df21.Link.unique())
#%%
# drop duplicate books, keyed on their Smashwords link
df21 = df21.drop_duplicates('Link')
#%%
# Glance at high and low prices
df21.Price.value_counts()
#%%
# parser function to turn the "Words" column into an integer
def my_parse(x):
    """Parse a Smashwords 'Words' cell into an int.

    Handles comma-grouped strings ('12,345'), plain numeric strings and
    numbers (including the 0 substituted by ``fillna(0)``).  Returns 0
    for anything unparseable instead of raising.

    The previous version used a bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) and raised an uncaught TypeError for
    ``None``; this one catches only the relevant exceptions.
    """
    if isinstance(x, str):
        try:
            return int(x.replace(',', ''))
        except ValueError:
            # free-text junk in the Words column
            return 0
    try:
        return int(x)
    except (TypeError, ValueError):
        return 0
#%%
df21['cleaned_words'] = df21.Words.fillna(0).apply(my_parse)
#%%
# we only want free books
filt = df21[df21.Price == "$0.00 USD"]
filt
#%%
# get the number of words per author
words = filt.groupby('Author').cleaned_words.sum()
#%%
# get the number of books per author
c = filt.Author.value_counts()
c
#%%
words
#%%
words.sum()
#%%
# unique author names, alphabetized
authors = sorted(list(set(words.index)))
#%%
# how many authors and how many books?
len(authors), len(filt)
#%%
# based on a consistent author:book ratio, how many should we
# expect based on 7815 books in original BookCorpus
print('books ratio, est')
authors_to_books = len(authors) / len(filt)
authors_to_books, authors_to_books * 7815
#%%
print('words ratio')
authors_to_words = len(authors) / words.sum()
authors_to_words
#%%
# uncomment in a notebook to use systematic sampling
# to look at some examples
# authors[0:100]
# authors[1000:1100]
# authors[10000:10100]
#%%
words.describe()
# %%
c.describe()
#%%
# sanity check: no infinite word counts
np.isinf(words).sum()
#%%
# "share" of the top 10% of authors by book count
c[c > c.quantile(0.90)].sum() / c.sum()
#%%
# "share" of the top 10% of authors by word count
words[words > words.quantile(0.90)].sum() / words.sum()
# %%
# Uncomment below for power law explorations
#import powerlaw
# #%%
# powerlaw.plot_pdf(c, color='b')
# # %%
# results = powerlaw.Fit(words)
# # %%
# print(results.power_law.alpha)
# print(results.power_law.xmin)
# # %%
# results.distribution_compare('power_law', 'exponential')
# # %%
# results.distribution_compare('power_law', 'lognormal')
# %%
| [
"numpy.isinf",
"pandas.read_csv"
] | [((71, 129), 'pandas.read_csv', 'pd.read_csv', (['"""data/Smashwords21/smashwords_april_2021.csv"""'], {}), "('data/Smashwords21/smashwords_april_2021.csv')\n", (82, 129), True, 'import pandas as pd\n'), ((1554, 1569), 'numpy.isinf', 'np.isinf', (['words'], {}), '(words)\n', (1562, 1569), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 27 15:04:09 2019
@author: EMG
EPMA Microsegregation Analysis
"""
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# curve-fit() function imported from scipy
from scipy.optimize import curve_fit
import statsmodels.api as sm
from matplotlib.pyplot import figure
import math
# %% Import EPMA data
""" Paste Filename Here"""
filename='10_1_19 DataBase W Mo Ti disabled new filament _Un 29 Duraloy M10 CCnew Mn.xlsx'
#filename='10_1_19 DataBase W Mo Ti disabled new filament _Un 33 Duraloy M10 DTA.xlsx'
#filename='9_27_19 DataBase W Ti disabled_Un 25 MT418 DTA Grid.xlsx'
#filename='9_24_19 DataBase W Ti disabled_Un 20 418 DTA Linetrace Core.xlsx'
#filename='10_1_19 DataBase W Mo Ti disabled new filament _Un 32 HP-2 DTA.xlsx'
data = pd.read_excel(filename)
data.head
[Len,Wid]=data.shape
data.info
data.describe()
# %% Pull EPMA data
# Per-element wt.% columns from the EPMA export.
Total=data['Elemental Totals']
Si=data['Si Elemental Percents']
Cr=data["Cr Elemental Percents"]
Fe=data["Fe Elemental Percents"]
Ni=data["Ni Elemental Percents"]
W=data["W Elemental Percents"]
Nb=data["Nb Elemental Percents"]
Mo=data["Mo Elemental Percents"]
Mn=data["Mn Elemental Percents"]
Ti=data["Ti Elemental Percents"]
#data['Total'] = data.sum(axis=1)-distance
#Total=data.sum(axis=1)-distance #totals the composition
# errors (per-element EPMA measurement uncertainty columns)
Si_er=data['Si Percent Errors']
Cr_er=data["Cr Percent Errors"]
Fe_er=data["Fe Percent Errors"]
Ni_er=data["Ni Percent Errors"]
W_er=data["W Percent Errors"]
Nb_er=data["Nb Percent Errors"]
Mo_er=data["Mo Percent Errors"]
Mn_er=data["Mn Percent Errors"]
Ti_er=data["Ti Percent Errors"]
# %%Lets get Plotting
# make subplots?
plt.scatter(Fe,Si,label="Si")
plt.scatter(Fe,Cr,label="Cr")
#plt.plot(Fe,Fe,label="Fe")
plt.scatter(Fe,Ni,label="Ni")
#plt.scatter(Fe,W,label="W")
plt.scatter(Fe,Nb,label="Nb")
#plt.scatter(Fe,Mo,label="Mo")
# NOTE(review): plots the W column but labels it "Mn" -- likely should be Mn; confirm
plt.scatter(Fe,W,label="Mn")
#plt.plot(Fe,Ti,label="Ti")
#
#
#plt.xlabel('Distance (um)')
plt.xlabel('Concentration Fe (wt.%)')
#
plt.ylabel('Concentration (wt.%)')
#
plt.title("Concentration of Elements")
#
plt.legend()
#plt.xlim(30,35)
#plt.ylim(0,40)
#
plt.show()
# %%Lets get Plotting Function of Cr
# make subplots?
plt.scatter(Cr,Si,label="Si")
plt.scatter(Cr,Fe,label="Fe")
#plt.plot(Fe,Fe,label="Fe")
plt.scatter(Cr,Ni,label="Ni")
#plt.scatter(Fe,W,label="W")
plt.scatter(Cr,Nb,label="Nb")
#plt.scatter(Fe,Mo,label="Mo")
# NOTE(review): same W-vs-"Mn" label mismatch as above -- confirm
plt.scatter(Cr,W,label="Mn")
#plt.plot(Fe,Ti,label="Ti")
#
#
#plt.xlabel('Distance (um)')
plt.xlabel('Concentration Cr (wt.%)')
#
plt.ylabel('Concentration (wt.%)')
#
plt.title("Concentration of Elements")
#
plt.legend()
#plt.xlim(30,35)
#plt.ylim(0,40)
#
plt.show()
# %% Subplots
fig, axs = plt.subplots(6, sharex=True)
fig.suptitle('Concentration wt% as a function of Fe')
axs[0].scatter(Fe,Si)
#axs[0].set_title('Si')
axs[1].scatter(Fe,Cr)
#axs[1].set_title('Cr')
axs[2].scatter(Fe,Ni)
axs[3].scatter(Fe,Nb)
axs[4].scatter(Fe,Mo)
axs[5].scatter(Fe,Mn)
#plt.legend()
#plt.xlim(30,35)
#plt.ylim(0,40)
# %% Filter for Carbides
# Points with low totals and very high Cr (or Nb) are taken as carbides.
totalwtcarbide = 95 #max comp for filtering for carbides
M7_filter = (data['Elemental Totals']<totalwtcarbide) & (data["Cr Elemental Percents"] > 70)
M7_comp=data[M7_filter]
AM7_comp=M7_comp.loc[:,"Si Elemental Percents":"V Elemental Percents"].mean(axis=0)
print(AM7_comp)
#M7_comp1=M7_comp.loc['Element Totals':'V Elemental Percents']#.mean(axis=1)
#print(M7_comp1)
MC_filter = (data['Elemental Totals']<totalwtcarbide) & (data["Nb Elemental Percents"] > 80)
MC_comp=data[MC_filter]
AMC_comp=MC_comp.loc[:,"Si Elemental Percents":"V Elemental Percents"].mean(axis=0)
print(AMC_comp)
Avg_comp=data.loc[:,"Si Elemental Percents":"V Elemental Percents"].mean(axis=0)
print(Avg_comp)
# %% WIRS
# Weighted Interval Ranking Sort: rank points by error-normalized
# deviation, then map rank to an apparent fraction solid.
#filter dataset to remove interdendritic regions
totalwtlow=97 #threshold for filtering interdendritic regions may need to be tweaked
totalwthigh=103
crmax=26
nbmax=1
nimin=30
nimax=36.5
""" This value will influence the kline"""
maxfs=1#0.96#0.899#82.19485515/100 HP-2 #0.96 for M10 #0.899 for HP
max_filter = (data['Elemental Totals']>totalwtlow) & (data["Cr Elemental Percents"] < crmax) & (data["Nb Elemental Percents"] < nbmax) & (data['Elemental Totals']<totalwthigh) & (data["Ni Elemental Percents"] > nimin) & (data["Ni Elemental Percents"] < nimax)
# NOTE(review): max_filter is currently disabled (whole dataset used) -- confirm intent
primary_y=data#[max_filter]
#print(primary_y)
#plt.plot(primary_y['Relative Microns'],primary_y[' "Si Elemental Percents"'],label="Si")
#plt.plot(primary_y['Relative Microns'],primary_y["Cr Elemental Percents"],label="Cr")
#plt.show()
#for negatively segregating elements
primary_y['Si_bar']=(primary_y['Si Elemental Percents'] - primary_y['Si Elemental Percents'].min())/primary_y['Si Percent Errors']
primary_y['Cr_bar']=(primary_y["Cr Elemental Percents"] - primary_y["Cr Elemental Percents"].min())/primary_y["Cr Percent Errors"]
primary_y['Ni_bar']=(primary_y["Ni Elemental Percents"] - primary_y["Ni Elemental Percents"].min())/primary_y["Ni Percent Errors"] #if Ni negatively segregates
primary_y['Nb_bar']=(primary_y["Nb Elemental Percents"] - primary_y["Nb Elemental Percents"].min())/primary_y["Nb Percent Errors"]
#primary_y['Mo_bar']=(primary_y["Mo Elemental Percents"] - primary_y["Mo Elemental Percents"].min())/primary_y["Mo Percent Errors"]
primary_y['Mn_bar']=(primary_y["Mn Elemental Percents"] - primary_y["Mn Elemental Percents"].min())/primary_y["Mn Percent Errors"]
#W_bar=(primary_y["W Elemental Percents"] - primary_y["W Elemental Percents"].min())/primary_y["W Percent Errors"]
#for positively segregating elements
primary_y['Fe_bar']=(primary_y["Fe Elemental Percents"].max() - primary_y["Fe Elemental Percents"])/primary_y["Fe Percent Errors"]
#primary_y['Ni_bar']=(primary_y["Ni Elemental Percents"].max() - primary_y["Ni Elemental Percents"])/primary_y["Ni Percent Errors"]
#Ti_bar=(primary_y["Ti Elemental Percents"].max() - primary_y["Ti Elemental Percents"])/primary_y["Ti Percent Errors"]
#Aggregate Values into a new dataframe
#Cbar=pd.DataFrame(data=[Si_bar,Cr_bar,Fe_bar,Ni_bar,W_bar,Nb_bar,Mo_bar,Ti_bar]).T
#Cbar.columns=['Sibar', 'Crbar', 'Febar', 'Nibar', 'Wbar', 'Nbbar', 'Mobar', 'Tibar']
#Cbar=pd.DataFrame(data=[Si_bar,Cr_bar,Fe_bar,Ni_bar,Nb_bar,Mo_bar]).T
#Cbar.columns=['Sibar', 'Crbar', 'Febar', 'Nibar', 'Nbbar', 'Mobar']
#primary_y['Avgbar'] = primary_y[['Si_bar', 'Cr_bar', 'Fe_bar', 'Ni_bar', 'Nb_bar', 'Mo_bar', 'Mn_bar']].mean(axis=1)
primary_y['Avgbar'] = primary_y[['Si_bar', 'Cr_bar', 'Fe_bar', 'Ni_bar', 'Nb_bar', 'Mn_bar']].mean(axis=1)
#print(primary_y)
#sort according to Cbar min to max
primary_y_sort=primary_y.sort_values(by=['Avgbar'])
#print(primary_y_sort)
primary_y_sort['Rank'] = primary_y_sort['Avgbar'].rank(ascending = 1)
#print(primary_y_sort)
# rank -> apparent fraction solid, scaled by maxfs
primary_y_sort['Fsolid']=(primary_y_sort['Rank'] - 0.5)/primary_y_sort['Rank'].max()*maxfs
#print(primary_y_sort['Fsolid'])
f_solid=primary_y_sort['Fsolid']
# %%Lets get Plotting
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Si Elemental Percents'],label="Si")
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Cr Elemental Percents'],label="Cr")
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Fe Elemental Percents'],label="Fe")
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Ni Elemental Percents'],label="Ni")
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Nb Elemental Percents'],label="Nb")
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Mo Elemental Percents'],label="Mo")
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Mn Elemental Percents'],label="Mn")
#plt.plot(Csort['Fsolid'],Csort['Fe'],label="Fe")
#plt.plot(Csort['Fsolid'],Csort['Ni'],label="Ni")
##plt.plot(Csort['Fsolid'],Csort['W'],label="W")
#plt.plot(Csort['Fsolid'],Csort['Nb'],label="Nb")
#plt.plot(Csort['Fsolid'],Csort['Mo'],label="Mo")
#plt.plot(Csort['Fsolid'],Csort['Ti'],label="Ti")
plt.xlabel('Fraction Solid')
plt.ylabel('Concentration (wt.%)')
plt.title("Concentration of Elements")
#plt.legend()
#loc='best'
plt.show()
# %% Calculate k from Core
# Partition coefficients from the first points to solidify (dendrite core)
#Nominal Composition
C0={'Si':1.929,'Cr':24.571,'Fe':37.695,'Ni':33.206,'Nb':1.28,'Mn':0.837,'Mo':0.07} #M10
#C0={'Si':1.14,'Cr':25.2,'Fe':36.66,'Ni':35,'Nb':0.418,'Mn':0.899,'Mo':0.06} #HP-2
#C0={'Si':1.929,'Cr':24.571,'Fe':37.695,'Ni':33.206,'Nb':1.28,'Mn':0.837,'Mo':0.07}
acore=10 #points to average from start of sorted data
#pull C0 estimates from grid
C0Si=data['Si Elemental Percents'].mean()
C0Cr=data["Cr Elemental Percents"].mean()
C0Fe=data["Fe Elemental Percents"].mean()
# NOTE(review): C0Ni averages primary_y_sort while the others average data -- confirm intent
C0Ni=primary_y_sort["Ni Elemental Percents"].mean()
C0W=data["W Elemental Percents"].mean()
C0Nb=data["Nb Elemental Percents"].mean()
#C0Mo=data["Mo Elemental Percents"].mean()
C0Mn=data["Mn Elemental Percents"].mean()
C0Ti=data["Ti Elemental Percents"].mean()
#Average of the first 6 points to solidify to the total average composition
KSi=primary_y_sort['Si Elemental Percents'].iloc[0:acore].mean(axis=0) / C0Si
#primary_y_sort['Si Elemental Percents'].iloc[0:5].mean(axis=0)
print(KSi)
KSic0=primary_y_sort['Si Elemental Percents'].iloc[0:acore].mean(axis=0) / C0['Si']
#primary_y_sort['Si Elemental Percents'].iloc[0:5].mean(axis=0)
print(KSic0)
KCr=primary_y_sort['Cr Elemental Percents'].iloc[0:acore].mean(axis=0) / C0Cr
print(KCr)
KCrc0=primary_y_sort['Cr Elemental Percents'].iloc[0:acore].mean(axis=0) / C0['Cr']
print(KCrc0)
KFe=primary_y_sort['Fe Elemental Percents'].iloc[0:acore].mean(axis=0) / C0Fe
print(KFe)
KFec0=primary_y_sort['Fe Elemental Percents'].iloc[0:acore].mean(axis=0) / C0['Fe']
print(KFec0)
KNi=primary_y_sort['Ni Elemental Percents'].iloc[0:acore].mean(axis=0) / C0Ni
print(KNi)
KNic0=primary_y_sort['Ni Elemental Percents'].iloc[0:acore].mean(axis=0) / C0['Ni']
print(KNic0)
#KW=primary_y_sort['W Elemental Percents'].iloc[0:5].mean(axis=0) / data["W Elemental Percents"].mean()
KNb=primary_y_sort['Nb Elemental Percents'].iloc[0:acore].mean(axis=0) / C0Nb
print(KNb)
KNbc0=primary_y_sort['Nb Elemental Percents'].iloc[0:acore].mean(axis=0) / C0['Nb']
print(KNbc0)
#KMo=primary_y_sort['Mo Elemental Percents'].iloc[0:acore].mean(axis=0) / C0Mo
#print(KMo)
#KMoc0=primary_y_sort['Mo Elemental Percents'].iloc[0:acore].mean(axis=0) / C0['Mo']
#print(KMoc0)
KMn=primary_y_sort['Mn Elemental Percents'].iloc[0:acore].mean(axis=0) / C0Mn
print(KMn)
KMnc0=primary_y_sort['Mn Elemental Percents'].iloc[0:acore].mean(axis=0) / C0['Mn']
print(KMnc0)
#KTi=primary_y_sort['Ti Elemental Percents'].iloc[0:5].mean(axis=0) / data["Ti Elemental Percents"].mean()
# %% Calc from Curve
# ln(Cs/C0) vs ln(1-fs); the slope gives k-1 under the Scheil model.
#CsSi=primary_y_sort['Si Elemental Percents'].div(C0['Si'])
#print(CsSi)
#lnCsSi=CsSi.div(C0['Si'])
#print(lnCsSi)
#lnCsNi=np.log(primary_y_sort['Ni Elemental Percents'].div(C0['Ni']))
lnCsSi=np.log(primary_y_sort['Si Elemental Percents'].div(data["Si Elemental Percents"].mean()))
lnCsCr=np.log(primary_y_sort['Cr Elemental Percents'].div(data["Cr Elemental Percents"].mean()))
lnCsFe=np.log(primary_y_sort['Fe Elemental Percents'].div(data["Fe Elemental Percents"].mean()))
lnCsNi=np.log(primary_y_sort['Ni Elemental Percents'].div(data["Ni Elemental Percents"].mean()))
lnCsNb=np.log(primary_y_sort['Nb Elemental Percents'].div(data["Nb Elemental Percents"].mean()))
#lnCsMo=np.log(primary_y_sort['Mo Elemental Percents'].div(data["Mo Elemental Percents"].mean()))
lnCsMn=np.log(primary_y_sort['Mn Elemental Percents'].div(data["Mn Elemental Percents"].mean()))
lnFL=np.log(1-primary_y_sort['Fsolid'])
FL=1-primary_y_sort['Fsolid']
FS=primary_y_sort['Fsolid']
#loglog(lnFL,lnCsSi)
plt.plot(lnFL,lnCsNi,label="Ni")
plt.xlabel('Ln(Fraction Liquid)')
plt.ylabel('Ln(Cs/C0)')
plt.title("Concentration of Elements")
plt.show()
def test(F,a,b):
# return math.log(a)+(1-a)*F+b
return (a-1)*np.log(1-F)+(b)
# Fit each element's ln(Cs/C0) vs fraction-solid curve; param[0] ~ k, param[1] = intercept.
Siparam, Siparam_cov = curve_fit(test, FS, lnCsSi)
Crparam, Crparam_cov = curve_fit(test, FS, lnCsCr)
Feparam, Feparam_cov = curve_fit(test, FS, lnCsFe)
Niparam, Niparam_cov = curve_fit(test, FS, lnCsNi)
Nbparam, Nbparam_cov = curve_fit(test, FS, lnCsNb)
#Moparam, Moparam_cov = curve_fit(test, FS, lnCsMo)
Mnparam, Mnparam_cov = curve_fit(test, FS, lnCsMn)
print("Sine funcion coefficients:")
print(Niparam)
print("Covariance of coefficients:")
print(Niparam_cov)
# ans stores the new y-data according to
# the coefficients given by curve-fit() function
ansCr = test(FS,Crparam[0],Crparam[1])#((Crparam[0]-1)*lnFL+Crparam[1])
ansSi = ((Siparam[0]-1)*lnFL+Siparam[1])
ansNi = ((Niparam[0]-1)*lnFL+Niparam[1])
ansFe = ((Feparam[0]-1)*lnFL+Feparam[1])
ansNb = ((Nbparam[0]-1)*lnFL+Nbparam[1])
#ansMo = ((Moparam[0]-1)*lnFL+Moparam[1])
ansMn = ((Mnparam[0]-1)*lnFL+Mnparam[1])
'''Below 4 lines can be un-commented for plotting results
using matplotlib as shown in the first example. '''
plt.plot(lnFL, lnCsCr, 'o', color ='red', label ="data")
plt.plot(lnFL, ansCr, '--', color ='blue', label ="optimized data")
plt.legend()
plt.title("Cr")
plt.xlabel('Ln(Fraction Solid)')
plt.ylabel('Ln(Cs/C0)')
plt.show()
plt.plot(lnFL, lnCsSi, 'o', color ='red', label ="data")
plt.plot(lnFL, ansSi, '--', color ='blue', label ="optimized data")
plt.legend()
plt.title("Si")
plt.xlabel('Ln(Fraction Solid)')
plt.ylabel('Ln(Cs/C0)')
plt.show()
plt.plot(lnFL, lnCsNi, 'o', color ='red', label ="data")
plt.plot(lnFL, ansNi, '--', color ='blue', label ="optimized data")
plt.legend()
plt.title("Ni")
plt.xlabel('Ln(Fraction Solid)')
plt.ylabel('Ln(Cs/C0)')
plt.show()
plt.plot(lnFL, lnCsNb, 'o', color ='red', label ="data")
plt.plot(lnFL, ansNb, '--', color ='blue', label ="optimized data")
plt.legend()
plt.title("Nb")
plt.xlabel('Ln(Fraction Solid)')
plt.ylabel('Ln(Cs/C0)')
plt.show()
plt.plot(lnFL, lnCsFe, 'o', color ='red', label ="data")
plt.plot(lnFL, ansFe, '--', color ='blue', label ="optimized data")
plt.legend()
plt.title("Fe")
plt.xlabel('Ln(Fraction Solid)')
plt.ylabel('Ln(Cs/C0)')
plt.show()
#define new k values
#K["Si"]=??
KSi_line=Siparam[0] #abs(1-Siparam[0])
print(KSi_line)
KCr_line=Crparam[0] #abs(Crparam[0]-2) #Crparam[0]
print(KCr_line)
KFe_line=Feparam[0] #klineFe=abs(Feparam[0]-2)
print(KFe_line)
KNi_line=Niparam[0] #abs(Niparam[0]-2) #Niparam[0]
print(KNi_line)
KNb_line=Nbparam[0] #abs(Nbparam[0]-2) #Nbparam[0]
print(KNb_line)
#KMo_line=Moparam[0] #abs(Moparam[0]-2) #Moparam[0]
#print(KMo_line)
KMn_line=Mnparam[0] #abs(Mnparam[0]-2) #Mnparam[0]
print(KMn_line)
# %%K fit with linear regression
# Same slope via ordinary least squares on ln(FL).
X=lnFL
X=sm.add_constant(X)
Simodel=sm.OLS(lnCsSi,X).fit()
Simodel.summary()
klSi=1-0.3637
print(klSi)
Crmodel=sm.OLS(lnCsCr,X).fit()
Crmodel.summary()
Nbmodel=sm.OLS(lnCsNb,X).fit()
Nbmodel.summary()
Mnmodel=sm.OLS(lnCsMn,X).fit()
Mnmodel.summary()
Nimodel=sm.OLS(lnCsNi,X).fit()
Nimodel.summary()
Femodel=sm.OLS(lnCsFe,X).fit()
Femodel.summary()
# %% Scheil Calculation
def scheil(k,Cnom,fs):
    """Scheil (non-equilibrium) solid composition: Cs = k*C0*(1-fs)**(k-1)."""
    liquid_fraction = 1 - fs
    return k * Cnom * liquid_fraction ** (k - 1)
#from dendrite core k values
# Non-equilibrium (Scheil) solidification paths per element.
NEQ_Si=scheil(KSi,C0Si,f_solid)
NEQ_Cr=scheil(KCr,C0Cr,f_solid)
NEQ_Fe=scheil(KFe,C0Fe,f_solid)
NEQ_Ni=scheil(KNi,C0Ni,f_solid)
NEQ_Mn=scheil(KMn,C0Mn,f_solid)
NEQ_Nb=scheil(KNb,C0Nb,f_solid)
#NEQ_Mo=scheil(KMo,C0Mo,f_solid)
# %% Equilibrium Calculation
def equil(k,Cnom,fs):
    """Equilibrium (lever rule) solid composition: Cs = k*C0 / (1 - fs + k*fs)."""
    denominator = (1 - fs) + k * fs
    return (k * Cnom) / denominator
# Equilibrium (lever-rule) solidification paths per element.
EQ_Si=equil(KSi,C0Si,f_solid)
EQ_Cr=equil(KCr,C0Cr,f_solid)
EQ_Fe=equil(KFe,C0Fe,f_solid)
EQ_Ni=equil(KNi,C0Ni,f_solid)
EQ_Mn=equil(KMn,C0Mn,f_solid)
EQ_Nb=equil(KNb,C0Nb,f_solid)
#EQ_Mo=equil(KMo,C0Mo,f_solid)
# %% Brody-Flemings (BF) back-diffusion calculation - work in progress
def BF(k,Cnom,fs,alpha):
    """Brody-Flemings back-diffusion model; reduces to Scheil when alpha=0."""
    beta = 1 - 2 * alpha * k
    return k * Cnom * (1 - beta * fs) ** ((k - 1) / beta)
# %% Plot solidification path
# Measured WIRS profile vs Scheil (NEQ, solid) and lever-rule (EQ, dashed).
figure(num=None, figsize=(6, 4), dpi=100, facecolor='w', edgecolor='k')
#plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Si Elemental Percents'],label="Si", color='blue')
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Cr Elemental Percents'],label="Cr", color='green')
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Fe Elemental Percents'],label="Fe", color='red')
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Ni Elemental Percents'],label="Ni", color='magenta')
#plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Nb Elemental Percents'],label="Nb", color='cyan')
#plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Mo Elemental Percents'],label="Mo")
#plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Mn Elemental Percents'],label="Mn", color='black')
#plt.plot(primary_y_sort['Fsolid'],NEQ_Si,label="NESi", color='blue')
plt.plot(primary_y_sort['Fsolid'],NEQ_Cr,label="NECr", color='green')
plt.plot(primary_y_sort['Fsolid'],NEQ_Fe,label="NEFe", color='red')
plt.plot(primary_y_sort['Fsolid'],NEQ_Ni,label="NENi", color='magenta')
#plt.plot(primary_y_sort['Fsolid'],NEQ_Nb,label="NENb", color='cyan')
#plt.plot(primary_y_sort['Fsolid'],NEQ_Mo,label="NEMo")
#plt.plot(primary_y_sort['Fsolid'],NEQ_Mn,label="NEMn", color='black')
#plt.plot(primary_y_sort['Fsolid'],EQ_Si,label="ESi", color='blue', linestyle='dashed')
plt.plot(primary_y_sort['Fsolid'],EQ_Cr,label="ECr", color='green', linestyle='dashed')
plt.plot(primary_y_sort['Fsolid'],EQ_Fe,label="EFe", color='red', linestyle='dashed')
plt.plot(primary_y_sort['Fsolid'],EQ_Ni,label="ENi", color='magenta', linestyle='dashed')
#plt.plot(primary_y_sort['Fsolid'],EQ_Nb,label="ENb", color='cyan', linestyle='dashed')
#plt.plot(primary_y_sort['Fsolid'],EQ_Mo,label="EMo")
#plt.plot(primary_y_sort['Fsolid'],EQ_Mn,label="EMn", color='black', linestyle='dashed')
plt.xlabel('Fraction Solid')
plt.ylabel('Concentration (wt.%)')
#plt.title("Solidification Path Solidification")
plt.xlim(0,1.0)
plt.ylim(20,45)
#plt.legend()
#loc='best'
plt.show()
# %% Plot solidification path Major Elements
# NOTE(review): this cell duplicates the previous one line-for-line -- consider removing
figure(num=None, figsize=(6, 4), dpi=100, facecolor='w', edgecolor='k')
#plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Si Elemental Percents'],label="Si", color='blue')
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Cr Elemental Percents'],label="Cr", color='green')
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Fe Elemental Percents'],label="Fe", color='red')
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Ni Elemental Percents'],label="Ni", color='magenta')
#plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Nb Elemental Percents'],label="Nb", color='cyan')
#plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Mo Elemental Percents'],label="Mo")
#plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Mn Elemental Percents'],label="Mn", color='black')
#plt.plot(primary_y_sort['Fsolid'],NEQ_Si,label="NESi", color='blue')
plt.plot(primary_y_sort['Fsolid'],NEQ_Cr,label="NECr", color='green')
plt.plot(primary_y_sort['Fsolid'],NEQ_Fe,label="NEFe", color='red')
plt.plot(primary_y_sort['Fsolid'],NEQ_Ni,label="NENi", color='magenta')
#plt.plot(primary_y_sort['Fsolid'],NEQ_Nb,label="NENb", color='cyan')
#plt.plot(primary_y_sort['Fsolid'],NEQ_Mo,label="NEMo")
#plt.plot(primary_y_sort['Fsolid'],NEQ_Mn,label="NEMn", color='black')
#plt.plot(primary_y_sort['Fsolid'],EQ_Si,label="ESi", color='blue', linestyle='dashed')
plt.plot(primary_y_sort['Fsolid'],EQ_Cr,label="ECr", color='green', linestyle='dashed')
plt.plot(primary_y_sort['Fsolid'],EQ_Fe,label="EFe", color='red', linestyle='dashed')
plt.plot(primary_y_sort['Fsolid'],EQ_Ni,label="ENi", color='magenta', linestyle='dashed')
#plt.plot(primary_y_sort['Fsolid'],EQ_Nb,label="ENb", color='cyan', linestyle='dashed')
#plt.plot(primary_y_sort['Fsolid'],EQ_Mo,label="EMo")
#plt.plot(primary_y_sort['Fsolid'],EQ_Mn,label="EMn", color='black', linestyle='dashed')
plt.xlabel('Fraction Solid')
plt.ylabel('Concentration (wt.%)')
#plt.title("Solidification Path Solidification")
plt.xlim(0,1.0)
plt.ylim(20,45)
#plt.legend()
#loc='best'
plt.show()
# %% Minor Elements
figure(num=None, figsize=(6, 4), dpi=100, facecolor='w', edgecolor='k')
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Si Elemental Percents'],label="Si", color='blue')
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Nb Elemental Percents'],label="Nb", color='cyan')
#plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Mo Elemental Percents'],label="Mo")
plt.plot(primary_y_sort['Fsolid'],primary_y_sort['Mn Elemental Percents'],label="Mn", color='black')
plt.plot(primary_y_sort['Fsolid'],NEQ_Si,label="NESi", color='blue')
plt.plot(primary_y_sort['Fsolid'],NEQ_Nb,label="NENb", color='cyan')
#plt.plot(primary_y_sort['Fsolid'],NEQ_Mo,label="NEMo")
plt.plot(primary_y_sort['Fsolid'],NEQ_Mn,label="NEMn", color='black')
plt.plot(primary_y_sort['Fsolid'],EQ_Si,label="ESi", color='blue', linestyle='dashed')
plt.plot(primary_y_sort['Fsolid'],EQ_Nb,label="ENb", color='cyan', linestyle='dashed')
#plt.plot(primary_y_sort['Fsolid'],EQ_Mo,label="EMo")
plt.plot(primary_y_sort['Fsolid'],EQ_Mn,label="EMn", color='black', linestyle='dashed')
plt.xlabel('Fraction Solid')
plt.ylabel('Concentration (wt.%)')
#plt.title("Solidification Path Solidification")
plt.xlim(0,1.0)
#plt.ylim(20,45)
#plt.legend()
#loc='best'
plt.show()
# %% ??????
#print(primary_y_sort['Si Elemental Percents'])
#print(lnCs)
#C=data['C']
#Nb=data['Nb']
#Si=data['Si']
#Ti=data['Ti']
#W=data['W']
#F_Y_MC=data['Fraction Y E1']
#F_MC=data['Fraction MC E1']
#F_Y_M7=data['Fraction Y E2']
#F_M7=data['Fraction M7C3 E2']
#
##print(F_Y_MC)
##print(F_MC)
##print(F_Y_M7)
##print(F_M7)
#
##set x and y
#y=F_MC
#X=Si
#X = sm.add_constant(X) ## let's add an intercept (beta_0) to our model
#
#
## To get statistics of the dataset
## Note the difference in argument order
#model = sm.OLS(y, X).fit()
#predictions = model.predict(X) # make the predictions by the model
#
## Print out the statistics
#model.summary()
# %% Output table?
from prettytable import PrettyTable
x = PrettyTable()
# NOTE(review): placeholder demo rows; field names do not match this analysis
x.field_names = ["City name", "Area", "Population", "Annual Rainfall"]
#['Si', 'Cr', 'Fe', 'Ni', 'Nb', 'Mn']
x.add_row(["Adelaide", 1295, 1158259, 600.5])
x.add_row(["Brisbane", 5905, 1857594, 1146.4])
x.add_row(["Darwin", 112, 120900, 1714.7])
x.add_row(["Hobart", 1357, 205556, 619.5])
x.add_row(["Sydney", 2058, 4336374, 1214.8])
x.add_row(["Melbourne", 1566, 3806092, 646.9])
x.add_row(["Perth", 5386, 1554769, 869.4])
print(x)
# %% Bar Charts
# Compare the three k estimates (core average, core/C0-nominal, curve fit).
# libraries
import numpy as np
import matplotlib.pyplot as plt
# set width of bar
barWidth = 0.25
# set height of bar
bars1 = [KSi, KCr, KFe, KNi, KNb, KMn]
bars2 = [KSic0, KCrc0, KFec0, KNic0, KNbc0, KMnc0]
bars3 = [KSi_line, KCr_line, KFe_line, KNi_line, KNb_line, KMn_line]
# Set position of bar on X axis
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
# Make the plot
plt.bar(r1, bars1, color='#7f6d5f', width=barWidth, edgecolor='white', label='Kmean')
plt.bar(r2, bars2, color='#557f2d', width=barWidth, edgecolor='white', label='KC0')
plt.bar(r3, bars3, color='#2d7f5e', width=barWidth, edgecolor='white', label='K_line')
# Add xticks on the middle of the group bars
plt.xlabel('Element', fontweight='bold')
plt.ylabel('Partition Coefficient (k)', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(bars1))], ['Si', 'Cr', 'Fe', 'Ni', 'Nb', 'Mn'])
# reference line at k = 1 (no segregation)
plt.axhline(1, color="black")#.plot(["Si","Mn"], [1,1], "k--")
# Create legend & Show graphic
plt.ylim(0,1.2)
plt.legend()
plt.show()
#plt.savefig('M10 DTA K-values.png')
| [
"matplotlib.pyplot.ylabel",
"numpy.log",
"pandas.read_excel",
"statsmodels.api.OLS",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"prettytable.PrettyTable",
"statsmodels.api.add_constant",
"matplotlib.... | [((1001, 1024), 'pandas.read_excel', 'pd.read_excel', (['filename'], {}), '(filename)\n', (1014, 1024), True, 'import pandas as pd\n'), ((1902, 1933), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Fe', 'Si'], {'label': '"""Si"""'}), "(Fe, Si, label='Si')\n", (1913, 1933), True, 'import matplotlib.pyplot as plt\n'), ((1933, 1964), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Fe', 'Cr'], {'label': '"""Cr"""'}), "(Fe, Cr, label='Cr')\n", (1944, 1964), True, 'import matplotlib.pyplot as plt\n'), ((1993, 2024), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Fe', 'Ni'], {'label': '"""Ni"""'}), "(Fe, Ni, label='Ni')\n", (2004, 2024), True, 'import matplotlib.pyplot as plt\n'), ((2054, 2085), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Fe', 'Nb'], {'label': '"""Nb"""'}), "(Fe, Nb, label='Nb')\n", (2065, 2085), True, 'import matplotlib.pyplot as plt\n'), ((2117, 2147), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Fe', 'W'], {'label': '"""Mn"""'}), "(Fe, W, label='Mn')\n", (2128, 2147), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2249), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Concentration Fe (wt.%)"""'], {}), "('Concentration Fe (wt.%)')\n", (2222, 2249), True, 'import matplotlib.pyplot as plt\n'), ((2254, 2288), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Concentration (wt.%)"""'], {}), "('Concentration (wt.%)')\n", (2264, 2288), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2331), 'matplotlib.pyplot.title', 'plt.title', (['"""Concentration of Elements"""'], {}), "('Concentration of Elements')\n", (2302, 2331), True, 'import matplotlib.pyplot as plt\n'), ((2336, 2348), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2346, 2348), True, 'import matplotlib.pyplot as plt\n'), ((2388, 2398), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2396, 2398), True, 'import matplotlib.pyplot as plt\n'), ((2456, 2487), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Cr', 'Si'], {'label': '"""Si"""'}), "(Cr, Si, 
label='Si')\n", (2467, 2487), True, 'import matplotlib.pyplot as plt\n'), ((2487, 2518), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Cr', 'Fe'], {'label': '"""Fe"""'}), "(Cr, Fe, label='Fe')\n", (2498, 2518), True, 'import matplotlib.pyplot as plt\n'), ((2547, 2578), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Cr', 'Ni'], {'label': '"""Ni"""'}), "(Cr, Ni, label='Ni')\n", (2558, 2578), True, 'import matplotlib.pyplot as plt\n'), ((2608, 2639), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Cr', 'Nb'], {'label': '"""Nb"""'}), "(Cr, Nb, label='Nb')\n", (2619, 2639), True, 'import matplotlib.pyplot as plt\n'), ((2671, 2701), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Cr', 'W'], {'label': '"""Mn"""'}), "(Cr, W, label='Mn')\n", (2682, 2701), True, 'import matplotlib.pyplot as plt\n'), ((2766, 2803), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Concentration Cr (wt.%)"""'], {}), "('Concentration Cr (wt.%)')\n", (2776, 2803), True, 'import matplotlib.pyplot as plt\n'), ((2808, 2842), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Concentration (wt.%)"""'], {}), "('Concentration (wt.%)')\n", (2818, 2842), True, 'import matplotlib.pyplot as plt\n'), ((2847, 2885), 'matplotlib.pyplot.title', 'plt.title', (['"""Concentration of Elements"""'], {}), "('Concentration of Elements')\n", (2856, 2885), True, 'import matplotlib.pyplot as plt\n'), ((2890, 2902), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2900, 2902), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2952), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2950, 2952), True, 'import matplotlib.pyplot as plt\n'), ((2986, 3014), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(6)'], {'sharex': '(True)'}), '(6, sharex=True)\n', (2998, 3014), True, 'import matplotlib.pyplot as plt\n'), ((7284, 7375), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Si Elemental Percents']"], {'label': '"""Si"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Si 
Elemental Percents'],\n label='Si')\n", (7292, 7375), True, 'import matplotlib.pyplot as plt\n'), ((7371, 7462), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Cr Elemental Percents']"], {'label': '"""Cr"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Cr Elemental Percents'],\n label='Cr')\n", (7379, 7462), True, 'import matplotlib.pyplot as plt\n'), ((7458, 7549), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Fe Elemental Percents']"], {'label': '"""Fe"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Fe Elemental Percents'],\n label='Fe')\n", (7466, 7549), True, 'import matplotlib.pyplot as plt\n'), ((7545, 7636), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Ni Elemental Percents']"], {'label': '"""Ni"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Ni Elemental Percents'],\n label='Ni')\n", (7553, 7636), True, 'import matplotlib.pyplot as plt\n'), ((7632, 7723), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Nb Elemental Percents']"], {'label': '"""Nb"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Nb Elemental Percents'],\n label='Nb')\n", (7640, 7723), True, 'import matplotlib.pyplot as plt\n'), ((7719, 7810), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Mo Elemental Percents']"], {'label': '"""Mo"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Mo Elemental Percents'],\n label='Mo')\n", (7727, 7810), True, 'import matplotlib.pyplot as plt\n'), ((7806, 7897), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Mn Elemental Percents']"], {'label': '"""Mn"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Mn Elemental Percents'],\n label='Mn')\n", (7814, 7897), True, 'import matplotlib.pyplot as plt\n'), ((8202, 8230), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction Solid"""'], {}), "('Fraction Solid')\n", (8212, 
8230), True, 'import matplotlib.pyplot as plt\n'), ((8232, 8266), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Concentration (wt.%)"""'], {}), "('Concentration (wt.%)')\n", (8242, 8266), True, 'import matplotlib.pyplot as plt\n'), ((8268, 8306), 'matplotlib.pyplot.title', 'plt.title', (['"""Concentration of Elements"""'], {}), "('Concentration of Elements')\n", (8277, 8306), True, 'import matplotlib.pyplot as plt\n'), ((8340, 8350), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8348, 8350), True, 'import matplotlib.pyplot as plt\n'), ((11834, 11870), 'numpy.log', 'np.log', (["(1 - primary_y_sort['Fsolid'])"], {}), "(1 - primary_y_sort['Fsolid'])\n", (11840, 11870), True, 'import numpy as np\n'), ((11956, 11990), 'matplotlib.pyplot.plot', 'plt.plot', (['lnFL', 'lnCsNi'], {'label': '"""Ni"""'}), "(lnFL, lnCsNi, label='Ni')\n", (11964, 11990), True, 'import matplotlib.pyplot as plt\n'), ((11992, 12025), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ln(Fraction Liquid)"""'], {}), "('Ln(Fraction Liquid)')\n", (12002, 12025), True, 'import matplotlib.pyplot as plt\n'), ((12029, 12052), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ln(Cs/C0)"""'], {}), "('Ln(Cs/C0)')\n", (12039, 12052), True, 'import matplotlib.pyplot as plt\n'), ((12056, 12094), 'matplotlib.pyplot.title', 'plt.title', (['"""Concentration of Elements"""'], {}), "('Concentration of Elements')\n", (12065, 12094), True, 'import matplotlib.pyplot as plt\n'), ((12098, 12108), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12106, 12108), True, 'import matplotlib.pyplot as plt\n'), ((12223, 12250), 'scipy.optimize.curve_fit', 'curve_fit', (['test', 'FS', 'lnCsSi'], {}), '(test, FS, lnCsSi)\n', (12232, 12250), False, 'from scipy.optimize import curve_fit\n'), ((12276, 12303), 'scipy.optimize.curve_fit', 'curve_fit', (['test', 'FS', 'lnCsCr'], {}), '(test, FS, lnCsCr)\n', (12285, 12303), False, 'from scipy.optimize import curve_fit\n'), ((12329, 12356), 'scipy.optimize.curve_fit', 
'curve_fit', (['test', 'FS', 'lnCsFe'], {}), '(test, FS, lnCsFe)\n', (12338, 12356), False, 'from scipy.optimize import curve_fit\n'), ((12382, 12409), 'scipy.optimize.curve_fit', 'curve_fit', (['test', 'FS', 'lnCsNi'], {}), '(test, FS, lnCsNi)\n', (12391, 12409), False, 'from scipy.optimize import curve_fit\n'), ((12435, 12462), 'scipy.optimize.curve_fit', 'curve_fit', (['test', 'FS', 'lnCsNb'], {}), '(test, FS, lnCsNb)\n', (12444, 12462), False, 'from scipy.optimize import curve_fit\n'), ((12542, 12569), 'scipy.optimize.curve_fit', 'curve_fit', (['test', 'FS', 'lnCsMn'], {}), '(test, FS, lnCsMn)\n', (12551, 12569), False, 'from scipy.optimize import curve_fit\n'), ((13243, 13297), 'matplotlib.pyplot.plot', 'plt.plot', (['lnFL', 'lnCsCr', '"""o"""'], {'color': '"""red"""', 'label': '"""data"""'}), "(lnFL, lnCsCr, 'o', color='red', label='data')\n", (13251, 13297), True, 'import matplotlib.pyplot as plt\n'), ((13302, 13367), 'matplotlib.pyplot.plot', 'plt.plot', (['lnFL', 'ansCr', '"""--"""'], {'color': '"""blue"""', 'label': '"""optimized data"""'}), "(lnFL, ansCr, '--', color='blue', label='optimized data')\n", (13310, 13367), True, 'import matplotlib.pyplot as plt\n'), ((13372, 13384), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13382, 13384), True, 'import matplotlib.pyplot as plt\n'), ((13386, 13401), 'matplotlib.pyplot.title', 'plt.title', (['"""Cr"""'], {}), "('Cr')\n", (13395, 13401), True, 'import matplotlib.pyplot as plt\n'), ((13403, 13435), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ln(Fraction Solid)"""'], {}), "('Ln(Fraction Solid)')\n", (13413, 13435), True, 'import matplotlib.pyplot as plt\n'), ((13437, 13460), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ln(Cs/C0)"""'], {}), "('Ln(Cs/C0)')\n", (13447, 13460), True, 'import matplotlib.pyplot as plt\n'), ((13463, 13473), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13471, 13473), True, 'import matplotlib.pyplot as plt\n'), ((13477, 13531), 
'matplotlib.pyplot.plot', 'plt.plot', (['lnFL', 'lnCsSi', '"""o"""'], {'color': '"""red"""', 'label': '"""data"""'}), "(lnFL, lnCsSi, 'o', color='red', label='data')\n", (13485, 13531), True, 'import matplotlib.pyplot as plt\n'), ((13536, 13601), 'matplotlib.pyplot.plot', 'plt.plot', (['lnFL', 'ansSi', '"""--"""'], {'color': '"""blue"""', 'label': '"""optimized data"""'}), "(lnFL, ansSi, '--', color='blue', label='optimized data')\n", (13544, 13601), True, 'import matplotlib.pyplot as plt\n'), ((13606, 13618), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13616, 13618), True, 'import matplotlib.pyplot as plt\n'), ((13621, 13636), 'matplotlib.pyplot.title', 'plt.title', (['"""Si"""'], {}), "('Si')\n", (13630, 13636), True, 'import matplotlib.pyplot as plt\n'), ((13638, 13670), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ln(Fraction Solid)"""'], {}), "('Ln(Fraction Solid)')\n", (13648, 13670), True, 'import matplotlib.pyplot as plt\n'), ((13672, 13695), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ln(Cs/C0)"""'], {}), "('Ln(Cs/C0)')\n", (13682, 13695), True, 'import matplotlib.pyplot as plt\n'), ((13698, 13708), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13706, 13708), True, 'import matplotlib.pyplot as plt\n'), ((13712, 13766), 'matplotlib.pyplot.plot', 'plt.plot', (['lnFL', 'lnCsNi', '"""o"""'], {'color': '"""red"""', 'label': '"""data"""'}), "(lnFL, lnCsNi, 'o', color='red', label='data')\n", (13720, 13766), True, 'import matplotlib.pyplot as plt\n'), ((13771, 13836), 'matplotlib.pyplot.plot', 'plt.plot', (['lnFL', 'ansNi', '"""--"""'], {'color': '"""blue"""', 'label': '"""optimized data"""'}), "(lnFL, ansNi, '--', color='blue', label='optimized data')\n", (13779, 13836), True, 'import matplotlib.pyplot as plt\n'), ((13841, 13853), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13851, 13853), True, 'import matplotlib.pyplot as plt\n'), ((13856, 13871), 'matplotlib.pyplot.title', 'plt.title', (['"""Ni"""'], {}), 
"('Ni')\n", (13865, 13871), True, 'import matplotlib.pyplot as plt\n'), ((13873, 13905), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ln(Fraction Solid)"""'], {}), "('Ln(Fraction Solid)')\n", (13883, 13905), True, 'import matplotlib.pyplot as plt\n'), ((13907, 13930), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ln(Cs/C0)"""'], {}), "('Ln(Cs/C0)')\n", (13917, 13930), True, 'import matplotlib.pyplot as plt\n'), ((13933, 13943), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13941, 13943), True, 'import matplotlib.pyplot as plt\n'), ((13947, 14001), 'matplotlib.pyplot.plot', 'plt.plot', (['lnFL', 'lnCsNb', '"""o"""'], {'color': '"""red"""', 'label': '"""data"""'}), "(lnFL, lnCsNb, 'o', color='red', label='data')\n", (13955, 14001), True, 'import matplotlib.pyplot as plt\n'), ((14006, 14071), 'matplotlib.pyplot.plot', 'plt.plot', (['lnFL', 'ansNb', '"""--"""'], {'color': '"""blue"""', 'label': '"""optimized data"""'}), "(lnFL, ansNb, '--', color='blue', label='optimized data')\n", (14014, 14071), True, 'import matplotlib.pyplot as plt\n'), ((14076, 14088), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14086, 14088), True, 'import matplotlib.pyplot as plt\n'), ((14091, 14106), 'matplotlib.pyplot.title', 'plt.title', (['"""Nb"""'], {}), "('Nb')\n", (14100, 14106), True, 'import matplotlib.pyplot as plt\n'), ((14108, 14140), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ln(Fraction Solid)"""'], {}), "('Ln(Fraction Solid)')\n", (14118, 14140), True, 'import matplotlib.pyplot as plt\n'), ((14142, 14165), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ln(Cs/C0)"""'], {}), "('Ln(Cs/C0)')\n", (14152, 14165), True, 'import matplotlib.pyplot as plt\n'), ((14168, 14178), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14176, 14178), True, 'import matplotlib.pyplot as plt\n'), ((14182, 14236), 'matplotlib.pyplot.plot', 'plt.plot', (['lnFL', 'lnCsFe', '"""o"""'], {'color': '"""red"""', 'label': '"""data"""'}), "(lnFL, lnCsFe, 'o', 
color='red', label='data')\n", (14190, 14236), True, 'import matplotlib.pyplot as plt\n'), ((14241, 14306), 'matplotlib.pyplot.plot', 'plt.plot', (['lnFL', 'ansFe', '"""--"""'], {'color': '"""blue"""', 'label': '"""optimized data"""'}), "(lnFL, ansFe, '--', color='blue', label='optimized data')\n", (14249, 14306), True, 'import matplotlib.pyplot as plt\n'), ((14311, 14323), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14321, 14323), True, 'import matplotlib.pyplot as plt\n'), ((14326, 14341), 'matplotlib.pyplot.title', 'plt.title', (['"""Fe"""'], {}), "('Fe')\n", (14335, 14341), True, 'import matplotlib.pyplot as plt\n'), ((14343, 14375), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Ln(Fraction Solid)"""'], {}), "('Ln(Fraction Solid)')\n", (14353, 14375), True, 'import matplotlib.pyplot as plt\n'), ((14377, 14400), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Ln(Cs/C0)"""'], {}), "('Ln(Cs/C0)')\n", (14387, 14400), True, 'import matplotlib.pyplot as plt\n'), ((14403, 14413), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14411, 14413), True, 'import matplotlib.pyplot as plt\n'), ((14966, 14984), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (14981, 14984), True, 'import statsmodels.api as sm\n'), ((16154, 16225), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(6, 4)', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(6, 4), dpi=100, facecolor='w', edgecolor='k')\n", (16160, 16225), False, 'from matplotlib.pyplot import figure\n'), ((16329, 16435), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Cr Elemental Percents']"], {'label': '"""Cr"""', 'color': '"""green"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Cr Elemental Percents'],\n label='Cr', color='green')\n", (16337, 16435), True, 'import matplotlib.pyplot as plt\n'), ((16431, 16535), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 
"primary_y_sort['Fe Elemental Percents']"], {'label': '"""Fe"""', 'color': '"""red"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Fe Elemental Percents'],\n label='Fe', color='red')\n", (16439, 16535), True, 'import matplotlib.pyplot as plt\n'), ((16531, 16639), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Ni Elemental Percents']"], {'label': '"""Ni"""', 'color': '"""magenta"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Ni Elemental Percents'],\n label='Ni', color='magenta')\n", (16539, 16639), True, 'import matplotlib.pyplot as plt\n'), ((17003, 17074), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'NEQ_Cr'], {'label': '"""NECr"""', 'color': '"""green"""'}), "(primary_y_sort['Fsolid'], NEQ_Cr, label='NECr', color='green')\n", (17011, 17074), True, 'import matplotlib.pyplot as plt\n'), ((17074, 17143), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'NEQ_Fe'], {'label': '"""NEFe"""', 'color': '"""red"""'}), "(primary_y_sort['Fsolid'], NEQ_Fe, label='NEFe', color='red')\n", (17082, 17143), True, 'import matplotlib.pyplot as plt\n'), ((17143, 17216), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'NEQ_Ni'], {'label': '"""NENi"""', 'color': '"""magenta"""'}), "(primary_y_sort['Fsolid'], NEQ_Ni, label='NENi', color='magenta')\n", (17151, 17216), True, 'import matplotlib.pyplot as plt\n'), ((17507, 17600), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'EQ_Cr'], {'label': '"""ECr"""', 'color': '"""green"""', 'linestyle': '"""dashed"""'}), "(primary_y_sort['Fsolid'], EQ_Cr, label='ECr', color='green',\n linestyle='dashed')\n", (17515, 17600), True, 'import matplotlib.pyplot as plt\n'), ((17596, 17687), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'EQ_Fe'], {'label': '"""EFe"""', 'color': '"""red"""', 'linestyle': '"""dashed"""'}), "(primary_y_sort['Fsolid'], EQ_Fe, label='EFe', color='red',\n linestyle='dashed')\n", 
(17604, 17687), True, 'import matplotlib.pyplot as plt\n'), ((17683, 17778), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'EQ_Ni'], {'label': '"""ENi"""', 'color': '"""magenta"""', 'linestyle': '"""dashed"""'}), "(primary_y_sort['Fsolid'], EQ_Ni, label='ENi', color='magenta',\n linestyle='dashed')\n", (17691, 17778), True, 'import matplotlib.pyplot as plt\n'), ((18012, 18040), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction Solid"""'], {}), "('Fraction Solid')\n", (18022, 18040), True, 'import matplotlib.pyplot as plt\n'), ((18044, 18078), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Concentration (wt.%)"""'], {}), "('Concentration (wt.%)')\n", (18054, 18078), True, 'import matplotlib.pyplot as plt\n'), ((18132, 18148), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (18140, 18148), True, 'import matplotlib.pyplot as plt\n'), ((18149, 18165), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(20)', '(45)'], {}), '(20, 45)\n', (18157, 18165), True, 'import matplotlib.pyplot as plt\n'), ((18196, 18206), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18204, 18206), True, 'import matplotlib.pyplot as plt\n'), ((18260, 18331), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(6, 4)', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(6, 4), dpi=100, facecolor='w', edgecolor='k')\n", (18266, 18331), False, 'from matplotlib.pyplot import figure\n'), ((18435, 18541), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Cr Elemental Percents']"], {'label': '"""Cr"""', 'color': '"""green"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Cr Elemental Percents'],\n label='Cr', color='green')\n", (18443, 18541), True, 'import matplotlib.pyplot as plt\n'), ((18537, 18641), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Fe Elemental Percents']"], {'label': '"""Fe"""', 'color': 
'"""red"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Fe Elemental Percents'],\n label='Fe', color='red')\n", (18545, 18641), True, 'import matplotlib.pyplot as plt\n'), ((18637, 18745), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Ni Elemental Percents']"], {'label': '"""Ni"""', 'color': '"""magenta"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Ni Elemental Percents'],\n label='Ni', color='magenta')\n", (18645, 18745), True, 'import matplotlib.pyplot as plt\n'), ((19109, 19180), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'NEQ_Cr'], {'label': '"""NECr"""', 'color': '"""green"""'}), "(primary_y_sort['Fsolid'], NEQ_Cr, label='NECr', color='green')\n", (19117, 19180), True, 'import matplotlib.pyplot as plt\n'), ((19180, 19249), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'NEQ_Fe'], {'label': '"""NEFe"""', 'color': '"""red"""'}), "(primary_y_sort['Fsolid'], NEQ_Fe, label='NEFe', color='red')\n", (19188, 19249), True, 'import matplotlib.pyplot as plt\n'), ((19249, 19322), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'NEQ_Ni'], {'label': '"""NENi"""', 'color': '"""magenta"""'}), "(primary_y_sort['Fsolid'], NEQ_Ni, label='NENi', color='magenta')\n", (19257, 19322), True, 'import matplotlib.pyplot as plt\n'), ((19613, 19706), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'EQ_Cr'], {'label': '"""ECr"""', 'color': '"""green"""', 'linestyle': '"""dashed"""'}), "(primary_y_sort['Fsolid'], EQ_Cr, label='ECr', color='green',\n linestyle='dashed')\n", (19621, 19706), True, 'import matplotlib.pyplot as plt\n'), ((19702, 19793), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'EQ_Fe'], {'label': '"""EFe"""', 'color': '"""red"""', 'linestyle': '"""dashed"""'}), "(primary_y_sort['Fsolid'], EQ_Fe, label='EFe', color='red',\n linestyle='dashed')\n", (19710, 19793), True, 'import matplotlib.pyplot as plt\n'), ((19789, 19884), 
'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'EQ_Ni'], {'label': '"""ENi"""', 'color': '"""magenta"""', 'linestyle': '"""dashed"""'}), "(primary_y_sort['Fsolid'], EQ_Ni, label='ENi', color='magenta',\n linestyle='dashed')\n", (19797, 19884), True, 'import matplotlib.pyplot as plt\n'), ((20118, 20146), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction Solid"""'], {}), "('Fraction Solid')\n", (20128, 20146), True, 'import matplotlib.pyplot as plt\n'), ((20150, 20184), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Concentration (wt.%)"""'], {}), "('Concentration (wt.%)')\n", (20160, 20184), True, 'import matplotlib.pyplot as plt\n'), ((20238, 20254), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (20246, 20254), True, 'import matplotlib.pyplot as plt\n'), ((20255, 20271), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(20)', '(45)'], {}), '(20, 45)\n', (20263, 20271), True, 'import matplotlib.pyplot as plt\n'), ((20302, 20312), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20310, 20312), True, 'import matplotlib.pyplot as plt\n'), ((20337, 20408), 'matplotlib.pyplot.figure', 'figure', ([], {'num': 'None', 'figsize': '(6, 4)', 'dpi': '(100)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(num=None, figsize=(6, 4), dpi=100, facecolor='w', edgecolor='k')\n", (20343, 20408), False, 'from matplotlib.pyplot import figure\n'), ((20410, 20515), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Si Elemental Percents']"], {'label': '"""Si"""', 'color': '"""blue"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Si Elemental Percents'],\n label='Si', color='blue')\n", (20418, 20515), True, 'import matplotlib.pyplot as plt\n'), ((20511, 20616), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Nb Elemental Percents']"], {'label': '"""Nb"""', 'color': '"""cyan"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Nb Elemental Percents'],\n 
label='Nb', color='cyan')\n", (20519, 20616), True, 'import matplotlib.pyplot as plt\n'), ((20700, 20806), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", "primary_y_sort['Mn Elemental Percents']"], {'label': '"""Mn"""', 'color': '"""black"""'}), "(primary_y_sort['Fsolid'], primary_y_sort['Mn Elemental Percents'],\n label='Mn', color='black')\n", (20708, 20806), True, 'import matplotlib.pyplot as plt\n'), ((20806, 20876), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'NEQ_Si'], {'label': '"""NESi"""', 'color': '"""blue"""'}), "(primary_y_sort['Fsolid'], NEQ_Si, label='NESi', color='blue')\n", (20814, 20876), True, 'import matplotlib.pyplot as plt\n'), ((20876, 20946), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'NEQ_Nb'], {'label': '"""NENb"""', 'color': '"""cyan"""'}), "(primary_y_sort['Fsolid'], NEQ_Nb, label='NENb', color='cyan')\n", (20884, 20946), True, 'import matplotlib.pyplot as plt\n'), ((21003, 21074), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'NEQ_Mn'], {'label': '"""NEMn"""', 'color': '"""black"""'}), "(primary_y_sort['Fsolid'], NEQ_Mn, label='NEMn', color='black')\n", (21011, 21074), True, 'import matplotlib.pyplot as plt\n'), ((21076, 21168), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'EQ_Si'], {'label': '"""ESi"""', 'color': '"""blue"""', 'linestyle': '"""dashed"""'}), "(primary_y_sort['Fsolid'], EQ_Si, label='ESi', color='blue',\n linestyle='dashed')\n", (21084, 21168), True, 'import matplotlib.pyplot as plt\n'), ((21164, 21256), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'EQ_Nb'], {'label': '"""ENb"""', 'color': '"""cyan"""', 'linestyle': '"""dashed"""'}), "(primary_y_sort['Fsolid'], EQ_Nb, label='ENb', color='cyan',\n linestyle='dashed')\n", (21172, 21256), True, 'import matplotlib.pyplot as plt\n'), ((21307, 21400), 'matplotlib.pyplot.plot', 'plt.plot', (["primary_y_sort['Fsolid']", 'EQ_Mn'], {'label': 
'"""EMn"""', 'color': '"""black"""', 'linestyle': '"""dashed"""'}), "(primary_y_sort['Fsolid'], EQ_Mn, label='EMn', color='black',\n linestyle='dashed')\n", (21315, 21400), True, 'import matplotlib.pyplot as plt\n'), ((21400, 21428), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction Solid"""'], {}), "('Fraction Solid')\n", (21410, 21428), True, 'import matplotlib.pyplot as plt\n'), ((21432, 21466), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Concentration (wt.%)"""'], {}), "('Concentration (wt.%)')\n", (21442, 21466), True, 'import matplotlib.pyplot as plt\n'), ((21520, 21536), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (21528, 21536), True, 'import matplotlib.pyplot as plt\n'), ((21585, 21595), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21593, 21595), True, 'import matplotlib.pyplot as plt\n'), ((22351, 22364), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (22362, 22364), False, 'from prettytable import PrettyTable\n'), ((23271, 23360), 'matplotlib.pyplot.bar', 'plt.bar', (['r1', 'bars1'], {'color': '"""#7f6d5f"""', 'width': 'barWidth', 'edgecolor': '"""white"""', 'label': '"""Kmean"""'}), "(r1, bars1, color='#7f6d5f', width=barWidth, edgecolor='white',\n label='Kmean')\n", (23278, 23360), True, 'import matplotlib.pyplot as plt\n'), ((23358, 23445), 'matplotlib.pyplot.bar', 'plt.bar', (['r2', 'bars2'], {'color': '"""#557f2d"""', 'width': 'barWidth', 'edgecolor': '"""white"""', 'label': '"""KC0"""'}), "(r2, bars2, color='#557f2d', width=barWidth, edgecolor='white',\n label='KC0')\n", (23365, 23445), True, 'import matplotlib.pyplot as plt\n'), ((23443, 23533), 'matplotlib.pyplot.bar', 'plt.bar', (['r3', 'bars3'], {'color': '"""#2d7f5e"""', 'width': 'barWidth', 'edgecolor': '"""white"""', 'label': '"""K_line"""'}), "(r3, bars3, color='#2d7f5e', width=barWidth, edgecolor='white',\n label='K_line')\n", (23450, 23533), True, 'import matplotlib.pyplot as plt\n'), ((23580, 23620), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Element"""'], {'fontweight': '"""bold"""'}), "('Element', fontweight='bold')\n", (23590, 23620), True, 'import matplotlib.pyplot as plt\n'), ((23622, 23680), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Partition Coefficient (k)"""'], {'fontweight': '"""bold"""'}), "('Partition Coefficient (k)', fontweight='bold')\n", (23632, 23680), True, 'import matplotlib.pyplot as plt\n'), ((23775, 23804), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(1)'], {'color': '"""black"""'}), "(1, color='black')\n", (23786, 23804), True, 'import matplotlib.pyplot as plt\n'), ((23871, 23887), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1.2)'], {}), '(0, 1.2)\n', (23879, 23887), True, 'import matplotlib.pyplot as plt\n'), ((23888, 23900), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (23898, 23900), True, 'import matplotlib.pyplot as plt\n'), ((23902, 23912), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23910, 23912), True, 'import matplotlib.pyplot as plt\n'), ((14994, 15011), 'statsmodels.api.OLS', 'sm.OLS', (['lnCsSi', 'X'], {}), '(lnCsSi, X)\n', (15000, 15011), True, 'import statsmodels.api as sm\n'), ((15073, 15090), 'statsmodels.api.OLS', 'sm.OLS', (['lnCsCr', 'X'], {}), '(lnCsCr, X)\n', (15079, 15090), True, 'import statsmodels.api as sm\n'), ((15124, 15141), 'statsmodels.api.OLS', 'sm.OLS', (['lnCsNb', 'X'], {}), '(lnCsNb, X)\n', (15130, 15141), True, 'import statsmodels.api as sm\n'), ((15175, 15192), 'statsmodels.api.OLS', 'sm.OLS', (['lnCsMn', 'X'], {}), '(lnCsMn, X)\n', (15181, 15192), True, 'import statsmodels.api as sm\n'), ((15226, 15243), 'statsmodels.api.OLS', 'sm.OLS', (['lnCsNi', 'X'], {}), '(lnCsNi, X)\n', (15232, 15243), True, 'import statsmodels.api as sm\n'), ((15277, 15294), 'statsmodels.api.OLS', 'sm.OLS', (['lnCsFe', 'X'], {}), '(lnCsFe, X)\n', (15283, 15294), True, 'import statsmodels.api as sm\n'), ((12183, 12196), 'numpy.log', 'np.log', (['(1 - F)'], {}), '(1 - F)\n', (12189, 
12196), True, 'import numpy as np\n')] |
import numpy as np
def flux(x):
return 0.5 * np.square(x)
def minf(a,b):
# if b<=0:
# return flux(b)
# elif a>=0:
# return flux(a)
# else:
# return 0.0
return (b <= 0) * flux(b) + (a >= 0) * flux(a)
def maxf(a,b):
return np.maximum(flux(a),flux(b))
| [
"numpy.square"
] | [((50, 62), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (59, 62), True, 'import numpy as np\n')] |
import numpy as np
import zmq
from meta_mb.logger import logger
import gym
from gym import spaces
from meta_mb.meta_envs.base import MetaEnv
import time
class PR2Env(MetaEnv, gym.utils.EzPickle):
    """Gym-style environment driving a PR2 arm through a ZeroMQ REQ socket.

    Actions are 7 joint torques, clipped to +/-5.  Observations are the
    server-reported state with the last 6 entries dropped
    (obs_dim = 23 - 6 = 17).  Reward is shaped toward the fixed goal
    configuration stored in ``self.goal``.
    """
    # Per-joint gains; unused in this file -- presumably consumed by the
    # controller server.  TODO confirm.
    PR2_GAINS = np.array([3.09, 1.08, 0.393, 0.674, 0.111, 0.152, 0.098])
    def __init__(self):
        # Earlier goal configurations kept for reference:
        # self.goal = np.array([-0.1511672, 0.43030036, 0.71051866])
        # self.goal = np.array([-7.29517469e-02, -2.86581420e-02, 5.70482330e-01, -8.47117285e-02,
        #                       -1.18948075e-02, 5.98804157e-01, -5.13613156e-02, -8.77137857e-02,
        #                       5.85055245e-01])
        # self.goal = np.array([0.1644276, -0.31147851, 1.52381236,
        #                       -0.90102611, -4.98011356, -1.66494068, -1.01992367])
        # List of goal vectors: 7 joint angles followed by 3 gripper-related
        # entries (indexed by ``self.idx``) -- TODO confirm layout.
        self.goal = [np.array([ 5.96785857e-01, -2.85932172e-01, 1.59162625e+00, -1.10704422e+00,
                                -5.06300837e+00, -1.71918953e+00, -6.13503858e-01, 2.79299305e-01, 3.57783994e-01,
                                1.16489066e-01])
        ]
        # REQ/REP channel to the robot control server.
        context = zmq.Context()
        self.socket = context.socket(zmq.REQ)
        print("Connecting to the server...")
        self.socket.connect("tcp://127.0.0.1:7777")
        max_torques = np.array([5] * 7)
        self.frame_skip = 4
        # Earlier initial configurations kept for reference:
        # self.init_qpos = np.array([0.8, 0.4, 1.5, -1., -1.7, -.5, 0.])
        # self.init_qpos = np.array([0.7783511, -0.25606883, 1.12741523,
        #                            -0.87482262, -7.52093116, -0.09122304, 3.15171159])
        # self.init_qpos = np.array([7.10011717e-01, -3.56398411e-01, 9.63204825e-01, -9.12897313e-01,
        #                            -4.66548326e+00, -2.05669173e+00, -2.77487280e-01])
        # self.init_qpos = np.array([0.5, -0.5, 1, -0.5, -5, 0, 1])
        # self.init_qpos = np.array([0.7, -0.2, 1.1, -0.8, -7.5, 0, 3])
        # self.init_qpos = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
        self.init_qpos = np.array([0,0,0,0,0,0,0])
        self.act_dim = 7
        self.obs_dim = 23 - 6
        self._t = 0
        # self.obs_dim = 4
        # Performs one real reset over the socket to record a reference
        # observation used by the fake (real=False) reset path.
        self._init_obs = self.reset(real=True).copy()
        self._low, self._high = -max_torques, max_torques
        gym.utils.EzPickle.__init__(self)
    def step(self, action):
        """Apply ``action`` for ``frame_skip`` server ticks; return (ob, reward, done, info)."""
        ob = self.do_simulation(action, self.frame_skip)
        # time.sleep(1 / 20)
        # reward_dist = -np.linalg.norm(ob[-3:] - self.goal)
        # reward_dist = -np.linalg.norm(ob - self.goal[self.idx])
        # reward_ctrl = -np.square(ob[7:14]).sum()
        # Reward: closeness of first 5 joints to goal, small joint
        # velocities (entries 7:14), and gripper entries near goal.
        reward_dist = np.exp(-np.linalg.norm(ob[:5] - self.goal[self.idx][:5]))
        reward_vel = .5 * np.exp(-np.linalg.norm(ob[7:14]))
        reward_gripper = 2 * np.exp(-np.linalg.norm(np.concatenate([ob[5:7], ob[-3:]], axis=-1) - self.goal[self.idx][5:]))
        reward = reward_dist + reward_vel + reward_gripper
        done = False  # episode termination is handled externally
        self._t += 1
        return ob, reward, done, dict(reward_dist=reward_dist) # , reward_ctrl=reward_ctrl)
    def do_simulation(self, action, frame_skip):
        """Send ``action`` to the server ``frame_skip`` times; return the last observation."""
        assert frame_skip > 0
        if action.ndim == 2:
            action = action.reshape(-1)
        action = np.clip(action, self._low, self._high)
        # action = np.concatenate([[0] * 5, action])
        for _ in range(frame_skip):
            # Two-part message: JSON metadata, then the raw action buffer.
            md = dict(
                dtype=str(action.dtype),
                cmd="action",
            )
            self.socket.send_json(md, 0 | zmq.SNDMORE)
            self.socket.send(action, 0, copy=True, track=False)
            # REQ socket: must receive the reply before the next send.
            ob = self._get_obs()
        return ob
    def reward(self, obs, act, obs_next):
        """Recompute the step reward from transitions; supports batched (2-d) or single (1-d) input."""
        assert obs.ndim == act.ndim == obs_next.ndim
        if obs.ndim == 2:
            assert obs.shape == obs_next.shape and act.shape[0] == obs.shape[0]
            # reward_ctrl = -0.5 * 0.1 * np.sum(np.square(act/(2 * self._high)), axis=1)
            # reward_ctrl = -0.5 * 0.1 * np.sum(np.square(obs[:, 7:14]), axis=1)
            # reward_dist = -np.linalg.norm(obs_next[:,-3:] - self.goal, axis=1)
            reward_dist = np.exp(-np.linalg.norm(obs_next[:, :5] - self.goal[self.idx][:5], axis=1))
            reward_vel = .5 * np.exp(-np.linalg.norm(obs_next[:, 7:14], axis=1))
            reward_gripper = 2 * np.exp(-np.linalg.norm(np.concatenate([obs_next[:, 5:7], obs_next[:, -3:]],
                                                                axis=-1) - self.goal[self.idx][5:], axis=1))
            reward = reward_dist + reward_vel + reward_gripper
            return np.clip(reward, -100, 100)
        elif obs.ndim == 1:
            assert obs.shape == obs_next.shape
            # reward_ctrl = -0.5 * 0.1 * np.sum(np.square(act/(2 * self._high)))
            # reward_ctrl = -0.5 * 0.1 * np.sum(np.square(obs[7:14]))
            # reward_dist = -np.linalg.norm(obs_next[-3:] - self.goal)
            reward_dist = np.exp(-np.linalg.norm(obs_next[:5] - self.goal[self.idx][:5]))
            reward_vel = .5 * np.exp(-np.linalg.norm(obs_next[7:14]))
            reward_gripper = 2 * np.exp(-np.linalg.norm(np.concatenate([obs_next[5:7], obs_next[-3:]], axis=-1) - self.goal[self.idx][5:]))
            reward = reward_dist + reward_vel + reward_gripper
            return np.clip(reward, -100, 100)
        else:
            raise NotImplementedError
    def reset(self, real=False):
        """Reset the episode clock; when ``real``, command the server to a perturbed init pose."""
        self._t = 0
        if real:
            print('real')
            # Small uniform noise around the initial joint configuration.
            qpos = self.init_qpos + np.random.uniform(-0.01, 0.01, size=len(self.init_qpos))
            md = dict(
                dtype=str(qpos.dtype),
                cmd="reset",
            )
            self.socket.send_json(md, 0 | zmq.SNDMORE)
            self.socket.send(qpos, 0, copy=True, track=False)
            return self._get_obs()
        else:
            # Cheap reset: perturb the cached initial observation instead of
            # touching the robot.
            return self._init_obs + np.random.uniform(-0.01, 0.01, size=len(self._init_obs))
    def _get_obs(self):
        """Receive one observation from the server, dropping the last 6 entries."""
        msg = self.socket.recv(flags=0, copy=True, track=False)
        buf = memoryview(msg)
        obs = np.frombuffer(buf, dtype=np.float64)
        # return np.concatenate([obs.reshape(-1)[:2], obs.reshape(-1)[7:9]])
        return obs[:-6]
    def log_diagnostics(self, paths, prefix=''):
        """Log average and final distance rewards across sampled paths."""
        dist = [-path["env_infos"]['reward_dist'] for path in paths]
        final_dist = [-path["env_infos"]['reward_dist'][-1] for path in paths]
        # ctrl_cost = [-path["env_infos"]['reward_ctrl'] for path in paths]
        logger.logkv(prefix + 'AvgDistance', np.mean(dist))
        logger.logkv(prefix + 'AvgFinalDistance', np.mean(final_dist))
        # logger.logkv(prefix + 'AvgCtrlCost', np.mean(ctrl_cost))
    @property
    def action_space(self):
        # Torque box bounded by +/- max_torques.
        return spaces.Box(low=self._low, high=self._high, dtype=np.float32)
    @property
    def observation_space(self):
        # Effectively unbounded observation box.
        low = np.ones(self.obs_dim) * -1e6
        high = np.ones(self.obs_dim) * 1e6
        return spaces.Box(low=low, high=high, dtype=np.float32)
    @property
    def idx(self):
        # Index of the active goal in self.goal; currently fixed to the
        # single goal.  The commented logic switched goals mid-episode.
        return 0
        # if self._t < 10:
        #     return 0
        # else:
        #     return 1
if __name__ == "__main__":
    # Smoke test: constructing PR2Env connects to the control server and
    # performs one real reset over ZeroMQ; print the initial observation.
    env = PR2Env()
    # print("reset!")
    # obs = env.reset()
    # obs = np.expand_dims(, axis=0)
    print(env._init_obs)
    # print("reset done!")
    # for _ in range(100):
    #     print("action!")
    #     a = env.action_space.sample() * 0
    #     env.step(a)
    #     env.reward(obs, np.expand_dims(a, axis=0), obs)
    #     print("action done!")
# Init:
#
# [ 5.42730494e-01 1.52270862e-02 9.43007182e-01 -8.68156264e-01
# -5.32638623e+00 -1.53867780e+00 8.99776899e-01 2.31858976e-11
# -6.93889390e-17 0.00000000e+00 8.80117496e-03 0.00000000e+00
# 0.00000000e+00 0.00000000e+00 -1.48569878e-01 2.43122203e-01
# 1.87660681e-01 -5.87167355e-02 2.69877152e-01 2.89092061e-01
# -1.58908432e-01 2.16395001e-01 3.14027421e-01]
#
# End:
# [ 1.42125719e-01 -1.45503268e-01 9.30820215e-01 -1.06374839e+00
# -4.73241234e+00 -1.44477962e-01 1.58286694e+00 0.00000000e+00
# -7.12379766e-09 0.00000000e+00 0.00000000e+00 0.00000000e+00
# 0.00000000e+00 0.00000000e+00 -7.29517469e-02 -2.86581420e-02
# 5.70482330e-01 -8.47117285e-02 -1.18948075e-02 5.98804157e-01
# -5.13613156e-02 -8.77137857e-02 5.85055245e-01]
# Init from plate position
# [ 0.7783511 -0.25606883 1.12741523 -0.87482262 -7.52093116 -0.09122304
# 3.15171159 0. 0. 0. 0. 0.
# 0. 0. -0.03861735 0.5429763 0.55299989 -0.02305524
# 0.59228718 0.64473468 -0.01265096 0.46321 0.64747213]
#PLATE position:
# 0.1644276 -0.31147851 1.52381236 -0.90102611 -4.98011356 -1.66494068
# -1.01992367 0. 0. 0. 0. 0.
# 0. 0. -0.22708802 0.07970474 0.15733524 -0.05736825
# 0.17155499 0.17208294 -0.10596121 0.02212625 0.24058344]
"""
Lego:
1. [ 0.74187219 -0.16986661 0.96786218 -0.76494165 -4.5891251 -1.94265812
3.26905514 0. 0. 0. 0. 0.
0. 0. -0.16659146 0.49554746 0.21704888 -0.19025537
0.58158067 0.20974212 -0.14368087 0.57942362 0.26686146]
2. [ 3.28085262e-01 -3.16469607e-01 1.18802936e+00 -9.21583556e-01
-4.85452754e+00 -1.87099893e+00 2.64370143e+00 0.00000000e+00
-1.11022302e-15 0.00000000e+00 0.00000000e+00 0.00000000e+00
0.00000000e+00 0.00000000e+00 -1.45424603e-01 1.37123813e-01
2.56431151e-01 -1.69998881e-01 2.24088020e-01 2.55860864e-01
-1.23184919e-01 2.21378144e-01 3.08828806e-01]
3. [0.12629056, -0.33922564, 1.25569909, -0.83081232, -4.90531728, -1.8157426,
2.34105339, 0., 0., 0., 0., 0.,
0., 0., -0.14435956, 0.01431401, 0.25546218, -0.17143094,
0.08309022, 0.26141724, -0.13745477, 0.09015565, 0.30613478]
4. [-0.06936906,-0.29151411, 1.52926443, -0.57891128, -4.95552855, -1.87387052,
2.36106749, 0., 0., 0., 0., 0.,
0., 0., -0.13345914, -0.02537312, 0.16194666, -0.1599033,
0.03485722, 0.18399872, -0.13409751, 0.04803587, 0.23454963]
5. [-1.43238858e-01, -2.00743753e-01, 1.39055750e+00, -3.84339446e-01,
-4.77573980e+00, -1.93996057e+00, 2.49555356e+00, 0.00000000e+00,
-1.83213444e-10, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, -1.22261587e-01, -2.89328340e-02,
1.21728281e-01, -1.47726911e-01, 3.27933283e-02, 1.51349782e-01,
-1.21627484e-01, 4.52869719e-02, 1.98203254e-01]
"""
"""
[ 0.03492746 -0.44285442 1.51306859 -0.80359543 -5.16447223 -1.86651751
2.31072767 0. 0. 0. 0. 0.
0. 0. -0.19274069 -0.03007735 0.2464475 -0.21953061
0.0431868 0.26590113 -0.18416306 0.04940512 0.31146071]
[ 0.06096014 -0.22290762 1.43930537 -0.87134812 -5.04669556 -1.87256525
2.30224343 0. 0. 0. 0. 0.
0. 0. -0.1915515 -0.01845283 0.12999443 -0.21631334
0.056541 0.16264397 -0.18166899 0.06205027 0.20291775]
"""
"""
Init: [ 6.08807068e-01, -3.93620177e-01, 1.25922689e+00, -9.09422816e-01,
-4.91272171e+00, -1.93638719e+00, 2.68064616e+00, 0.00000000e+00,
0.00000000e+00, -1.01669442e-05, 0.00000000e+00, 0.00000000e+00,
-2.86612146e-02, -2.86612146e-02, -1.68403580e-01, 3.50214633e-01,
2.73749418e-01, -1.95250427e-01, 4.13470716e-01, 2.77695352e-01,
-1.65989693e-01, 4.25103612e-01, 3.34286505e-01]
Goal: [ 4.08587588e-01 -1.46010837e-01 1.28889254e+00 -8.87996750e-01
-4.80784494e+00 -2.00043797e+00 2.48319703e+00 0.00000000e+00
0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00
0.00000000e+00 8.88178420e-15 -1.51403786e-01 2.53451761e-01
1.17586076e-01 -1.76419679e-01 3.10666054e-01 1.47617230e-01
-1.53969955e-01 3.25107895e-01 1.94429943e-01]
"""
"""
Init (2): ([7.10011717e-01, -3.56398411e-01, 9.63204825e-01, -9.12897313e-01,
-4.66548326e+00, -2.05669173e+00, -2.77487280e-01
Goal (2) : [ 5.96785857e-01, -2.85932172e-01, 1.59162625e+00, -1.10704422e+00,
-5.06300837e+00, -1.71918953e+00, -6.13503858e-01, -1.58354262e-03,
8.21743149e-03, -9.93711846e-02, -8.94682723e-03, 1.73478727e-02,
-2.01818272e-02, -1.85441385e-02, -2.79299305e-01, 3.57783994e-01,
1.16489066e-01, -1.07182981e-01, 4.33240193e-01, 1.20192246e-01,
-1.68514010e-01, 2.89484657e-01, 1.87359024e-01]
"""
"""
obs for init_qpos=np.array([0,0,0,0,0,0,0])
[ 0.09859975 0.09922984 0.80846948 -0.15038998 0.10019115 -0.09034941
0.10817561 0. 0. 0. 0. 0.
0. 0. 0.1448004 0.26479295 0.17570619]
[ 0.48759759 -0.31427014 1.13511226 -0.5531421 -4.65912008 -1.99247238
0.99332108 0. 0. 0. 0. 0.
0. 0. -0.16828345 0.40697539 0.26332118]
obs for init_qpos=np.array([0.1,0.1,0.1,0.1,0.1,0.1,0.1])
[-8.76432427e-03 -9.05165677e-03 8.67159349e-01 -1.51692912e-01
-7.23088587e-03 -8.43016724e-02 -4.66166996e-04 0.00000000e+00
0.00000000e+00 0.00000000e+00 0.00000000e+00 0.00000000e+00
0.00000000e+00 0.00000000e+00 1.39332611e-01 1.52058032e-01
2.73096161e-01]
obs for init_qpos=np.array([0.5, -0.5, 1, -0.5, -5, 0, 1])
[ 7.05061651e-01 -2.03366195e-01 1.12436849e+00 -8.07938547e-01
-7.50843619e+00 -9.20897691e-02 3.00325915e+00 0.00000000e+00
0.00000000e+00 2.61271893e-09 0.00000000e+00 0.00000000e+00
0.00000000e+00 0.00000000e+00 -4.40685751e-02 4.99549120e-01
5.37356806e-01]
obs for init_qpos=np.array([0.7, -0.2, 1.1, -0.8, -7.5, 0, 3])
[ 0.46595897 -0.32120692 1.06359401 -0.51101382 -4.52624532 -1.9935601
0.99031896 0. 0. 0. 0. 0.
0. 0. -0.16006932 0.41632504 0.27408067]
""" | [
"numpy.clip",
"numpy.mean",
"numpy.ones",
"gym.spaces.Box",
"numpy.array",
"gym.utils.EzPickle.__init__",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.frombuffer",
"zmq.Context"
] | [((214, 271), 'numpy.array', 'np.array', (['[3.09, 1.08, 0.393, 0.674, 0.111, 0.152, 0.098]'], {}), '([3.09, 1.08, 0.393, 0.674, 0.111, 0.152, 0.098])\n', (222, 271), True, 'import numpy as np\n'), ((1011, 1024), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (1022, 1024), False, 'import zmq\n'), ((1190, 1207), 'numpy.array', 'np.array', (['([5] * 7)'], {}), '([5] * 7)\n', (1198, 1207), True, 'import numpy as np\n'), ((1868, 1899), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0, 0])\n', (1876, 1899), True, 'import numpy as np\n'), ((2118, 2151), 'gym.utils.EzPickle.__init__', 'gym.utils.EzPickle.__init__', (['self'], {}), '(self)\n', (2145, 2151), False, 'import gym\n'), ((3069, 3107), 'numpy.clip', 'np.clip', (['action', 'self._low', 'self._high'], {}), '(action, self._low, self._high)\n', (3076, 3107), True, 'import numpy as np\n'), ((5890, 5926), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': 'np.float64'}), '(buf, dtype=np.float64)\n', (5903, 5926), True, 'import numpy as np\n'), ((6559, 6619), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'self._low', 'high': 'self._high', 'dtype': 'np.float32'}), '(low=self._low, high=self._high, dtype=np.float32)\n', (6569, 6619), False, 'from gym import spaces\n'), ((6769, 6817), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'low', 'high': 'high', 'dtype': 'np.float32'}), '(low=low, high=high, dtype=np.float32)\n', (6779, 6817), False, 'from gym import spaces\n'), ((788, 934), 'numpy.array', 'np.array', (['[0.596785857, -0.285932172, 1.59162625, -1.10704422, -5.06300837, -\n 1.71918953, -0.613503858, 0.279299305, 0.357783994, 0.116489066]'], {}), '([0.596785857, -0.285932172, 1.59162625, -1.10704422, -5.06300837, \n -1.71918953, -0.613503858, 0.279299305, 0.357783994, 0.116489066])\n', (796, 934), True, 'import numpy as np\n'), ((4417, 4443), 'numpy.clip', 'np.clip', (['reward', '(-100)', '(100)'], {}), '(reward, -100, 100)\n', (4424, 4443), True, 'import numpy as np\n'), 
((6348, 6361), 'numpy.mean', 'np.mean', (['dist'], {}), '(dist)\n', (6355, 6361), True, 'import numpy as np\n'), ((6413, 6432), 'numpy.mean', 'np.mean', (['final_dist'], {}), '(final_dist)\n', (6420, 6432), True, 'import numpy as np\n'), ((6682, 6703), 'numpy.ones', 'np.ones', (['self.obs_dim'], {}), '(self.obs_dim)\n', (6689, 6703), True, 'import numpy as np\n'), ((6726, 6747), 'numpy.ones', 'np.ones', (['self.obs_dim'], {}), '(self.obs_dim)\n', (6733, 6747), True, 'import numpy as np\n'), ((2475, 2523), 'numpy.linalg.norm', 'np.linalg.norm', (['(ob[:5] - self.goal[self.idx][:5])'], {}), '(ob[:5] - self.goal[self.idx][:5])\n', (2489, 2523), True, 'import numpy as np\n'), ((5123, 5149), 'numpy.clip', 'np.clip', (['reward', '(-100)', '(100)'], {}), '(reward, -100, 100)\n', (5130, 5149), True, 'import numpy as np\n'), ((2559, 2583), 'numpy.linalg.norm', 'np.linalg.norm', (['ob[7:14]'], {}), '(ob[7:14])\n', (2573, 2583), True, 'import numpy as np\n'), ((3962, 4027), 'numpy.linalg.norm', 'np.linalg.norm', (['(obs_next[:, :5] - self.goal[self.idx][:5])'], {'axis': '(1)'}), '(obs_next[:, :5] - self.goal[self.idx][:5], axis=1)\n', (3976, 4027), True, 'import numpy as np\n'), ((4067, 4108), 'numpy.linalg.norm', 'np.linalg.norm', (['obs_next[:, 7:14]'], {'axis': '(1)'}), '(obs_next[:, 7:14], axis=1)\n', (4081, 4108), True, 'import numpy as np\n'), ((4775, 4829), 'numpy.linalg.norm', 'np.linalg.norm', (['(obs_next[:5] - self.goal[self.idx][:5])'], {}), '(obs_next[:5] - self.goal[self.idx][:5])\n', (4789, 4829), True, 'import numpy as np\n'), ((2637, 2680), 'numpy.concatenate', 'np.concatenate', (['[ob[5:7], ob[-3:]]'], {'axis': '(-1)'}), '([ob[5:7], ob[-3:]], axis=-1)\n', (2651, 2680), True, 'import numpy as np\n'), ((4869, 4899), 'numpy.linalg.norm', 'np.linalg.norm', (['obs_next[7:14]'], {}), '(obs_next[7:14])\n', (4883, 4899), True, 'import numpy as np\n'), ((4166, 4227), 'numpy.concatenate', 'np.concatenate', (['[obs_next[:, 5:7], obs_next[:, -3:]]'], {'axis': '(-1)'}), 
'([obs_next[:, 5:7], obs_next[:, -3:]], axis=-1)\n', (4180, 4227), True, 'import numpy as np\n'), ((4957, 5012), 'numpy.concatenate', 'np.concatenate', (['[obs_next[5:7], obs_next[-3:]]'], {'axis': '(-1)'}), '([obs_next[5:7], obs_next[-3:]], axis=-1)\n', (4971, 5012), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
损失函数
Authors: dongrenguang(<EMAIL>)
Date: 2021/10/17
"""
import numpy as np
from ..core import Node
from ..operator import SoftMax
class LossFunction(Node):
    """Abstract base class for loss-function nodes in the compute graph.
    """
    pass
class PerceptionLoss(LossFunction):
    """Perceptron loss: zero for non-negative input, negated input otherwise.
    """
    def compute(self):
        # 0 where the input is non-negative, minus the input where it is negative.
        self.value = np.mat(np.where(
            self.parents[0].value >= 0.0, 0.0, -self.parents[0].value))
    def get_jacobi_with_parent(self, parent):
        # The Jacobian is diagonal: one diagonal entry per parent element.
        # Where the parent element is >= 0 the loss is flat, so the partial
        # derivative is 0; otherwise it is -1.
        diag = np.where(parent.value >= 0.0, 0.0, -1)
        return np.diag(diag.ravel())
class LogLoss(LossFunction):
    """Logistic loss: log(1 + e^(-x)), computed element-wise.
    """
    def compute(self):
        assert len(self.parents) == 1
        x = self.parents[0].value
        # Clip the exponent at 100 to prevent overflow in np.power.
        self.value = np.log(1 + np.power(np.e, np.where(-x > 1e2, 1e2, -x)))
    def get_jacobi_with_parent(self, parent):
        # d/dx log(1 + e^(-x)) = -1 / (1 + e^x); exponent clipped as in compute().
        x = parent.value
        diag = -1 / (1 + np.power(np.e, np.where(x > 1e2, 1e2, x)))
        return np.diag(diag.ravel())
class CrossEntropyWithSoftMax(LossFunction):
    """Apply SoftMax to the first parent, then compute the cross-entropy
    against the second parent, treated as a one-hot label vector.
    """
    def compute(self):
        prob = SoftMax.softmax(self.parents[0].value)
        # The 1e-10 floor guards log() against zero probabilities.
        self.value = np.mat(-np.sum(np.multiply(self.parents[1].value, np.log(prob + 1e-10))))
    def get_jacobi_with_parent(self, parent):
        prob = SoftMax.softmax(self.parents[0].value)
        if parent is self.parents[0]:
            # Gradient w.r.t. the logits: softmax(x) - one_hot_label.
            return (prob - self.parents[1].value).T
        else:
            # Gradient w.r.t. the label vector: -log(prob).
            return (-np.log(prob)).T
| [
"numpy.where",
"numpy.log"
] | [((575, 613), 'numpy.where', 'np.where', (['(parent.value >= 0.0)', '(0.0)', '(-1)'], {}), '(parent.value >= 0.0, 0.0, -1)\n', (583, 613), True, 'import numpy as np\n'), ((361, 428), 'numpy.where', 'np.where', (['(self.parents[0].value >= 0.0)', '(0.0)', '(-self.parents[0].value)'], {}), '(self.parents[0].value >= 0.0, 0.0, -self.parents[0].value)\n', (369, 428), True, 'import numpy as np\n'), ((847, 878), 'numpy.where', 'np.where', (['(-x > 100.0)', '(100.0)', '(-x)'], {}), '(-x > 100.0, 100.0, -x)\n', (855, 878), True, 'import numpy as np\n'), ((989, 1018), 'numpy.where', 'np.where', (['(x > 100.0)', '(100.0)', 'x'], {}), '(x > 100.0, 100.0, x)\n', (997, 1018), True, 'import numpy as np\n'), ((1559, 1571), 'numpy.log', 'np.log', (['prob'], {}), '(prob)\n', (1565, 1571), True, 'import numpy as np\n'), ((1309, 1329), 'numpy.log', 'np.log', (['(prob + 1e-10)'], {}), '(prob + 1e-10)\n', (1315, 1329), True, 'import numpy as np\n')] |
import numpy as np
# Toy training set: three 3-d samples (one per row) for the perceptron demo.
X = np.array([
[1,0,0],
[-1,10,0],
[-1,-1,0],
])
# Corresponding class labels in {-1, +1}.
y = np.array([-1,1,1])
def perceptron_sgd(X, Y, eta=1, epochs=20):
    """Train a perceptron with stochastic gradient descent.

    On each misclassified sample (y * <w, x> <= 0) the weights are nudged
    by eta * y * x; a diagnostic message and the updated weights are printed.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training samples, one per row.
    Y : array-like, shape (n_samples,)
        Labels in {-1, +1}.
    eta : float, optional
        Learning rate (default 1, matching the original hard-coded value).
    epochs : int, optional
        Number of full passes over the data (default 20).

    Returns
    -------
    numpy.ndarray
        The learned weight vector of length n_features.
    """
    w = np.zeros(len(X[0]))
    for _ in range(epochs):
        # Iterate samples and labels together instead of re-indexing X[i], Y[i].
        for x, y_i in zip(X, Y):
            if np.dot(x, w) * y_i <= 0:
                print('mistake')
                w = w + eta * x * y_i
                print(w)
    return w
# Fit the perceptron on the toy data and report the learned weights.
w = perceptron_sgd(X,y)
print(w)
"numpy.array",
"numpy.dot"
] | [((24, 71), 'numpy.array', 'np.array', (['[[1, 0, 0], [-1, 10, 0], [-1, -1, 0]]'], {}), '([[1, 0, 0], [-1, 10, 0], [-1, -1, 0]])\n', (32, 71), True, 'import numpy as np\n'), ((91, 111), 'numpy.array', 'np.array', (['[-1, 1, 1]'], {}), '([-1, 1, 1])\n', (99, 111), True, 'import numpy as np\n'), ((272, 287), 'numpy.dot', 'np.dot', (['X[i]', 'w'], {}), '(X[i], w)\n', (278, 287), True, 'import numpy as np\n')] |
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Normal
from torch.autograd import grad
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from tensorboardX import SummaryWriter
#CPU or GPU -- chosen once at import time.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
#device = 'cpu'
parser = argparse.ArgumentParser()
parser.add_argument('--tau', default=0.005, type=float) # target smoothing coefficient
parser.add_argument('--target_update_interval', default=1, type=int)
parser.add_argument('--gradient_steps', default=1, type=int)
# Bug fix: learning_rate and gamma are floats; type=int would reject (or
# truncate) any value supplied on the command line.
parser.add_argument('--learning_rate', default=3e-4, type=float)
parser.add_argument('--gamma', default=0.99, type=float) # discount gamma
parser.add_argument('--capacity', default=400000, type=int) # replay buffer size
parser.add_argument('--iteration', default=100000, type=int) # num of games
parser.add_argument('--batch_size', default=512, type=int) # mini batch size
parser.add_argument('--seed', default=1, type=int)
# optional parameters
parser.add_argument('--num_hidden_layers', default=2, type=int)
parser.add_argument('--num_hidden_units_per_layer', default=256, type=int)
parser.add_argument('--sample_frequency', default=256, type=int)
parser.add_argument('--activation', default='Relu', type=str)
# NOTE(review): type=bool is an argparse pitfall -- bool('False') is True,
# so any explicit command-line value enables these flags.  Defaults are
# unaffected; consider action='store_true' / a str-to-bool converter.
parser.add_argument('--render', default=False, type=bool) # show UI or not
parser.add_argument('--log_interval', default=2000, type=int) #
parser.add_argument('--load', default=True, type=bool) # load model
args = parser.parse_args()
# Numerical floor used to keep log() arguments strictly positive.
min_Val = torch.tensor(1e-7).float().to(device)
class Replay_buffer():
    """Fixed-size ring buffer of transitions stored as pre-allocated tensors.

    Tensors live on the module-level ``device``.  ``push`` overwrites the
    oldest slot once ``capacity`` transitions have been stored.
    """
    def __init__(self, capacity, state_dim, action_dim):
        self.capacity = capacity
        # One pre-allocated pool per transition component.
        self.state_pool = torch.zeros(self.capacity, state_dim).float().to(device)
        self.action_pool = torch.zeros(self.capacity, action_dim).float().to(device)
        self.reward_pool = torch.zeros(self.capacity, 1).float().to(device)
        self.next_state_pool = torch.zeros(self.capacity, state_dim).float().to(device)
        self.done_pool = torch.zeros(self.capacity, 1).float().to(device)
        # Total number of pushes ever made (may exceed capacity).
        self.num_transition = 0
    def push(self, s, a, r, s_, d):
        """Store one (state, action, reward, next_state, done) transition."""
        index = self.num_transition % self.capacity
        s = torch.tensor(s).float().to(device)
        a = torch.tensor(a).float().to(device)
        r = torch.tensor(r).float().to(device)
        s_ = torch.tensor(s_).float().to(device)
        d = torch.tensor(d).float().to(device)
        for pool, ele in zip([self.state_pool, self.action_pool, self.reward_pool, self.next_state_pool, self.done_pool],
                             [s, a, r, s_, d]):
            pool[index] = ele
        self.num_transition += 1
    def sample(self, batch_size):
        """Sample ``batch_size`` distinct transitions uniformly.

        Bug fix: the previous version sampled indices over the whole
        capacity, so before the buffer filled it could return the
        zero-initialized (never-written) slots.  Sampling is now restricted
        to the slots that actually hold data.
        """
        high = min(self.num_transition, self.capacity)
        index = np.random.choice(range(high), batch_size, replace=False)
        bn_s, bn_a, bn_r, bn_s_, bn_d = self.state_pool[index], self.action_pool[index], self.reward_pool[index],\
            self.next_state_pool[index], self.done_pool[index]
        return bn_s, bn_a, bn_r, bn_s_, bn_d
class Actor(nn.Module):
    """Gaussian policy network mapping a state to (mean, clamped log-std)."""

    def __init__(self, state_dim, action_dim, min_log_std=-20, max_log_std=2):
        super().__init__()
        # Two ReLU hidden layers feeding separate mean / log-std heads.
        self.fc1 = nn.Linear(state_dim, 512)
        self.fc2 = nn.Linear(512, 256)
        self.mu_head = nn.Linear(256, action_dim)
        self.log_std_head = nn.Linear(256, action_dim)
        self.min_log_std = min_log_std
        self.max_log_std = max_log_std

    def forward(self, x):
        """Return the action mean and the log standard deviation, the latter
        clamped to [min_log_std, max_log_std] for numerical stability."""
        hidden = F.relu(self.fc2(F.relu(self.fc1(x))))
        mu = self.mu_head(hidden)
        log_std = self.log_std_head(hidden).clamp(self.min_log_std, self.max_log_std)
        return mu, log_std
class Critic(nn.Module):
    """State-value network V(s): two ReLU hidden layers to a scalar output."""

    def __init__(self, state_dim):
        super().__init__()
        self.fc1 = nn.Linear(state_dim, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 1)

    def forward(self, x):
        """Map a batch of states to value estimates of shape (batch, 1)."""
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
class Q(nn.Module):
    """Action-value network Q(s, a) over the concatenated state-action pair."""

    def __init__(self, state_dim, action_dim):
        super().__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.fc1 = nn.Linear(state_dim + action_dim, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 1)

    def forward(self, s, a):
        """Flatten both inputs to (batch, dim), join them, and score the pair."""
        flat_s = s.reshape(-1, self.state_dim)
        flat_a = a.reshape(-1, self.action_dim)
        joined = torch.cat((flat_s, flat_a), -1)  # combine state and action
        hidden = F.relu(self.fc1(joined))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
class SACAgent():
    """Soft Actor-Critic agent with a value net, a target value net and
    dual Q networks, producing 2-d (steer, throttle) driving actions.

    Actions are sampled from a tanh-squashed Gaussian policy and rescaled
    into ``steer_range`` / ``throttle_range``.
    """
    def __init__(self, state_dim = 45, action_dim=21):
        super(SACAgent, self).__init__()
        # Networks (all on the module-level ``device``).
        self.policy_net = Actor(state_dim=state_dim, action_dim = action_dim).to(device)
        self.value_net = Critic(state_dim).to(device)
        self.Target_value_net = Critic(state_dim).to(device)
        self.Q_net1 = Q(state_dim, action_dim).to(device)
        self.Q_net2 = Q(state_dim, action_dim).to(device)
        # One Adam optimizer per network, all with the same learning rate.
        self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=args.learning_rate)
        self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=args.learning_rate)
        self.Q1_optimizer = optim.Adam(self.Q_net1.parameters(), lr=args.learning_rate)
        self.Q2_optimizer = optim.Adam(self.Q_net2.parameters(), lr=args.learning_rate)
        self.replay_buffer = Replay_buffer(args.capacity,state_dim,action_dim)
        self.num_transition = 0
        self.num_training = 0
        self.writer = SummaryWriter('./exp-SAC_dual_Q_network')
        self.value_criterion = nn.MSELoss()
        self.Q1_criterion = nn.MSELoss()
        self.Q2_criterion = nn.MSELoss()
        # Hard-copy the value net into the target value net at start.
        for target_param, param in zip(self.Target_value_net.parameters(), self.value_net.parameters()):
            target_param.data.copy_(param.data)
        # Ranges used to rescale tanh outputs into control commands.
        self.steer_range = (-0.8,0.8)
        self.throttle_range = (0.6,1.0)
    def select_action(self, state):
        """Sample a stochastic (steer, throttle) action for exploration."""
        state = torch.FloatTensor(state).to(device)
        mu, log_sigma = self.policy_net(state)
        sigma = torch.exp(log_sigma)
        dist = Normal(mu, sigma)
        z = dist.sample()
        # tanh squashes the sample into (-1, 1); assumes state has a leading
        # batch dimension so z[0, i] indexes the first sample -- TODO confirm.
        steer = float(torch.tanh(z[0,0]).detach().cpu().numpy())
        throttle = float(torch.tanh(z[0,1]).detach().cpu().numpy())
        # Rescale (-1, 1) into the configured control ranges.
        steer = (steer + 1)/2 * (self.steer_range[1] - self.steer_range[0]) + self.steer_range[0]
        throttle = (throttle + 1)/2 * (self.throttle_range[1] - self.throttle_range[0]) + self.throttle_range[0]
        return np.array([steer, throttle])
    def test(self, state):
        """Deterministic variant of select_action: use the policy mean."""
        state = torch.FloatTensor(state).to(device)
        mu, log_sigma = self.policy_net(state)
        action = mu
        steer = float(torch.tanh(action[0,0]).detach().cpu().numpy())
        throttle = float(torch.tanh(action[0,1]).detach().cpu().numpy())
        steer = (steer + 1)/2 * (self.steer_range[1] - self.steer_range[0]) + self.steer_range[0]
        throttle = (throttle + 1)/2 * (self.throttle_range[1] - self.throttle_range[0]) + self.throttle_range[0]
        return np.array([steer, throttle])
    def evaluate(self, state):
        """Reparameterized sample with log-probability for the policy update.

        Returns (action, log_prob, noise, mean, log_sigma); log_prob sums
        the per-dimension log-probabilities with the tanh change-of-variable
        correction (min_Val guards the log).
        """
        batch = state.size()[0]
        batch_mu, batch_log_sigma = self.policy_net(state)
        batch_sigma = torch.exp(batch_log_sigma)
        dist = Normal(batch_mu, batch_sigma)
        noise = Normal(0, 1)
        z = noise.sample()
        action = torch.tanh(batch_mu + batch_sigma * z.to(device))
        log_prob = dist.log_prob(batch_mu + batch_sigma * z.to(device)) - torch.log(1 - action.pow(2) + min_Val)
        log_prob_0 = log_prob[:,0].reshape(batch,1)
        log_prob_1 = log_prob[:,1].reshape(batch,1)
        log_prob = log_prob_0 + log_prob_1
        return action, log_prob, z, batch_mu, batch_log_sigma
    def update(self):
        """One SAC training iteration: value, dual Q and policy updates,
        followed by a soft (Polyak) target-value update."""
        if self.num_training % 500 == 0:
            print("**************************Train Start************************")
            print("Training ... \t{} times ".format(self.num_training))
        for _ in range(args.gradient_steps):
            bn_s, bn_a, bn_r, bn_s_, bn_d = self.replay_buffer.sample(args.batch_size)
            # Bootstrapped Q target from the target value network.
            target_value = self.Target_value_net(bn_s_)
            next_q_value = bn_r + (1 - bn_d) * args.gamma * target_value
            excepted_value = self.value_net(bn_s)
            excepted_Q1 = self.Q_net1(bn_s, bn_a)
            excepted_Q2 = self.Q_net2(bn_s, bn_a)
            sample_action, log_prob, z, batch_mu, batch_log_sigma = self.evaluate(bn_s)
            # Clipped double-Q: take the minimum of both critics.
            excepted_new_Q = torch.min(self.Q_net1(bn_s, sample_action), self.Q_net2(bn_s, sample_action))
            next_value = excepted_new_Q - log_prob
            V_loss = self.value_criterion(excepted_value, next_value.detach()).mean()  # J_V
            # Dual Q net
            Q1_loss = self.Q1_criterion(excepted_Q1, next_q_value.detach()).mean() # J_Q
            Q2_loss = self.Q2_criterion(excepted_Q2, next_q_value.detach()).mean()
            pi_loss = (log_prob - excepted_new_Q).mean() # according to original paper
            self.writer.add_scalar('Loss/V_loss', V_loss, global_step=self.num_training)
            self.writer.add_scalar('Loss/Q1_loss', Q1_loss, global_step=self.num_training)
            self.writer.add_scalar('Loss/Q2_loss', Q2_loss, global_step=self.num_training)
            self.writer.add_scalar('Loss/policy_loss', pi_loss, global_step=self.num_training)
            # mini batch gradient descent (gradients are clipped to norm 0.5)
            self.value_optimizer.zero_grad()
            V_loss.backward(retain_graph=True)
            nn.utils.clip_grad_norm_(self.value_net.parameters(), 0.5)
            self.value_optimizer.step()
            self.Q1_optimizer.zero_grad()
            Q1_loss.backward(retain_graph = True)
            nn.utils.clip_grad_norm_(self.Q_net1.parameters(), 0.5)
            self.Q1_optimizer.step()
            self.Q2_optimizer.zero_grad()
            Q2_loss.backward(retain_graph = True)
            nn.utils.clip_grad_norm_(self.Q_net2.parameters(), 0.5)
            self.Q2_optimizer.step()
            self.policy_optimizer.zero_grad()
            pi_loss.backward(retain_graph = True)
            nn.utils.clip_grad_norm_(self.policy_net.parameters(), 0.5)
            self.policy_optimizer.step()
            # update target v net update -- Polyak averaging with rate tau.
            for target_param, param in zip(self.Target_value_net.parameters(), self.value_net.parameters()):
                target_param.data.copy_(target_param * (1 - args.tau) + param * args.tau)
            self.num_training += 1
    def save(self,epoch, capacity):
        """Save all four networks under ./SAC_model_<capacity>/ tagged by epoch."""
        os.makedirs('./SAC_model_' +str(capacity) , exist_ok=True)
        torch.save(self.policy_net.state_dict(), './SAC_model_' +str(capacity)+ '/policy_net_' + str(epoch) + '.pth')
        torch.save(self.value_net.state_dict(), './SAC_model_' +str(capacity)+ '/value_net_'+ str(epoch) +'.pth')
        torch.save(self.Q_net1.state_dict(), './SAC_model_' +str(capacity)+'/Q_net1_' + str(epoch) + '.pth')
        torch.save(self.Q_net2.state_dict(), './SAC_model_' +str(capacity)+'/Q_net2_' + str(epoch) + '.pth')
        print("====================================")
        print("Model has been saved...")
        print("====================================")
    def load(self, epoch, capacity):
        """Load all four networks saved by ``save`` for the given epoch/capacity."""
        dir = './SAC_model_' + str(capacity) + '/'
        self.policy_net.load_state_dict(torch.load( dir + 'policy_net_' + str(epoch) + '.pth'))
        self.value_net.load_state_dict(torch.load( dir + 'value_net_'+ str(epoch) + '.pth'))
        self.Q_net1.load_state_dict(torch.load( dir + 'Q_net1_' + str(epoch) + '.pth'))
        self.Q_net2.load_state_dict(torch.load( dir + 'Q_net2_' + str(epoch) + '.pth'))
        print("====================================")
        print("model has been loaded...")
        print("====================================")
"torch.tanh",
"torch.distributions.Normal",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"torch.FloatTensor",
"torch.exp",
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"torch.tensor",
"torch.nn.Linear",
"torch.zeros",
"torch.clamp",
"torch.cat"
] | [((418, 443), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (441, 443), False, 'import argparse\n'), ((354, 379), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (377, 379), False, 'import torch\n'), ((3324, 3349), 'torch.nn.Linear', 'nn.Linear', (['state_dim', '(512)'], {}), '(state_dim, 512)\n', (3333, 3349), True, 'import torch.nn as nn\n'), ((3369, 3388), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (3378, 3388), True, 'import torch.nn as nn\n'), ((3412, 3438), 'torch.nn.Linear', 'nn.Linear', (['(256)', 'action_dim'], {}), '(256, action_dim)\n', (3421, 3438), True, 'import torch.nn as nn\n'), ((3467, 3493), 'torch.nn.Linear', 'nn.Linear', (['(256)', 'action_dim'], {}), '(256, action_dim)\n', (3476, 3493), True, 'import torch.nn as nn\n'), ((3760, 3821), 'torch.clamp', 'torch.clamp', (['log_std_head', 'self.min_log_std', 'self.max_log_std'], {}), '(log_std_head, self.min_log_std, self.max_log_std)\n', (3771, 3821), False, 'import torch\n'), ((4017, 4042), 'torch.nn.Linear', 'nn.Linear', (['state_dim', '(256)'], {}), '(state_dim, 256)\n', (4026, 4042), True, 'import torch.nn as nn\n'), ((4062, 4081), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(256)'], {}), '(256, 256)\n', (4071, 4081), True, 'import torch.nn as nn\n'), ((4101, 4118), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1)'], {}), '(256, 1)\n', (4110, 4118), True, 'import torch.nn as nn\n'), ((4455, 4493), 'torch.nn.Linear', 'nn.Linear', (['(state_dim + action_dim)', '(256)'], {}), '(state_dim + action_dim, 256)\n', (4464, 4493), True, 'import torch.nn as nn\n'), ((4513, 4532), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(256)'], {}), '(256, 256)\n', (4522, 4532), True, 'import torch.nn as nn\n'), ((4552, 4569), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1)'], {}), '(256, 1)\n', (4561, 4569), True, 'import torch.nn as nn\n'), ((4697, 4718), 'torch.cat', 'torch.cat', (['(s, a)', '(-1)'], {}), '((s, a), -1)\n', (4706, 
4718), False, 'import torch\n'), ((5823, 5864), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['"""./exp-SAC_dual_Q_network"""'], {}), "('./exp-SAC_dual_Q_network')\n", (5836, 5864), False, 'from tensorboardX import SummaryWriter\n'), ((5897, 5909), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5907, 5909), True, 'import torch.nn as nn\n'), ((5938, 5950), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5948, 5950), True, 'import torch.nn as nn\n'), ((5979, 5991), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5989, 5991), True, 'import torch.nn as nn\n'), ((6377, 6397), 'torch.exp', 'torch.exp', (['log_sigma'], {}), '(log_sigma)\n', (6386, 6397), False, 'import torch\n'), ((6422, 6439), 'torch.distributions.Normal', 'Normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (6428, 6439), False, 'from torch.distributions import Normal\n'), ((6845, 6872), 'numpy.array', 'np.array', (['[steer, throttle]'], {}), '([steer, throttle])\n', (6853, 6872), True, 'import numpy as np\n'), ((7402, 7429), 'numpy.array', 'np.array', (['[steer, throttle]'], {}), '([steer, throttle])\n', (7410, 7429), True, 'import numpy as np\n'), ((7575, 7601), 'torch.exp', 'torch.exp', (['batch_log_sigma'], {}), '(batch_log_sigma)\n', (7584, 7601), False, 'import torch\n'), ((7617, 7646), 'torch.distributions.Normal', 'Normal', (['batch_mu', 'batch_sigma'], {}), '(batch_mu, batch_sigma)\n', (7623, 7646), False, 'from torch.distributions import Normal\n'), ((7663, 7675), 'torch.distributions.Normal', 'Normal', (['(0)', '(1)'], {}), '(0, 1)\n', (7669, 7675), False, 'from torch.distributions import Normal\n'), ((1621, 1640), 'torch.tensor', 'torch.tensor', (['(1e-07)'], {}), '(1e-07)\n', (1633, 1640), False, 'import torch\n'), ((6278, 6302), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (6295, 6302), False, 'import torch\n'), ((6918, 6942), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (6935, 6942), False, 'import torch\n'), ((1797, 
1834), 'torch.zeros', 'torch.zeros', (['self.capacity', 'state_dim'], {}), '(self.capacity, state_dim)\n', (1808, 1834), False, 'import torch\n'), ((1881, 1919), 'torch.zeros', 'torch.zeros', (['self.capacity', 'action_dim'], {}), '(self.capacity, action_dim)\n', (1892, 1919), False, 'import torch\n'), ((1966, 1995), 'torch.zeros', 'torch.zeros', (['self.capacity', '(1)'], {}), '(self.capacity, 1)\n', (1977, 1995), False, 'import torch\n'), ((2046, 2083), 'torch.zeros', 'torch.zeros', (['self.capacity', 'state_dim'], {}), '(self.capacity, state_dim)\n', (2057, 2083), False, 'import torch\n'), ((2128, 2157), 'torch.zeros', 'torch.zeros', (['self.capacity', '(1)'], {}), '(self.capacity, 1)\n', (2139, 2157), False, 'import torch\n'), ((2310, 2325), 'torch.tensor', 'torch.tensor', (['s'], {}), '(s)\n', (2322, 2325), False, 'import torch\n'), ((2357, 2372), 'torch.tensor', 'torch.tensor', (['a'], {}), '(a)\n', (2369, 2372), False, 'import torch\n'), ((2404, 2419), 'torch.tensor', 'torch.tensor', (['r'], {}), '(r)\n', (2416, 2419), False, 'import torch\n'), ((2452, 2468), 'torch.tensor', 'torch.tensor', (['s_'], {}), '(s_)\n', (2464, 2468), False, 'import torch\n'), ((2500, 2515), 'torch.tensor', 'torch.tensor', (['d'], {}), '(d)\n', (2512, 2515), False, 'import torch\n'), ((6489, 6508), 'torch.tanh', 'torch.tanh', (['z[0, 0]'], {}), '(z[0, 0])\n', (6499, 6508), False, 'import torch\n'), ((6557, 6576), 'torch.tanh', 'torch.tanh', (['z[0, 1]'], {}), '(z[0, 1])\n', (6567, 6576), False, 'import torch\n'), ((7052, 7076), 'torch.tanh', 'torch.tanh', (['action[0, 0]'], {}), '(action[0, 0])\n', (7062, 7076), False, 'import torch\n'), ((7125, 7149), 'torch.tanh', 'torch.tanh', (['action[0, 1]'], {}), '(action[0, 1])\n', (7135, 7149), False, 'import torch\n')] |
"""
Alternative implementations of segmented regression routines.
"""
# Author: <NAME>
# License: BSD 3 clause
import numpy as np
from segreg.model.alt import regression_alt, one_bkpt_segreg_alt,\
likelihood_util
try:
from numba import jit
except ImportError as e:
from segreg.mockjit import jit
# cache can fill up and also cause issues; only turn on if stable
_CACHE_NUMBA = False
##########################################################################
# Hessian
##########################################################################
def ols_terms(indep, dep, u1, u2):
    """
    Split the (sorted) data at bkpts ``u1`` and ``u2`` and compute the OLS
    summary terms for each of the three resulting segments.

    Returns a 3-tuple of OLS-term tuples (left, middle, right segment).
    """
    cut1 = np.searchsorted(indep, u1, side='right')
    cut2 = np.searchsorted(indep, u2, side='right')
    segments = ((indep[0:cut1], dep[0:cut1]),
                (indep[cut1:cut2], dep[cut1:cut2]),
                (indep[cut2:], dep[cut2:]))
    terms_left = regression_alt.ols_terms(segments[0][0], segments[0][1])
    terms_mid = regression_alt.ols_terms(segments[1][0], segments[1][1])
    terms_right = regression_alt.ols_terms(segments[2][0], segments[2][1])
    return terms_left, terms_mid, terms_right
def rss(params, ols_data1, ols_data2, ols_data3):
    """
    Total residual sum of squares of a two-bkpt segmented fit, summed over
    the three line segments defined by ``params = [u1, v1, u2, v2, m1, m2]``.
    """
    u1, v1, u2, v2, m1, m2 = params
    # slope of the middle segment is determined by the two bkpts
    mid_slope = (v2 - v1) / (u2 - u1)
    rss_left = likelihood_util.rss_line_segment([u1, v1, m1], ols_data1)
    rss_mid = likelihood_util.rss_line_segment([u1, v1, mid_slope], ols_data2)
    rss_right = likelihood_util.rss_line_segment([u2, v2, m2], ols_data3)
    return rss_left + rss_mid + rss_right
def two_bkpt_loglik(params, indep, dep):
    """
    Log-likelihood of a two-bkpt segmented regression model.

    Parameters
    ----------
    params: list
        [u1, v1, u2, v2, m1, m2, residual_variance]
    indep: array-like
        independent data
    dep: array-like
        dependent data
    """
    u1, v1, u2, v2, m1, m2, resid_variance = params
    terms1, terms2, terms3 = ols_terms(indep=indep, dep=dep, u1=u1, u2=u2)
    total_rss = rss(params=[u1, v1, u2, v2, m1, m2],
                    ols_data1=terms1,
                    ols_data2=terms2,
                    ols_data3=terms3)
    # first entry of each OLS-terms tuple is the segment's data count
    num_data = terms1[0] + terms2[0] + terms3[0]
    return likelihood_util.loglikelihood(rss=total_rss,
                                         resid_variance=resid_variance,
                                         num_data=num_data)
def _two_bkpt_loglik2(params, indep, dep):
    """
    Same as ``two_bkpt_loglik`` but with a different ordering of params.

    Parameters
    ----------
    params: list
        [u1, u2, v1, v2, m1, m2, residual_variance]
    indep: array-like
        independent data
    dep: array-like
        dependent data
    """
    u1, u2, v1, v2, m1, m2, resid_variance = params
    terms1, terms2, terms3 = ols_terms(indep=indep, dep=dep, u1=u1, u2=u2)
    total_rss = rss(params=[u1, v1, u2, v2, m1, m2],
                    ols_data1=terms1,
                    ols_data2=terms2,
                    ols_data3=terms3)
    # first entry of each OLS-terms tuple is the segment's data count
    num_data = terms1[0] + terms2[0] + terms3[0]
    return likelihood_util.loglikelihood(rss=total_rss,
                                         resid_variance=resid_variance,
                                         num_data=num_data)
##########################################################################
# End Hessian
##########################################################################
def segmented_func_impl(x, params):
    """
    Evaluate the two-bkpt segmented function at each point of ``x``.

    PARAMETERS
    ----------
    x: array-like (non-scalar)
    params: sequence
        [u1, v1, u2, v2, m1, m2] where (u1,v1), (u2,v2) are the bkpts and
        m1, m2 the outer-segment slopes.

    Returns
    -------
    numpy array of float, same length and order as ``x``.

    Notes
    -----
    BUG FIX: the previous implementation sorted the input, evaluated on the
    sorted array, and then indexed with ``argsort_inds`` instead of its
    inverse permutation, so unsorted input came back in the wrong order.
    Sorting is unnecessary: we evaluate each region with a boolean mask.
    """
    # REMEMBER: integer input would give odd results, hence the float cast
    x_arr = np.array(x, dtype=float)
    u1, v1, u2, v2, m1, m2 = params
    mid_slope = (v2 - v1) / (u2 - u1)

    left = x_arr <= u1
    mid = np.logical_and(u1 < x_arr, x_arr <= u2)
    right = u2 < x_arr

    result = np.empty_like(x_arr)
    result[left] = v1 + m1 * (x_arr[left] - u1)
    result[mid] = v1 + mid_slope * (x_arr[mid] - u1)
    result[right] = v2 + m2 * (x_arr[right] - u2)
    return result
# TODO: duped: get rid of this impl
def segmented_func(x, params):
    """
    Evaluate the two-bkpt segmented function at ``x`` (scalar or array-like).

    Scalars are wrapped in a list for the vectorized impl and unwrapped on
    the way out.
    """
    if not np.isscalar(x):
        return segmented_func_impl(x, params)
    return segmented_func_impl([x], params)[0]
# NOTE: bug in numpy? does not work when three or more conditions and input
# is a scalar
def segmented_funcORIG(x, params):
    """
    Legacy np.piecewise-based evaluation of the two-bkpt segmented function.

    NOTE(review): kept for reference only -- np.piecewise appears to misbehave
    here when given a scalar with three or more conditions (see module
    comment above); prefer ``segmented_func``. Contains debug prints.
    """
    # TODO: REMEMBER THIS FUNCTION GIVES ODD RESULTS WITH INTEGER INPUT
    x_arr = np.array(x, dtype=float)
    u1, v1, u2, v2, m1, m2 = params
    # slope of the middle segment is determined by the two bkpts
    mid_slope = (v2 - v1) / (u2 - u1)
    first = x_arr[x_arr <= u1]
    print("IN FUNC")
    print(u2 < x_arr)
    print("VAL: ", v2 + m2 * (x - u2))
    print()
    return np.piecewise(x_arr,
                        [x_arr <= u1,
                         np.logical_and(u1 < x_arr, x_arr <= u2),
                         u2 < x_arr],
                        [lambda z: v1 + m1 * (z - u1),
                         lambda z: v1 + mid_slope * (z - u1),
                         lambda z: v2 + m2 * (z - u2)])
@jit(nopython=True)
def fixed_bkpt_ls_regression(indep, dep, u1, u2):
    """
    Pure python implementation of the main cython impl:
    two_bkpt_segreg.fixed_bkpt_least_squares

    With the bkpts held fixed at (u1, u2), solves for the remaining
    segmented-function params (v1, v2, m1, m2) by ordinary least squares on
    an explicit design matrix (Section 5.1.1 of "Segmented Regression").
    Gives the same answer as ``fixed_bkpt_ls``.
    """
    cut1 = np.searchsorted(indep, u1, side='right')
    cut2 = np.searchsorted(indep, u2, side='right')
    shift1 = indep - u1
    shift2 = indep - u2
    span = u2 - u1
    # basis column for v1: 1 left of u1, linear ramp down to 0 across [u1, u2]
    col_v1 = 1.0 - np.copy(shift1) / span
    col_v1[0:cut1] = 1.0
    col_v1[cut2:] = 0.0
    # basis column for v2: ramp up across [u1, u2], 1 right of u2
    col_v2 = np.copy(shift1) / span
    col_v2[0:cut1] = 0.0
    col_v2[cut2:] = 1.0
    # basis column for m1: (x - u1) on the left segment only
    col_m1 = np.copy(shift1)
    col_m1[cut1:] = 0.0
    # basis column for m2: (x - u2) on the right segment only
    col_m2 = np.copy(shift2)
    col_m2[0:cut2] = 0.0
    design = np.vstack((col_v1, col_v2, col_m1, col_m2)).T
    # matrix mult by hand faster than canned OLS routines
    v1, v2, m1, m2 = regression_alt.mat_by_hand_ols(design, dep.reshape(-1, 1))
    return v1, v2, m1, m2
@jit(nopython=True)
def fixed_bkpt_ls_from_data(indep, dep, u1, u2):
    """
    Fixed-bkpt least squares computed directly from raw data arrays: splits
    the data at (u1, u2), forms per-segment OLS terms, and delegates to
    ``fixed_bkpt_ls``.
    """
    cut1 = np.searchsorted(indep, u1, side='right')
    cut2 = np.searchsorted(indep, u2, side='right')
    terms_left = regression_alt.ols_terms(indep[0:cut1], dep[0:cut1])
    terms_mid = regression_alt.ols_terms(indep[cut1:cut2], dep[cut1:cut2])
    terms_right = regression_alt.ols_terms(indep[cut2:], dep[cut2:])
    return fixed_bkpt_ls(terms_left, terms_mid, terms_right, u1, u2)
@jit(nopython=True, cache=False)
def fixed_bkpt_ls(ols_terms_1, ols_terms_2, ols_terms_3, u1, u2):
    """
    Pure python implementation of the main cython impl:
    two_bkpt_segreg.fixed_bkpt_least_squares

    With the bkpts fixed at (u1, u2), solves in closed form for the remaining
    params (v1, v2, m1, m2) and the resulting RSS, from the per-segment OLS
    summary terms (num_data, sum_x, sum_y, sum_xx, sum_yy, sum_xy).

    Segmented function params: (u1,v1,u2,v2,m1,m2), where (u1,v1) and (u2,v2)
    are breakpoints, and m1,m2 are the slope of the line segments in regions
    1 and 3 (the slope in region 2 being determined)
    NOTES
    -----
    The notation below follows the document
    "Segmented Regression" by <NAME>
    """
    num_data_1, sum_x_1, sum_y_1, sum_xx_1, sum_yy_1, sum_xy_1 = ols_terms_1
    num_data_2, sum_x_2, sum_y_2, sum_xx_2, sum_yy_2, sum_xy_2 = ols_terms_2
    num_data_3, sum_x_3, sum_y_3, sum_xx_3, sum_yy_3, sum_xy_3 = ols_terms_3
    u1_sq = u1 * u1
    u2_sq = u2 * u2
    two_u1 = 2.0 * u1
    two_u2 = 2.0 * u2
    diff = u2 - u1
    diff_sq = diff * diff
    # A, B, C, D: per-segment moments shifted about the relevant bkpt
    # (second index indicates which bkpt the segment is shifted by)
    A1 = sum_y_1
    A2 = sum_y_2
    A3 = sum_y_3
    B11 = sum_xy_1 - u1 * A1
    B22 = sum_xy_2 - u2 * A2
    B21 = sum_xy_2 - u1 * A2
    B32 = sum_xy_3 - u2 * A3
    C11 = sum_x_1 - u1 * num_data_1
    C21 = sum_x_2 - u1 * num_data_2
    C32 = sum_x_3 - u2 * num_data_3
    D11 = sum_xx_1 - two_u1 * sum_x_1 + u1_sq * num_data_1
    D22 = sum_xx_2 - two_u2 * sum_x_2 + u2_sq * num_data_2
    D21 = sum_xx_2 - two_u1 * sum_x_2 + u1_sq * num_data_2
    D32 = sum_xx_3 - two_u2 * sum_x_3 + u2_sq * num_data_3
    E = sum_yy_1 + sum_yy_2 + sum_yy_3
    F2 = sum_xx_2 - (u1 + u2) * sum_x_2 + u1 * u2 * num_data_2
    ##
    term = D21 / diff_sq
    # 2x2 linear system [a b; c d] [v1 v2]' = [e f]' from the normal equations
    a = -num_data_1 + C11 * C11 / D11 - D22 / diff_sq
    b = F2 / diff_sq
    c = b
    d = -num_data_3 + C32 * C32 / D32 - term
    e = -A1 + B11 * C11 / D11 + B22 / diff
    f = -A3 + B32 * C32 / D32 - B21 / diff
    # v estimates
    v1, v2 = regression_alt.invert_two_by_two(a, b, c, d, e, f)
    ## BEGIN: slopes
    m1 = (B11 - v1 * C11) / D11
    m2 = (B32 - v2 * C32) / D32
    ## END: slopes
    # middle-segment slope implied by the two bkpts
    m = (v2 - v1) / (u2 - u1)
    two_v1 = 2.0 * v1
    two_v2 = 2.0 * v2
    # RSS expanded in the shifted moments (avoids a second pass over data)
    rss = (E - two_v1 * (A1 + A2) - two_v2 * A3
           - 2.0 * m1 * B11 - 2.0 * m * B21 - 2.0 * m2 * B32
           + v1 * v1 * (num_data_1 + num_data_2) + v2 * v2 * num_data_3
           + two_v1 * (m1 * C11 + m * C21) + two_v2 * m2 * C32
           + m1 * m1 * D11 + m * m * D21 + m2 * m2 * D32)
    return v1, v2, m1, m2, rss
@jit(nopython=True, cache=_CACHE_NUMBA)
def estimate_two_bkpt_segreg(indep,
                             dep,
                             num_end_to_skip=3,
                             num_between_to_skip=4,
                             verbose=False,
                             optimize=True):
    """
    Estimate two-bkpt segmented regression model.
    This method is limited to univariate, continuous, linear, two-bkpt
    segmented regression problems.  Estimates the parameters:
        ``[u1, v1, u2, v2, m1, m2]``
    where
    ``(u1,v1), (u2, v2)`` are the breakpoints (in x-y plane), ordered such
    that ``u1 < u2``
    ``m1`` is the slope of the left-most segment
    ``m2`` is the slope of the right-most segment
    Parameters
    ----------
    indep: numpy array of shape (num_data,)
        The independent data.  Also called predictor, explanatory variable,
        regressor, or exogenous variable.
    dep: numpy array of shape (num_data,)
        The dependent data.  Also called response, regressand, or endogenous
        variable.
    num_end_to_skip: int
        Number of data points to skip at each end of the data when solving for
        the bkpts.  As such, this determines a guaranteed minimum number of data
        points in the left and right segments in the returned fit.
        If None, defaults to the underlying implementation.
        TODO: explain
    num_between_to_skip: int
        Number of data points to skip between the two bkpts (ie: the middle
        segment) when solving for the bkpts.  Specifically, for each choice of
        left bkpt ``u1``, will skip this many data points between ``u1`` and
        ``u2``.  As such, this determines a guaranteed minimum number of data
        points between the bkpts in the returned fit.
    verbose: bool
    optimize: bool
        If True, will implement a few optimizations in the algorithm when
        appropriate.
    Examples
    --------
    >>> import numpy as np
    >>> from segreg.model.alt import fit_two_bkpt
    >>> indep = np.array([1,2,3,4,5,6,7,8,9,10,11,12,13,14])
    >>> dep = np.array([1,2,3,4,5,4,3,2,1,0,1,2,3,4])
    >>> fit_two_bkpt(indep, dep)
    (array([ 5.,  5., 10., -0.,  1.,  1.]), 0.0)
    Returns
    -------
    params: array of shape (num_params,)
        The estimated parameters.  The returned parameters are, in order,
        [u1, v1, u2, v2, m1, m2].
    rss: float
        Residual sum of squares of the fit.
    """
    # TODO: do we need to sort the data?
    # TODO: can we raise Exception with Numba?
    # if num_between_to_skip < 2:
    #     pass
    #raise Exception("num_between_to_skip must be greater than zero")
    min_value = np.inf
    min_params = None
    # list call gets set out of order, so we call sort here on indices
    unique_indep = list(set(indep))
    unique_indep_lhs_indices = np.searchsorted(indep, unique_indep)
    unique_indep_lhs_indices.sort()
    unique_indep = indep[unique_indep_lhs_indices]
    # STEP 2. check for local min in intervals between data points
    # NOTE: we use array mask with minus, like: x[0:-3]
    # these are indices of LHS of allowable intervals to check
    num_uniq = len(unique_indep)
    index1_begin = num_end_to_skip + 1
    index2_end = num_uniq - num_end_to_skip - 3
    index1_end = index2_end - num_between_to_skip
    ############################################################################
    # try check near middle: seeds min_value with a fit at roughly the data's
    # thirds, so early-exit tricks below can fire sooner (disabled)
    ############################################################################
    check_near_middle = False
    # this does not seem to help
    if check_near_middle:
        if num_uniq > 20:
            # todo: check if violate contrainsts num ends, between
            u1_ind = int(num_uniq / 3)
            u1 = unique_indep[u1_ind]
            u2_ind = int(2 * u1_ind)
            u2 = unique_indep[u2_ind]
            v1, v2, m1, m2, rss = fixed_bkpt_ls_from_data(indep, dep, u1, u2)
            min_params = np.array([u1, v1, u2, v2, m1, m2])
            min_value = rss
            #print("min val: ", min_value)
    ############################################################################
    if verbose:
        print()
        print("indep")
        print(indep)
        print()
        print("unique_indep_lhs_indices")
        print(unique_indep_lhs_indices)
        print()
        print()
        print("num_end_to_skip: ", num_end_to_skip)
        print("num_between_to_skip: ", num_between_to_skip)
        print("unique range: ", np.arange(num_uniq))
        print("index1_begin: ", index1_begin)
        print("index1_end: ", index1_end)
        print("index2_end: ", index2_end)
        print()
    # exhaustive search over "squares": each (index1, index2) pair defines an
    # interval for u1 and an interval for u2; check interior, edges, corners
    index1_range = np.arange(index1_begin, index1_end + 1)
    #print("index1_range: ", index1_range)
    for index1 in index1_range:
        index2_begin = index1 + num_between_to_skip
        index2_range = np.arange(index2_begin, index2_end + 1)
        for index2 in index2_range:
            # reset for each square
            can_skip_lower_left_corner = False
            ind1 = unique_indep_lhs_indices[index1 + 1]
            ind2 = unique_indep_lhs_indices[index2 + 1]
            indep1 = indep[0:ind1]
            dep1 = dep[0:ind1]
            indep2 = indep[ind1:ind2]
            dep2 = dep[ind1:ind2]
            indep3 = indep[ind2:]
            dep3 = dep[ind2:]
            # TODO: put in check that mins actually hit
            if len(indep1) < num_end_to_skip:
                raise Exception("region one has too few data")
            if len(indep2) < num_between_to_skip:
                raise Exception("region two has too few data")
            if len(indep3) < num_end_to_skip:
                raise Exception("region three has too few data")
            u1_data = indep1[-1]
            u1_data_next = indep2[0]
            u2_data = indep2[-1]
            u2_data_next = indep3[0]
            if verbose:
                print()
                print("--------------------------------------------------------")
                print("INDICES: ", index1, ",", index2)
                print()
                print("u1 interval: ", u1_data, ", ", u1_data_next)
                print("u2 interval: ", u2_data, ", ", u2_data_next)
                print()
                print("indep1: len: ", len(indep1))
                print(indep1)
                print("indep2: len: ", len(indep2))
                print(indep2)
                print("indep3: len: ", len(indep3))
                print(indep3)
                print()
            ###################################################################
            # interior of square
            # check regressions for right place
            #
            # get out early trick: based on fact that unrestricted regressions
            # rss1 + rss2 + rss3 is lower bound for the segmented solution rss
            ###################################################################
            (ols_intercept1,
             ols_slope1,
             rss1,
             ols_terms1) = regression_alt.ols_verbose(indep1, dep1)
            # TRICK: get out early if possible
            if rss1 > min_value and optimize:
                if verbose:
                    print("OUT EARLY on rss1")
                continue
            (ols_intercept2,
             ols_slope2,
             rss2,
             ols_terms2) = regression_alt.ols_verbose(indep2, dep2)
            # TRICK: get out early if possible
            if rss1 + rss2 > min_value and optimize:
                if verbose:
                    print("OUT EARLY on rss1 + rss2")
                continue
            (ols_intercept3,
             ols_slope3,
             rss3,
             ols_terms3) = regression_alt.ols_verbose(indep3, dep3)
            rss = rss1 + rss2 + rss3
            if rss > min_value and optimize:
                if verbose:
                    print("OUT EARLY on rss1 + rss2 + rss3")
                continue
            lhs_slope_diff = ols_slope2 - ols_slope1
            rhs_slope_diff = ols_slope3 - ols_slope2
            non_zero_slopes = abs(lhs_slope_diff) > 1.0e-14 and abs(rhs_slope_diff) > 1.0e-14
            # either or both slopes are zero, then these cases will get covered
            # by the square boundary calculations (edges, corners)
            if non_zero_slopes:
                # bkpts implied by intersecting the three unrestricted lines
                u1_intersect = (ols_intercept1 - ols_intercept2) / lhs_slope_diff
                u2_intersect = (ols_intercept2 - ols_intercept3) / rhs_slope_diff
                u1_right_place = ((u1_data < u1_intersect) and
                                  (u1_intersect < u1_data_next))
                u2_right_place = ((u2_data < u2_intersect) and
                                  (u2_intersect < u2_data_next))
                if u1_right_place and u2_right_place:
                    if rss < min_value:
                        min_value = rss
                        v1 = ols_intercept1 + ols_slope1 * u1_intersect
                        v2 = ols_intercept2 + ols_slope2 * u2_intersect
                        min_params = np.array([u1_intersect,
                                               v1,
                                               u2_intersect,
                                               v2,
                                               ols_slope1,
                                               ols_slope3])
                        if verbose:
                            print()
                            print("NEW LOW BOTH IN RIGHT PLACE")
                            print("params: ", min_params)
                            print("RSS: ", rss)
                            print()
                            print("bndies: ", u1_data, u1_data_next)
                            print("bndies: ", u2_data, u2_data_next)
                    continue
            ###################################################################
            # sides of square
            ###################################################################
            ##########
            # fix u1
            ##########
            (check_min_params,
             check_min_value,
             can_skip_corners) = _fix_u1_bndy(u1_fixed=u1_data,
                                              ols_terms1=ols_terms1,
                                              ols_terms2=ols_terms2,
                                              ols_intercept3=ols_intercept3,
                                              ols_slope3=ols_slope3,
                                              rss3=rss3,
                                              u2_data=u2_data,
                                              u2_data_next=u2_data_next,
                                              min_value=min_value,
                                              verbose=verbose)
            # TODO: maybe not return None
            if check_min_params is not None:
                min_params = check_min_params
                min_value = check_min_value
            if can_skip_corners:
                can_skip_lower_left_corner = True
            # extra side-of-square bndy we need to check
            if index2 == index2_range[0]:
                # here we need to additionally do with bkpt u1_data_next
                if verbose:
                    print("CHECK EXTRA SIDE BNDY FIX u1_data_next: ",
                          u1_data_next, "\n",
                          "left interval: [", u1_data, ", ", u1_data_next, "]",
                          " ; right interval: [", u2_data, ", ", u2_data_next, "]")
                (check_min_params,
                 check_min_value,
                 can_skip_corners) = _fix_u1_bndy(u1_fixed=u1_data_next,
                                                  ols_terms1=ols_terms1,
                                                  ols_terms2=ols_terms2,
                                                  ols_intercept3=ols_intercept3,
                                                  ols_slope3=ols_slope3,
                                                  rss3=rss3,
                                                  u2_data=u2_data,
                                                  u2_data_next=u2_data_next,
                                                  min_value=min_value,
                                                  verbose=verbose)
                if check_min_params is not None:
                    min_params = check_min_params
                    min_value = check_min_value
            else:
                if not can_skip_corners:
                    # need to check lower-right corner
                    (check_min_params,
                     check_min_value) = _corner(u1=u1_data_next,
                                                u2=u2_data,
                                                ols_terms1=ols_terms1,
                                                ols_terms2=ols_terms2,
                                                ols_terms3=ols_terms3,
                                                min_value=min_value,
                                                verbose=verbose)
                    if check_min_params is not None:
                        min_params = check_min_params
                        min_value = check_min_value
            ##########
            # fix u2
            ##########
            (check_min_params,
             check_min_value,
             can_skip_corners) = _fix_u2_bndy(u2_fixed=u2_data,
                                              ols_terms2=ols_terms2,
                                              ols_terms3=ols_terms3,
                                              ols_intercept1=ols_intercept1,
                                              ols_slope1=ols_slope1,
                                              rss1=rss1,
                                              u1_data=u1_data,
                                              u1_data_next=u1_data_next,
                                              min_value=min_value,
                                              verbose=verbose)
            if check_min_params is not None:
                min_params = check_min_params
                min_value = check_min_value
            if can_skip_corners:
                can_skip_lower_left_corner = True
            # extra side-of-square bndy we need to check
            if index2 == index2_range[-1]:
                # here we need to additionally do with bkpt u1_data_next
                if verbose:
                    print("CHECK EXTRA SIDE BNDY FIX u2_data_next: ",
                          u2_data_next, "\n",
                          "left interval: [", u1_data, ", ", u1_data_next, "]",
                          " ; right interval: [", u2_data, ", ", u2_data_next, "]")
                    print()
                (check_min_params,
                 check_min_value,
                 can_skip_corners) = _fix_u2_bndy(u2_fixed=u2_data_next,
                                                  ols_terms2=ols_terms2,
                                                  ols_terms3=ols_terms3,
                                                  ols_intercept1=ols_intercept1,
                                                  ols_slope1=ols_slope1,
                                                  rss1=rss1,
                                                  u1_data=u1_data,
                                                  u1_data_next=u1_data_next,
                                                  min_value=min_value,
                                                  verbose=verbose)
                if check_min_params is not None:
                    min_params = check_min_params
                    min_value = check_min_value
            else:
                if not can_skip_corners:
                    # need to check upper-right corner
                    (check_min_params,
                     check_min_value) = _corner(u1=u1_data_next,
                                                u2=u2_data_next,
                                                ols_terms1=ols_terms1,
                                                ols_terms2=ols_terms2,
                                                ols_terms3=ols_terms3,
                                                min_value=min_value,
                                                verbose=verbose)
                    if check_min_params is not None:
                        min_params = check_min_params
                        min_value = check_min_value
                # only miss one corner at most upper-left, so need to
                # check it separately
                if index1 == index1_range[0]:
                    if not can_skip_corners:
                        # need to check upper-left corner
                        (check_min_params,
                         check_min_value) = _corner(u1=u1_data,
                                                    u2=u2_data_next,
                                                    ols_terms1=ols_terms1,
                                                    ols_terms2=ols_terms2,
                                                    ols_terms3=ols_terms3,
                                                    min_value=min_value,
                                                    verbose=verbose)
                        if check_min_params is not None:
                            min_params = check_min_params
                            min_value = check_min_value
            ###################################################################
            # check corner boundaries
            ###################################################################
            if not can_skip_lower_left_corner:
                (check_min_params,
                 check_min_value) = _corner(u1=u1_data,
                                            u2=u2_data,
                                            ols_terms1=ols_terms1,
                                            ols_terms2=ols_terms2,
                                            ols_terms3=ols_terms3,
                                            min_value=min_value,
                                            verbose=verbose)
                if check_min_params is not None:
                    min_params = check_min_params
                    min_value = check_min_value
    # for straight-line data, the fitted rss can sometimes be negative,
    # due to noise in the computations
    if abs(min_value) < 1.0e-13:
        min_value = 0.0
    return min_params, min_value
@jit(nopython=True)
def _corner(u1, u2, ols_terms1, ols_terms2, ols_terms3, min_value, verbose):
    """
    Evaluate the fixed-bkpt fit at a corner (u1, u2) of the search square.

    Returns (new_params, new_min_value); new_params is None when the corner
    does not beat the current minimum.
    """
    v1, v2, m1, m2, rss = fixed_bkpt_ls(ols_terms1,
                                        ols_terms2,
                                        ols_terms3,
                                        u1,
                                        u2)
    # guard clause: corner is not an improvement
    if not rss < min_value:
        return None, min_value
    new_params = np.array([u1, v1, u2, v2, m1, m2])
    if verbose:
        print()
        print("NEW LOW WITH CORNER VALUE")
        print("params: ", new_params)
        print("RSS: ", rss)
        print("CORNER: u1: ", u1, " ; u2: ", u2)
        print()
    return new_params, rss
@jit(nopython=True)
def _fix_u1_bndy(u1_fixed,
                 ols_terms1,
                 ols_terms2,
                 ols_intercept3,
                 ols_slope3,
                 rss3,
                 u2_data,
                 u2_data_next,
                 min_value,
                 verbose):
    """
    Check the square edge where the left bkpt is held fixed at ``u1_fixed``:
    solve a one-bkpt problem on segments 1-2, then test whether the implied
    u2 intersection falls in its allowed interval.

    Returns (new_params_or_None, new_min_value, can_skip_corners).
    """
    if verbose:
        print()
        print("CHECK FIXED u1: ", u1_fixed)
        print("u2 interval")
        print(u2_data, u2_data_next)
    v1, m1, m2, rss_left_pair = one_bkpt_segreg_alt._fixed_bkpt_ls_impl(
        ols_terms1, ols_terms2, u1_fixed)
    rss = rss_left_pair + rss3
    if verbose:
        print("initial RSS: ", rss)
        print()
    min_params = None
    # corner rss > right place = initial rss above; so if right place already
    # greater than current min_value, we can skip corners
    can_skip_corners = True
    # TODO: what if equals here? (ie: two solutions?)
    if rss < min_value:
        can_skip_corners = False
        dslope = ols_slope3 - m2
        if abs(dslope) > 1.0e-14:
            # intersection of middle segment with unrestricted right line
            u2_intersect = (v1 - ols_intercept3 - m2 * u1_fixed) / dslope
            if u2_data < u2_intersect < u2_data_next:
                can_skip_corners = True
                min_value = rss
                v2 = ols_intercept3 + ols_slope3 * u2_intersect
                min_params = np.array([u1_fixed,
                                       v1,
                                       u2_intersect,
                                       v2,
                                       m1,
                                       ols_slope3])
                if verbose:
                    print()
                    print("NEW LOW FIXED u1=", u1_fixed)
                    print("u2 interval")
                    print(u2_data, u2_data_next)
                    print("u2 intersect: ", u2_intersect)
                    print()
                    print("RSS: ", rss)
    return min_params, min_value, can_skip_corners
@jit(nopython=True)
def _fix_u2_bndy(u2_fixed,
                 ols_terms2,
                 ols_terms3,
                 ols_intercept1,
                 ols_slope1,
                 rss1,
                 u1_data,
                 u1_data_next,
                 min_value,
                 verbose):
    """
    Check the square edge where the right bkpt is held fixed at ``u2_fixed``:
    solve a one-bkpt problem on segments 2-3, then test whether the implied
    u1 intersection falls in its allowed interval.

    Mirror image of ``_fix_u1_bndy``.

    Returns (new_params_or_None, new_min_value, can_skip_corners).
    """
    if verbose:
        print()
        print("CHECK FIXED u2: ", u2_fixed)
        print("u1 interval")
        # BUG FIX: previously printed u1_data twice instead of the interval
        # endpoints (cf. the corresponding prints in _fix_u1_bndy)
        print(u1_data, u1_data_next)
    v2, m2, m3, rss_23 = one_bkpt_segreg_alt._fixed_bkpt_ls_impl(ols_terms2,
                                                                 ols_terms3,
                                                                 u2_fixed)
    rss = rss1 + rss_23
    min_params = None
    # corner rss > right place = initial rss above; so if right place already
    # greater than current min_value, we can skip corners
    can_skip_corners = True
    if rss < min_value:
        can_skip_corners = False
        slope_diff = m2 - ols_slope1
        if abs(slope_diff) > 1.0e-14:
            # intersection of middle segment with unrestricted left line
            u1_intersect = (ols_intercept1 - v2 + m2 * u2_fixed) / slope_diff
            u1_right_place = ((u1_data < u1_intersect) and (u1_intersect < u1_data_next))
            if u1_right_place:
                can_skip_corners = True
                min_value = rss
                v1 = ols_intercept1 + ols_slope1 * u1_intersect
                min_params = np.array([u1_intersect,
                                       v1,
                                       u2_fixed,
                                       v2,
                                       ols_slope1,
                                       m3])
                if verbose:
                    print()
                    print("NEW LOW FIXED u2=", u2_fixed)
                    print("u1 interval")
                    # BUG FIX: was u1_data printed twice here as well
                    print(u1_data, u1_data_next)
                    print("u1 intersect: ", u1_intersect)
                    print()
                    print("RSS: ", rss)
    return min_params, min_value, can_skip_corners
| [
"numpy.copy",
"segreg.model.alt.regression_alt.ols_terms",
"numpy.isscalar",
"numpy.logical_and",
"numpy.searchsorted",
"segreg.model.alt.likelihood_util.loglikelihood",
"segreg.model.alt.likelihood_util.rss_line_segment",
"numpy.append",
"numpy.array",
"segreg.model.alt.regression_alt.mat_by_hand... | [((6142, 6160), 'segreg.mockjit.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (6145, 6160), False, 'from segreg.mockjit import jit\n'), ((7457, 7475), 'segreg.mockjit.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (7460, 7475), False, 'from segreg.mockjit import jit\n'), ((8069, 8100), 'segreg.mockjit.jit', 'jit', ([], {'nopython': '(True)', 'cache': '(False)'}), '(nopython=True, cache=False)\n', (8072, 8100), False, 'from segreg.mockjit import jit\n'), ((10428, 10466), 'segreg.mockjit.jit', 'jit', ([], {'nopython': '(True)', 'cache': '_CACHE_NUMBA'}), '(nopython=True, cache=_CACHE_NUMBA)\n', (10431, 10466), False, 'from segreg.mockjit import jit\n'), ((29169, 29187), 'segreg.mockjit.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (29172, 29187), False, 'from segreg.mockjit import jit\n'), ((29981, 29999), 'segreg.mockjit.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (29984, 29999), False, 'from segreg.mockjit import jit\n'), ((32164, 32182), 'segreg.mockjit.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (32167, 32182), False, 'from segreg.mockjit import jit\n'), ((628, 668), 'numpy.searchsorted', 'np.searchsorted', (['indep', 'u1'], {'side': '"""right"""'}), "(indep, u1, side='right')\n", (643, 668), True, 'import numpy as np\n'), ((682, 722), 'numpy.searchsorted', 'np.searchsorted', (['indep', 'u2'], {'side': '"""right"""'}), "(indep, u2, side='right')\n", (697, 722), True, 'import numpy as np\n'), ((913, 951), 'segreg.model.alt.regression_alt.ols_terms', 'regression_alt.ols_terms', (['indep1', 'dep1'], {}), '(indep1, dep1)\n', (937, 951), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((970, 1008), 'segreg.model.alt.regression_alt.ols_terms', 'regression_alt.ols_terms', (['indep2', 'dep2'], {}), '(indep2, dep2)\n', (994, 1008), False, 'from segreg.model.alt import regression_alt, 
one_bkpt_segreg_alt, likelihood_util\n'), ((1027, 1065), 'segreg.model.alt.regression_alt.ols_terms', 'regression_alt.ols_terms', (['indep3', 'dep3'], {}), '(indep3, dep3)\n', (1051, 1065), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((1216, 1273), 'segreg.model.alt.likelihood_util.rss_line_segment', 'likelihood_util.rss_line_segment', (['[u1, v1, m1]', 'ols_data1'], {}), '([u1, v1, m1], ols_data1)\n', (1248, 1273), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((1324, 1388), 'segreg.model.alt.likelihood_util.rss_line_segment', 'likelihood_util.rss_line_segment', (['[u1, v1, mid_slope]', 'ols_data2'], {}), '([u1, v1, mid_slope], ols_data2)\n', (1356, 1388), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((1400, 1457), 'segreg.model.alt.likelihood_util.rss_line_segment', 'likelihood_util.rss_line_segment', (['[u2, v2, m2]', 'ols_data3'], {}), '([u2, v2, m2], ols_data3)\n', (1432, 1457), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((2264, 2361), 'segreg.model.alt.likelihood_util.loglikelihood', 'likelihood_util.loglikelihood', ([], {'rss': 'rss_term', 'resid_variance': 'resid_variance', 'num_data': 'num_data'}), '(rss=rss_term, resid_variance=resid_variance,\n num_data=num_data)\n', (2293, 2361), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((3275, 3372), 'segreg.model.alt.likelihood_util.loglikelihood', 'likelihood_util.loglikelihood', ([], {'rss': 'rss_term', 'resid_variance': 'resid_variance', 'num_data': 'num_data'}), '(rss=rss_term, resid_variance=resid_variance,\n num_data=num_data)\n', (3304, 3372), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((3839, 3863), 'numpy.array', 'np.array', (['x'], {'dtype': 'float'}), '(x, dtype=float)\n', (3847, 3863), True, 'import 
numpy as np\n'), ((4974, 5008), 'numpy.append', 'np.append', (['first_vals', 'second_vals'], {}), '(first_vals, second_vals)\n', (4983, 5008), True, 'import numpy as np\n'), ((5029, 5065), 'numpy.append', 'np.append', (['sorted_result', 'third_vals'], {}), '(sorted_result, third_vals)\n', (5038, 5065), True, 'import numpy as np\n'), ((5204, 5218), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (5215, 5218), True, 'import numpy as np\n'), ((5564, 5588), 'numpy.array', 'np.array', (['x'], {'dtype': 'float'}), '(x, dtype=float)\n', (5572, 5588), True, 'import numpy as np\n'), ((6717, 6757), 'numpy.searchsorted', 'np.searchsorted', (['indep', 'u1'], {'side': '"""right"""'}), "(indep, u1, side='right')\n", (6732, 6757), True, 'import numpy as np\n'), ((6771, 6811), 'numpy.searchsorted', 'np.searchsorted', (['indep', 'u2'], {'side': '"""right"""'}), "(indep, u2, side='right')\n", (6786, 6811), True, 'import numpy as np\n'), ((7098, 7119), 'numpy.copy', 'np.copy', (['data_shiftu1'], {}), '(data_shiftu1)\n', (7105, 7119), True, 'import numpy as np\n'), ((7158, 7179), 'numpy.copy', 'np.copy', (['data_shiftu2'], {}), '(data_shiftu2)\n', (7165, 7179), True, 'import numpy as np\n'), ((7218, 7257), 'numpy.vstack', 'np.vstack', (['(data0, data1, data2, data3)'], {}), '((data0, data1, data2, data3))\n', (7227, 7257), True, 'import numpy as np\n'), ((7385, 7426), 'segreg.model.alt.regression_alt.mat_by_hand_ols', 'regression_alt.mat_by_hand_ols', (['data', 'dep'], {}), '(data, dep)\n', (7415, 7426), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((7555, 7595), 'numpy.searchsorted', 'np.searchsorted', (['indep', 'u1'], {'side': '"""right"""'}), "(indep, u1, side='right')\n", (7570, 7595), True, 'import numpy as np\n'), ((7609, 7649), 'numpy.searchsorted', 'np.searchsorted', (['indep', 'u2'], {'side': '"""right"""'}), "(indep, u2, side='right')\n", (7624, 7649), True, 'import numpy as np\n'), ((7840, 7878), 
'segreg.model.alt.regression_alt.ols_terms', 'regression_alt.ols_terms', (['indep1', 'dep1'], {}), '(indep1, dep1)\n', (7864, 7878), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((7897, 7935), 'segreg.model.alt.regression_alt.ols_terms', 'regression_alt.ols_terms', (['indep2', 'dep2'], {}), '(indep2, dep2)\n', (7921, 7935), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((7954, 7992), 'segreg.model.alt.regression_alt.ols_terms', 'regression_alt.ols_terms', (['indep3', 'dep3'], {}), '(indep3, dep3)\n', (7978, 7992), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((9860, 9910), 'segreg.model.alt.regression_alt.invert_two_by_two', 'regression_alt.invert_two_by_two', (['a', 'b', 'c', 'd', 'e', 'f'], {}), '(a, b, c, d, e, f)\n', (9892, 9910), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((13304, 13340), 'numpy.searchsorted', 'np.searchsorted', (['indep', 'unique_indep'], {}), '(indep, unique_indep)\n', (13319, 13340), True, 'import numpy as np\n'), ((15063, 15102), 'numpy.arange', 'np.arange', (['index1_begin', '(index1_end + 1)'], {}), '(index1_begin, index1_end + 1)\n', (15072, 15102), True, 'import numpy as np\n'), ((30450, 30523), 'segreg.model.alt.one_bkpt_segreg_alt._fixed_bkpt_ls_impl', 'one_bkpt_segreg_alt._fixed_bkpt_ls_impl', (['ols_terms1', 'ols_terms2', 'u1_fixed'], {}), '(ols_terms1, ols_terms2, u1_fixed)\n', (30489, 30523), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((32629, 32702), 'segreg.model.alt.one_bkpt_segreg_alt._fixed_bkpt_ls_impl', 'one_bkpt_segreg_alt._fixed_bkpt_ls_impl', (['ols_terms2', 'ols_terms3', 'u2_fixed'], {}), '(ols_terms2, ols_terms3, u2_fixed)\n', (32668, 32702), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((4103, 4152), 
'numpy.logical_and', 'np.logical_and', (['(u1 < sorted_arr)', '(sorted_arr <= u2)'], {}), '(u1 < sorted_arr, sorted_arr <= u2)\n', (4117, 4152), True, 'import numpy as np\n'), ((7005, 7026), 'numpy.copy', 'np.copy', (['data_shiftu1'], {}), '(data_shiftu1)\n', (7012, 7026), True, 'import numpy as np\n'), ((15255, 15294), 'numpy.arange', 'np.arange', (['index2_begin', '(index2_end + 1)'], {}), '(index2_begin, index2_end + 1)\n', (15264, 15294), True, 'import numpy as np\n'), ((29674, 29708), 'numpy.array', 'np.array', (['[u1, v1, u2, v2, m1, m2]'], {}), '([u1, v1, u2, v2, m1, m2])\n', (29682, 29708), True, 'import numpy as np\n'), ((5887, 5926), 'numpy.logical_and', 'np.logical_and', (['(u1 < x_arr)', '(x_arr <= u2)'], {}), '(u1 < x_arr, x_arr <= u2)\n', (5901, 5926), True, 'import numpy as np\n'), ((6912, 6933), 'numpy.copy', 'np.copy', (['data_shiftu1'], {}), '(data_shiftu1)\n', (6919, 6933), True, 'import numpy as np\n'), ((14336, 14370), 'numpy.array', 'np.array', (['[u1, v1, u2, v2, m1, m2]'], {}), '([u1, v1, u2, v2, m1, m2])\n', (14344, 14370), True, 'import numpy as np\n'), ((14876, 14895), 'numpy.arange', 'np.arange', (['num_uniq'], {}), '(num_uniq)\n', (14885, 14895), True, 'import numpy as np\n'), ((17419, 17459), 'segreg.model.alt.regression_alt.ols_verbose', 'regression_alt.ols_verbose', (['indep1', 'dep1'], {}), '(indep1, dep1)\n', (17445, 17459), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((17756, 17796), 'segreg.model.alt.regression_alt.ols_verbose', 'regression_alt.ols_verbose', (['indep2', 'dep2'], {}), '(indep2, dep2)\n', (17782, 17796), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((18106, 18146), 'segreg.model.alt.regression_alt.ols_verbose', 'regression_alt.ols_verbose', (['indep3', 'dep3'], {}), '(indep3, dep3)\n', (18132, 18146), False, 'from segreg.model.alt import regression_alt, one_bkpt_segreg_alt, likelihood_util\n'), ((31525, 31583), 
'numpy.array', 'np.array', (['[u1_fixed, v1, u2_intersect, v2, m1, ols_slope3]'], {}), '([u1_fixed, v1, u2_intersect, v2, m1, ols_slope3])\n', (31533, 31583), True, 'import numpy as np\n'), ((33550, 33608), 'numpy.array', 'np.array', (['[u1_intersect, v1, u2_fixed, v2, ols_slope1, m3]'], {}), '([u1_intersect, v1, u2_fixed, v2, ols_slope1, m3])\n', (33558, 33608), True, 'import numpy as np\n'), ((19466, 19536), 'numpy.array', 'np.array', (['[u1_intersect, v1, u2_intersect, v2, ols_slope1, ols_slope3]'], {}), '([u1_intersect, v1, u2_intersect, v2, ols_slope1, ols_slope3])\n', (19474, 19536), True, 'import numpy as np\n')] |
#DataFormatFunctions
import numpy as np
from PIL import Image
from HelperClasses import *
from ValueDefinitions import *
def createMNISTVector(filename):
    """Load an image file and return it as an (IMG_PIXELS, 1) column vector.

    Pixels are inverted (dark strokes become high values), scaled into
    [0, 1], then shifted down by 0.001, matching the original encoding.
    """
    greyscale = Image.open(filename).convert("L")  # "L" = 8-bit greyscale
    width, height = greyscale.size
    assert (width == IMG_WIDTH and height == IMG_HEIGHT)
    vector = np.zeros((IMG_PIXELS, 1))
    for index, raw in enumerate(greyscale.getdata()):
        vector[index] = (255 - raw) / 255 - .001
    return vector
def createImageVectorFromList(coordsList):
    """Build an (IMG_PIXELS, 1) binary vector with a 1 at each listed coordinate."""
    vector = np.zeros((IMG_PIXELS, 1))
    for point in coordsList:
        vector[point.getMNISTIndex()] = 1
    return vector
def imgVectorToSquareMatrix(imgVector):
    """Unflatten a flat image vector into an IMG_HEIGHT x IMG_WIDTH matrix.

    Row i of the result is the i-th consecutive IMG_HEIGHT-long chunk of the
    input vector (assumes a square image, as in the original code).
    """
    matrix = np.zeros((IMG_HEIGHT, IMG_WIDTH))
    for row in range(IMG_WIDTH):
        start = row * IMG_HEIGHT
        matrix[row, :] = imgVector[start:start + IMG_HEIGHT]
    return matrix
def squareMatrixToImgVector(squareMatrix):
    """Flatten an IMG_HEIGHT x IMG_WIDTH matrix row-major into an (IMG_PIXELS, 1) vector."""
    vector = np.zeros((IMG_PIXELS, 1))
    flat = 0
    for row in range(IMG_HEIGHT):
        for col in range(IMG_WIDTH):
            vector[flat, 0] = squareMatrix[row, col]
            flat += 1
    return vector
def printMNISTVectorAsVectorInt(imageVector):
    """Print the flat image vector as IMG_HEIGHT rows of ints, framed by blank lines."""
    print()
    flat = 0
    for _ in range(IMG_HEIGHT):
        row = []
        for _ in range(IMG_WIDTH):
            row.append(int(imageVector[flat]))
            flat += 1
        print(row)
    print()
def getBoundsOfNumber(coordsList):
    """Return the EdgeBounds bounding box enclosing every coordinate in coordsList.

    Starts from sentinel extremes (IMG_WIDTH+1 / -1 etc.), so an empty list
    yields those sentinels — same behavior as the original implementation.
    """
    minX, minY = IMG_WIDTH + 1, IMG_HEIGHT + 1
    maxX, maxY = -1, -1
    for point in coordsList:
        minX = min(minX, point.x)
        minY = min(minY, point.y)
        maxX = max(maxX, point.x)
        maxY = max(maxY, point.y)
    return EdgeBounds(minX, minY, maxX, maxY)
def createReadableOutputVector(probVector):
    """Format a vector of probabilities as a readable string.

    Each value is rounded to 2 decimal places; entries are separated by
    ", " with a trailing ", " kept for backward compatibility with the
    original output format.
    """
    # str.join is O(n); the original "result += ..." loop was quadratic.
    return "".join(str(round(val, 2)) + ", " for val in probVector)
| [
"numpy.zeros",
"PIL.Image.open"
] | [((359, 384), 'numpy.zeros', 'np.zeros', (['(IMG_PIXELS, 1)'], {}), '((IMG_PIXELS, 1))\n', (367, 384), True, 'import numpy as np\n'), ((623, 648), 'numpy.zeros', 'np.zeros', (['(IMG_PIXELS, 1)'], {}), '((IMG_PIXELS, 1))\n', (631, 648), True, 'import numpy as np\n'), ((838, 871), 'numpy.zeros', 'np.zeros', (['(IMG_HEIGHT, IMG_WIDTH)'], {}), '((IMG_HEIGHT, IMG_WIDTH))\n', (846, 871), True, 'import numpy as np\n'), ((1080, 1105), 'numpy.zeros', 'np.zeros', (['(IMG_PIXELS, 1)'], {}), '((IMG_PIXELS, 1))\n', (1088, 1105), True, 'import numpy as np\n'), ((177, 197), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (187, 197), False, 'from PIL import Image\n')] |
import chess
import numpy as np
class State:
    """Thin wrapper around a python-chess board with a numpy serialization."""

    def __init__(self, board=None):
        # Default to the standard starting position when no board is given.
        if board is None:
            self.board = chess.Board()
        else:
            self.board = board

    def serialize(self) -> np.ndarray:
        """
        Convert board into matrix representation for use with numpy.
        state[0:63] are board squares (0 = empty, -int=black piece, +int = white piece)
        state[64] = turn
        state[65] = white kingside castling rights
        state[66] = white queenside castling rights
        state[67] = black kingside castling rights
        state[68] = black queenside castling rights
        """
        assert self.board.is_valid()
        # https://stackoverflow.com/questions/55876336/is-there-a-way-to-convert-a-python-chess-board-into-a-list-of-integers
        # Fix: the original allocated this array twice (the first np.zeros
        # result was immediately discarded).
        state = np.zeros(64 + 5)
        for sq in chess.scan_reversed(
            self.board.occupied_co[chess.WHITE]
        ):  # Check if white
            state[sq] = self.board.piece_type_at(sq)
        for sq in chess.scan_reversed(
            self.board.occupied_co[chess.BLACK]
        ):  # Check if black
            state[sq] = -self.board.piece_type_at(sq)
        # turn
        state[64] = float(self.board.turn)
        # white kingside castling rights
        state[65] = self.board.has_kingside_castling_rights(chess.WHITE)
        # white queenside castling rights
        state[66] = self.board.has_queenside_castling_rights(chess.WHITE)
        # black kingside castling rights (fix: original queried queenside here)
        state[67] = self.board.has_kingside_castling_rights(chess.BLACK)
        # black queenside castling rights (fix: original queried kingside here)
        state[68] = self.board.has_queenside_castling_rights(chess.BLACK)
        return state

    def edges(self) -> list:
        """Return the list of legal moves from the current position."""
        return list(self.board.legal_moves)
| [
"chess.scan_reversed",
"numpy.zeros",
"chess.Board"
] | [((699, 718), 'numpy.zeros', 'np.zeros', (['(8 * 8 + 5)'], {}), '(8 * 8 + 5)\n', (707, 718), True, 'import numpy as np\n'), ((862, 878), 'numpy.zeros', 'np.zeros', (['(64 + 5)'], {}), '(64 + 5)\n', (870, 878), True, 'import numpy as np\n'), ((897, 953), 'chess.scan_reversed', 'chess.scan_reversed', (['self.board.occupied_co[chess.WHITE]'], {}), '(self.board.occupied_co[chess.WHITE])\n', (916, 953), False, 'import chess\n'), ((1066, 1122), 'chess.scan_reversed', 'chess.scan_reversed', (['self.board.occupied_co[chess.BLACK]'], {}), '(self.board.occupied_co[chess.BLACK])\n', (1085, 1122), False, 'import chess\n'), ((134, 147), 'chess.Board', 'chess.Board', ([], {}), '()\n', (145, 147), False, 'import chess\n')] |
import logging
import unittest
import numpy as np
import torch
from reagent.core import types as rlt
from reagent.evaluation.evaluation_data_page import EvaluationDataPage
from reagent.evaluation.ope_adapter import OPEstimatorAdapter
from reagent.ope.estimators.contextual_bandits_estimators import (
DMEstimator,
DoublyRobustEstimator,
IPSEstimator,
SwitchDREstimator,
SwitchEstimator,
)
from reagent.test.evaluation.test_evaluation_data_page import (
FakeSeq2SlateRewardNetwork,
FakeSeq2SlateTransformerNet,
)
logger = logging.getLogger(__name__)
class TestOPEModuleAlgs(unittest.TestCase):
    """Unit tests for the OPE estimator adapters on a tiny slate-ranking fixture.

    Checks Direct Method, IPS, Doubly Robust, Switch and SwitchDR estimates
    against hand-computed values for three logged slates.
    """

    # NOTE(review): these class constants are not referenced inside this test
    # method — presumably shared with sibling tests elsewhere; verify before removing.
    GAMMA = 0.9
    CPE_PASS_BAR = 1.0
    CPE_MAX_VALUE = 2.0
    MAX_HORIZON = 1000
    NOISE_EPSILON = 0.3
    EPISODES = 2
    def test_seq2slate_eval_data_page(self):
        """
        Create 3 slate ranking logs and evaluate using Direct Method, Inverse
        Propensity Scores, and Doubly Robust.
        The logs are as follows:
        state: [1, 0, 0], [0, 1, 0], [0, 0, 1]
        indices in logged slates: [3, 2], [3, 2], [3, 2]
        model output indices: [2, 3], [3, 2], [2, 3]
        logged reward: 4, 5, 7
        logged propensities: 0.2, 0.5, 0.4
        predicted rewards on logged slates: 2, 4, 6
        predicted rewards on model outputted slates: 1, 4, 5
        predicted propensities: 0.4, 0.3, 0.7
        When eval_greedy=True:
        Direct Method uses the predicted rewards on model outputted slates.
        Thus the result is expected to be (1 + 4 + 5) / 3
        Inverse Propensity Scores would scale the reward by 1.0 / logged propensities
        whenever the model output slate matches with the logged slate.
        Since only the second log matches with the model output, the IPS result
        is expected to be 5 / 0.5 / 3
        Doubly Robust is the sum of the direct method result and propensity-scaled
        reward difference; the latter is defined as:
        1.0 / logged_propensities * (logged reward - predicted reward on logged slate)
         * Indicator(model slate == logged slate)
        Since only the second logged slate matches with the model outputted slate,
        the DR result is expected to be (1 + 4 + 5) / 3 + 1.0 / 0.5 * (5 - 4) / 3
        When eval_greedy=False:
        Only Inverse Propensity Scores would be accurate. Because it would be too
        expensive to compute all possible slates' propensities and predicted rewards
        for Direct Method.
        The expected IPS = (0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3
        """
        batch_size = 3
        state_dim = 3
        src_seq_len = 2
        tgt_seq_len = 2
        candidate_dim = 2
        reward_net = FakeSeq2SlateRewardNetwork()
        seq2slate_net = FakeSeq2SlateTransformerNet()
        # One-hot candidate features, shared across the batch.
        src_seq = torch.eye(candidate_dim).repeat(batch_size, 1, 1)
        tgt_out_idx = torch.LongTensor([[3, 2], [3, 2], [3, 2]])
        # Gather candidate features in logged-slate order (indices are offset by 2).
        tgt_out_seq = src_seq[
            torch.arange(batch_size).repeat_interleave(tgt_seq_len),
            tgt_out_idx.flatten() - 2,
        ].reshape(batch_size, tgt_seq_len, candidate_dim)
        ptb = rlt.PreprocessedTrainingBatch(
            training_input=rlt.PreprocessedRankingInput(
                state=rlt.FeatureData(float_features=torch.eye(state_dim)),
                src_seq=rlt.FeatureData(float_features=src_seq),
                tgt_out_seq=rlt.FeatureData(float_features=tgt_out_seq),
                src_src_mask=torch.ones(batch_size, src_seq_len, src_seq_len),
                tgt_out_idx=tgt_out_idx,
                tgt_out_probs=torch.tensor([0.2, 0.5, 0.4]),
                slate_reward=torch.tensor([4.0, 5.0, 7.0]),
            ),
            extras=rlt.ExtraData(
                sequence_number=torch.tensor([0, 0, 0]),
                mdp_id=np.array(["0", "1", "2"]),
            ),
        )
        edp = EvaluationDataPage.create_from_tensors_seq2slate(
            seq2slate_net, reward_net, ptb.training_input, eval_greedy=True
        )
        logger.info("---------- Start evaluating eval_greedy=True -----------------")
        doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator())
        dm_estimator = OPEstimatorAdapter(DMEstimator())
        ips_estimator = OPEstimatorAdapter(IPSEstimator())
        switch_estimator = OPEstimatorAdapter(SwitchEstimator())
        switch_dr_estimator = OPEstimatorAdapter(SwitchDREstimator())
        doubly_robust = doubly_robust_estimator.estimate(edp)
        inverse_propensity = ips_estimator.estimate(edp)
        direct_method = dm_estimator.estimate(edp)
        # Verify that Switch with low exponent is equivalent to IPS
        switch_ips = switch_estimator.estimate(edp, exp_base=1)
        # Verify that Switch with no candidates is equivalent to DM
        switch_dm = switch_estimator.estimate(edp, candidates=0)
        # Verify that SwitchDR with low exponent is equivalent to DR
        switch_dr_dr = switch_dr_estimator.estimate(edp, exp_base=1)
        # Verify that SwitchDR with no candidates is equivalent to DM
        switch_dr_dm = switch_dr_estimator.estimate(edp, candidates=0)
        logger.info(f"{direct_method}, {inverse_propensity}, {doubly_robust}")
        avg_logged_reward = (4 + 5 + 7) / 3
        # Expected values derived in the docstring above.
        self.assertAlmostEqual(direct_method.raw, (1 + 4 + 5) / 3, delta=1e-6)
        self.assertAlmostEqual(
            direct_method.normalized, direct_method.raw / avg_logged_reward, delta=1e-6
        )
        self.assertAlmostEqual(inverse_propensity.raw, 5 / 0.5 / 3, delta=1e-6)
        self.assertAlmostEqual(
            inverse_propensity.normalized,
            inverse_propensity.raw / avg_logged_reward,
            delta=1e-6,
        )
        self.assertAlmostEqual(
            doubly_robust.raw, direct_method.raw + 1 / 0.5 * (5 - 4) / 3, delta=1e-6
        )
        self.assertAlmostEqual(
            doubly_robust.normalized, doubly_robust.raw / avg_logged_reward, delta=1e-6
        )
        self.assertAlmostEqual(switch_ips.raw, inverse_propensity.raw, delta=1e-6)
        self.assertAlmostEqual(switch_dm.raw, direct_method.raw, delta=1e-6)
        self.assertAlmostEqual(switch_dr_dr.raw, doubly_robust.raw, delta=1e-6)
        self.assertAlmostEqual(switch_dr_dm.raw, direct_method.raw, delta=1e-6)
        logger.info("---------- Finish evaluating eval_greedy=True -----------------")
        logger.info("---------- Start evaluating eval_greedy=False -----------------")
        edp = EvaluationDataPage.create_from_tensors_seq2slate(
            seq2slate_net, reward_net, ptb.training_input, eval_greedy=False
        )
        doubly_robust_estimator = OPEstimatorAdapter(DoublyRobustEstimator())
        dm_estimator = OPEstimatorAdapter(DMEstimator())
        ips_estimator = OPEstimatorAdapter(IPSEstimator())
        doubly_robust = doubly_robust_estimator.estimate(edp)
        inverse_propensity = ips_estimator.estimate(edp)
        direct_method = dm_estimator.estimate(edp)
        # Only IPS has a closed-form expectation in the non-greedy case (see docstring).
        self.assertAlmostEqual(
            inverse_propensity.raw,
            (0.4 / 0.2 * 4 + 0.3 / 0.5 * 5 + 0.7 / 0.4 * 7) / 3,
            delta=1e-6,
        )
        self.assertAlmostEqual(
            inverse_propensity.normalized,
            inverse_propensity.raw / avg_logged_reward,
            delta=1e-6,
        )
        logger.info("---------- Finish evaluating eval_greedy=False -----------------")
| [
"logging.getLogger",
"reagent.evaluation.evaluation_data_page.EvaluationDataPage.create_from_tensors_seq2slate",
"torch.LongTensor",
"torch.eye",
"reagent.test.evaluation.test_evaluation_data_page.FakeSeq2SlateTransformerNet",
"reagent.ope.estimators.contextual_bandits_estimators.IPSEstimator",
"reagent... | [((552, 579), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (569, 579), False, 'import logging\n'), ((2703, 2731), 'reagent.test.evaluation.test_evaluation_data_page.FakeSeq2SlateRewardNetwork', 'FakeSeq2SlateRewardNetwork', ([], {}), '()\n', (2729, 2731), False, 'from reagent.test.evaluation.test_evaluation_data_page import FakeSeq2SlateRewardNetwork, FakeSeq2SlateTransformerNet\n'), ((2756, 2785), 'reagent.test.evaluation.test_evaluation_data_page.FakeSeq2SlateTransformerNet', 'FakeSeq2SlateTransformerNet', ([], {}), '()\n', (2783, 2785), False, 'from reagent.test.evaluation.test_evaluation_data_page import FakeSeq2SlateRewardNetwork, FakeSeq2SlateTransformerNet\n'), ((2877, 2919), 'torch.LongTensor', 'torch.LongTensor', (['[[3, 2], [3, 2], [3, 2]]'], {}), '([[3, 2], [3, 2], [3, 2]])\n', (2893, 2919), False, 'import torch\n'), ((3871, 3988), 'reagent.evaluation.evaluation_data_page.EvaluationDataPage.create_from_tensors_seq2slate', 'EvaluationDataPage.create_from_tensors_seq2slate', (['seq2slate_net', 'reward_net', 'ptb.training_input'], {'eval_greedy': '(True)'}), '(seq2slate_net, reward_net,\n ptb.training_input, eval_greedy=True)\n', (3919, 3988), False, 'from reagent.evaluation.evaluation_data_page import EvaluationDataPage\n'), ((6483, 6601), 'reagent.evaluation.evaluation_data_page.EvaluationDataPage.create_from_tensors_seq2slate', 'EvaluationDataPage.create_from_tensors_seq2slate', (['seq2slate_net', 'reward_net', 'ptb.training_input'], {'eval_greedy': '(False)'}), '(seq2slate_net, reward_net,\n ptb.training_input, eval_greedy=False)\n', (6531, 6601), False, 'from reagent.evaluation.evaluation_data_page import EvaluationDataPage\n'), ((4146, 4169), 'reagent.ope.estimators.contextual_bandits_estimators.DoublyRobustEstimator', 'DoublyRobustEstimator', ([], {}), '()\n', (4167, 4169), False, 'from reagent.ope.estimators.contextual_bandits_estimators import DMEstimator, DoublyRobustEstimator, IPSEstimator, 
SwitchDREstimator, SwitchEstimator\n'), ((4213, 4226), 'reagent.ope.estimators.contextual_bandits_estimators.DMEstimator', 'DMEstimator', ([], {}), '()\n', (4224, 4226), False, 'from reagent.ope.estimators.contextual_bandits_estimators import DMEstimator, DoublyRobustEstimator, IPSEstimator, SwitchDREstimator, SwitchEstimator\n'), ((4271, 4285), 'reagent.ope.estimators.contextual_bandits_estimators.IPSEstimator', 'IPSEstimator', ([], {}), '()\n', (4283, 4285), False, 'from reagent.ope.estimators.contextual_bandits_estimators import DMEstimator, DoublyRobustEstimator, IPSEstimator, SwitchDREstimator, SwitchEstimator\n'), ((4333, 4350), 'reagent.ope.estimators.contextual_bandits_estimators.SwitchEstimator', 'SwitchEstimator', ([], {}), '()\n', (4348, 4350), False, 'from reagent.ope.estimators.contextual_bandits_estimators import DMEstimator, DoublyRobustEstimator, IPSEstimator, SwitchDREstimator, SwitchEstimator\n'), ((4401, 4420), 'reagent.ope.estimators.contextual_bandits_estimators.SwitchDREstimator', 'SwitchDREstimator', ([], {}), '()\n', (4418, 4420), False, 'from reagent.ope.estimators.contextual_bandits_estimators import DMEstimator, DoublyRobustEstimator, IPSEstimator, SwitchDREstimator, SwitchEstimator\n'), ((6673, 6696), 'reagent.ope.estimators.contextual_bandits_estimators.DoublyRobustEstimator', 'DoublyRobustEstimator', ([], {}), '()\n', (6694, 6696), False, 'from reagent.ope.estimators.contextual_bandits_estimators import DMEstimator, DoublyRobustEstimator, IPSEstimator, SwitchDREstimator, SwitchEstimator\n'), ((6740, 6753), 'reagent.ope.estimators.contextual_bandits_estimators.DMEstimator', 'DMEstimator', ([], {}), '()\n', (6751, 6753), False, 'from reagent.ope.estimators.contextual_bandits_estimators import DMEstimator, DoublyRobustEstimator, IPSEstimator, SwitchDREstimator, SwitchEstimator\n'), ((6798, 6812), 'reagent.ope.estimators.contextual_bandits_estimators.IPSEstimator', 'IPSEstimator', ([], {}), '()\n', (6810, 6812), False, 'from 
reagent.ope.estimators.contextual_bandits_estimators import DMEstimator, DoublyRobustEstimator, IPSEstimator, SwitchDREstimator, SwitchEstimator\n'), ((2805, 2829), 'torch.eye', 'torch.eye', (['candidate_dim'], {}), '(candidate_dim)\n', (2814, 2829), False, 'import torch\n'), ((3320, 3359), 'reagent.core.types.FeatureData', 'rlt.FeatureData', ([], {'float_features': 'src_seq'}), '(float_features=src_seq)\n', (3335, 3359), True, 'from reagent.core import types as rlt\n'), ((3389, 3432), 'reagent.core.types.FeatureData', 'rlt.FeatureData', ([], {'float_features': 'tgt_out_seq'}), '(float_features=tgt_out_seq)\n', (3404, 3432), True, 'from reagent.core import types as rlt\n'), ((3463, 3511), 'torch.ones', 'torch.ones', (['batch_size', 'src_seq_len', 'src_seq_len'], {}), '(batch_size, src_seq_len, src_seq_len)\n', (3473, 3511), False, 'import torch\n'), ((3584, 3613), 'torch.tensor', 'torch.tensor', (['[0.2, 0.5, 0.4]'], {}), '([0.2, 0.5, 0.4])\n', (3596, 3613), False, 'import torch\n'), ((3644, 3673), 'torch.tensor', 'torch.tensor', (['[4.0, 5.0, 7.0]'], {}), '([4.0, 5.0, 7.0])\n', (3656, 3673), False, 'import torch\n'), ((3756, 3779), 'torch.tensor', 'torch.tensor', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3768, 3779), False, 'import torch\n'), ((3804, 3829), 'numpy.array', 'np.array', (["['0', '1', '2']"], {}), "(['0', '1', '2'])\n", (3812, 3829), True, 'import numpy as np\n'), ((2963, 2987), 'torch.arange', 'torch.arange', (['batch_size'], {}), '(batch_size)\n', (2975, 2987), False, 'import torch\n'), ((3273, 3293), 'torch.eye', 'torch.eye', (['state_dim'], {}), '(state_dim)\n', (3282, 3293), False, 'import torch\n')] |
#from frm_generate_data_np import *
import numpy as np
from numpy import pi,sqrt
model_folder="models/freq_2019_07_02_2/"
from frm_modulations import mod_list,cont_phase_mod_list,linear_mod_const
# import tensorflow as tf
import datetime
import pickle
import sys
import copy
from frm_dataset_creator import *
from numba import jit
from frm_modulations_fast import modulate_symbols_fast,modulate_symbols
# In[199]:
def func(my_dict):
    """Top-level Pool.map helper: unpack a kwargs dict into generate_dataset_sig2.

    Must stay module-level so it is picklable by multiprocessing.
    """
    # print(my_dict)
    return generate_dataset_sig2(**my_dict)
def generate_dataset_sig2_parallel(n_samples, pkt_size,max_sps,mod_list,sps_rng,pulse_ebw_list,timing_offset_rng,fading_spread_rng,freq_err_rng,phase_err_rng,snr_rng, complex_fading = False,freq_in_hz = False,
                          seed = None, fname = None, version = 1,nthreads = 10 ): #1e4
    """Generate a dataset across `nthreads` worker processes and merge the results.

    Each worker runs generate_dataset_sig2 on n_samples // nthreads samples
    (remainder samples are dropped) with a distinct seed offset; per-sample
    arrays from the workers are concatenated along axis 0. The merged dict is
    optionally pickled to `fname` and returned.
    """
    # Capture all call arguments; 'nthreads' is removed so the dict matches
    # generate_dataset_sig2's signature.
    args_in = locals()
    args_in.pop('nthreads',None)
    rand_step =374861
    args_list = []
    for i in range(nthreads):
        args_list.append(copy.deepcopy(args_in))
    # Give each worker a well-separated seed so their random streams differ.
    if args_in['seed'] is not None:
        for indx,args in enumerate(args_list):
            args['seed'] = args_in['seed'] + indx * rand_step
    get_tmp_name = lambda base, indx : "{}_{}".format(base, indx)
    # if fname is not None:
    #     base_name = fname
    # else:
    #     base_name = 'tmp/dataset'
    # base_name = '/tmp/dataset{}'.format(np.random.randint(0,1000000))
    # Workers return their datasets in memory (fname=None) instead of via temp files.
    for indx,args in enumerate(args_list):
        args['fname'] = None #get_tmp_name(base_name,indx)
        args['n_samples'] = args_in['n_samples']//nthreads
    p = Pool(nthreads)
    datasets = p.map(func, args_list)
    # with open(get_tmp_name(base_name,0),'rb') as f:
    #     dataset = pickle.load(f)
    # Merge: append every per-sample array of workers 1..n-1 onto worker 0's
    # arrays; the 'args' and 'time' entries are rewritten below, not merged.
    dataset_out = datasets[0]
    for i in range(1,nthreads):
        dataset_i = datasets[i]
        for k1 in dataset_out.keys():
            if isinstance(dataset_out[k1],dict):
                for k2 in dataset_out[k1].keys():
                    # print(k1,k2)
                    if k1!='args' and k1!='time':
                        dataset_out[k1][k2] = np.append(dataset_out[k1][k2],dataset_i[k1][k2],axis = 0)
    dataset_out['args'] = args_in
    dataset_out['time'] = str(datetime.datetime.now())
    if fname is not None:
        with open(fname,'wb') as f:
            pickle.dump(dataset_out,f)
    return dataset_out
def generate_dataset_sig2(n_samples, pkt_size,max_sps,mod_list,sps_rng,pulse_ebw_list,timing_offset_rng,fading_spread_rng,freq_err_rng,phase_err_rng,snr_rng,complex_fading = False, freq_in_hz = False,
                          seed = None, fname = None, version = 1):
    """Generate n_samples modulated packets with random channel impairments.

    Per-sample parameters (modulation, samples-per-symbol, pulse bandwidth,
    timing/fading/frequency/phase/SNR) are drawn from the given ranges; each
    sample is synthesized by create_sample_fast. Returns a nested dict with
    'sig' (signal variants), 'params' (ground-truth labels), 'data', 'args',
    'time' and 'version' keys; optionally pickled to `fname`.
    """
    args = locals()
    if seed is not None:
        np.random.seed(seed)
    # Pre-allocate (n_samples, pkt_size, 2) I/Q arrays for each signal variant.
    comb_v = np.zeros((n_samples,pkt_size,2))
    carrier_v = np.zeros((n_samples,pkt_size,2))
    fading_v = np.zeros((n_samples,pkt_size,2))
    clean_v = np.zeros((n_samples,pkt_size,2))
    timing_v = np.zeros((n_samples,pkt_size,2))
    raw_v = np.zeros((n_samples,pkt_size,2))
    mod_v = np.zeros((n_samples,len(mod_list)))
    if not complex_fading:
        coeff = np.zeros((n_samples,6))
    else:
        coeff = np.zeros((n_samples,6),dtype='complex')
    # One-hot modulation labels (NOTE(review): mod_v is never stored in the
    # returned dict — the label actually exported is op['params']['mod']).
    mod_index = np.random.choice(len(mod_list),(n_samples,)).astype(np.int_)
    mod_v[range(n_samples),mod_index] = 1
    sps = np.random.uniform(sps_rng[0],sps_rng[1],(n_samples,))
    pulse_ebw = np.random.choice(pulse_ebw_list,(n_samples,))
    timing_offset = np.random.uniform(timing_offset_rng[0],timing_offset_rng[1],(n_samples,))
    fading_spread = np.random.uniform(fading_spread_rng[0],fading_spread_rng[1],(n_samples,))
    freq_err = np.random.uniform(freq_err_rng[0],freq_err_rng[1],(n_samples,))
    phase_err = np.random.uniform(phase_err_rng[0],phase_err_rng[1],(n_samples,))
    # snr_rng with 2 entries is a uniform range; otherwise a discrete choice set.
    if np.array(snr_rng).size==2:
        snr = np.random.uniform(snr_rng[0],snr_rng[1],(n_samples,))
    else:
        snr = np.random.choice(snr_rng,(n_samples,))
    progress_step = 1000
    a = datetime.datetime.now()
    strt_time = copy.deepcopy(a)
    for samp_indx in range(n_samples):
        mod = mod_list[mod_index[samp_indx]]
        op = create_sample_fast( mod = mod,pkt_len = pkt_size,sps=sps[samp_indx],pulse_ebw = pulse_ebw[samp_indx],
                    timing_offset = timing_offset[samp_indx],
                    fading_spread = fading_spread[samp_indx],
                    freq_err = freq_err[samp_indx], phase_err =phase_err[samp_indx],
                    snr = snr[samp_indx], max_sps = max_sps, complex_fading = complex_fading, freq_in_hz = freq_in_hz,
                    seed = None)
        # NOTE(review): this overwrites column 0 of the one-hot labels every
        # iteration; since mod_v is not exported it is dead code — looks like
        # leftover debugging. Confirm before removing.
        mod_v[:,0] = 1
        comb_v[samp_indx] ,carrier_v[samp_indx],fading_v[samp_indx],clean_v[samp_indx],timing_v[samp_indx],raw_v[samp_indx],coeff[samp_indx] = op
        # Progress line with an elapsed/remaining-time estimate every 1000 samples.
        if samp_indx%progress_step == 0 and samp_indx>0:
            b = datetime.datetime.now()
            diff_time = b-a
            # the exact output you're looking for:
            sys.stdout.write("\rGenerated {} out of {} ({:.1f}%), Elapsed {} , estimated {}".format(samp_indx,n_samples, float(samp_indx)/n_samples*100, b-strt_time , (n_samples-samp_indx)*diff_time /progress_step ))
            sys.stdout.flush()
            a = copy.deepcopy(b)
    # Assemble the output dictionary of signals and ground-truth parameters.
    op ={'sig':{},'params':{},'data':{}}
    op['sig']['comb'] = comb_v
    op['sig']['timing_fading_carrier'] = carrier_v
    op['sig']['timing_fading'] = fading_v
    op['sig']['timing'] = clean_v
    op['params']['mod'] = mod_index
    op['params']['fading_spread'] = fading_spread
    op['params']['fading_taps'] = coeff
    op['params']['freq_off'] = freq_err
    op['params']['phase_off'] = phase_err
    op['params']['timing_off'] = timing_offset
    op['params']['symb_rate'] = sps
    op['data']['binary_marking'] = timing_v
    op['params']['sps'] = sps
    op['params']['pulse_ebw'] = pulse_ebw
    op['sig']['timing_raw_unique'] = raw_v
    op['params']['snr'] = snr
    op['args'] = args
    op['time'] = str(datetime.datetime.now())
    op['version'] = version
    if fname is not None:
        with open(fname,'wb') as f:
            pickle.dump(op,f)
    return op
def create_sample( mod = 'bpsk',pkt_len = 128,sps=8,pulse_ebw = 0.35,
                    timing_offset = 0.5,
                    fading_spread = 1,
                    freq_err = 0.0001, phase_err = np.pi,
                    snr = 10, max_sps = 128, complex_fading = False, freq_in_hz = False,
                    seed = None):
    """Synthesize one packet with timing, fading, frequency/phase and noise impairments.

    Returns (comb, carrier, fading, clean, timing, raw, coeff): progressively
    impaired I/Q views of the same packet, a symbol-boundary marking, the raw
    symbol values (NaN off symbol transitions), and the fading taps.
    Reference implementation; see create_sample_fast for the optimized variant.
    """
    samp_rate = 1
    if seed is not None:
        np.random.seed(seed)
    # Continuous-phase mods are binary; linear mods take their constellation order.
    if mod in cont_phase_mod_list:
        order = 2
    else: # Linear modulation
        order = linear_mod_const[mod].size
    # Enough symbols to cover pkt_len after timing decimation (with margin).
    n_symbols = int( (pkt_len)/(sps*0.5)) + 2
    data_symbs=np.random.randint(0,order,n_symbols)
    mag = timing_offset
    timing_offset = calc_timing_offset(mag, max_sps)
    timing_step = int(max_sps/sps)
    # Modulate at the highest rate, then decimate to simulate a timing offset.
    mod_symbs_max_sps=modulate_symbols(data_symbs,mod,max_sps,ebw = pulse_ebw)
    data_symbs_max_sps= np.repeat(data_symbs,max_sps)
    t_max_sps= np.arange(0,1.0*max_sps*n_symbols/samp_rate,1.0/samp_rate)
    # Alternating 1/0 pattern marking symbol periods at the max sample rate.
    transition_data_ideal = np.array(([1,]*max_sps + [0,]*max_sps) * int(n_symbols/2+1))
    mod_symbs_timing_err = simulate_timing_error(mod_symbs_max_sps,timing_offset,timing_step, pkt_len)
    data_symbs_timing_err = simulate_timing_error(data_symbs_max_sps,timing_offset,timing_step, pkt_len)
    mod_raw_symbs_timing_err = modulate_symbols(data_symbs_timing_err,mod,sps = 1, ebw = None, pulse_shape = None)
    t_timing_err = simulate_timing_error(t_max_sps,timing_offset,timing_step, pkt_len)
    marking_b_timing = simulate_timing_error(transition_data_ideal,timing_offset,timing_step, pkt_len)
    # Symbol transitions = edges of the alternating pattern after decimation.
    transition_data_timing = simulate_timing_error(transition_data_ideal,timing_offset,timing_step, pkt_len+1)
    transition_data_timing = np.abs(np.diff(transition_data_timing)).astype('int')
    # Keep raw symbol values only at transitions; NaN elsewhere.
    mod_raw_unique_symbs_timing_err = mod_raw_symbs_timing_err*transition_data_timing
    mod_raw_unique_symbs_timing_err[transition_data_timing==0]=np.nan+1j*np.nan
    if not complex_fading:
        coeff = generate_fading_taps(max_sps / timing_step, fading_spread)
        mod_symbs_timing_fading = simulate_fading_channel(mod_symbs_timing_err, coeff)
    else:
        coeff=generate_complex_fading_taps(max_sps / timing_step, fading_spread)
        mod_symbs_timing_fading = simulate_fading_channel_complex(mod_symbs_timing_err, coeff)
    # freq_in_hz: use sample indices instead of decimated timestamps as the time base.
    if not freq_in_hz:
        t_freq = t_timing_err
    else:
        t_freq = np.arange(t_timing_err.size)
    mod_symbs_timing_fading_freq_err = simulate_frequency_error(mod_symbs_timing_fading,t_freq,freq_err,phase_err)
    carrier_timing_err = simulate_frequency_error(1.0,t_freq,freq_err,phase_err)
    mod_symbs_timing_fading_freq_noise = add_noise(mod_symbs_timing_fading_freq_err,snr)
    op = mod_symbs_timing_fading_freq_noise
    comb = assign_iq2(mod_symbs_timing_fading_freq_noise)
    carrier = assign_iq2(mod_symbs_timing_fading_freq_err)
    fading = assign_iq2(mod_symbs_timing_fading)
    clean = assign_iq2(mod_symbs_timing_err)#assign_iq2(mod_symbs_max_sps)#
    # One-hot (pkt_len, 2) encoding of the alternating symbol marking.
    timing = np.zeros((pkt_len,2))
    timing[range(pkt_len),marking_b_timing] = 1
    raw = assign_iq2(mod_raw_unique_symbs_timing_err)
    return (comb ,carrier,fading,clean,timing,raw,coeff)
@jit(nopython=True)
def create_marking(max_sps, timing_step, timing_offset, pkt_len):
    """Return a (pkt_len+1,) int array of alternating 0/1 runs.

    Starting from int(timing_offset), the index advances by timing_step each
    sample; the flag toggles whenever the index wraps past a multiple of
    max_sps (i.e. crosses a symbol boundary at the maximum sample rate).
    """
    marking = np.zeros(pkt_len + 1, dtype=np.int_)
    pos = int(timing_offset)
    flag = True
    prev_phase = pos % max_sps
    for i in range(marking.size):
        marking[i] = flag
        pos += timing_step
        phase = pos % max_sps
        if phase < prev_phase:  # wrapped around -> new symbol period
            flag = not flag
        prev_phase = phase
    return marking
def create_sample_fast( mod = 'bpsk',pkt_len = 128,sps=8,pulse_ebw = 0.35,
                    timing_offset = 0.5,
                    fading_spread = 1,
                    freq_err = 0.0001, phase_err = np.pi,
                    snr = 10, max_sps = 128,complex_fading = False, freq_in_hz = False,
                    seed = None):
    """Optimized variant of create_sample; same return tuple.

    Differences: modulation and timing decimation are fused in
    modulate_symbols_fast, and the symbol marking comes from the jitted
    create_marking instead of decimating an ideal alternating pattern.
    """
    samp_rate = 1
    if seed is not None:
        np.random.seed(seed)
    # Continuous-phase mods are binary; linear mods take their constellation order.
    if mod in cont_phase_mod_list:
        order = 2
    else: # Linear modulation
        order = linear_mod_const[mod].size
    n_symbols = int( (pkt_len)/(sps*0.5)) + 2
    data_symbs=np.random.randint(0,order,n_symbols)
    mag = timing_offset
    timing_offset = calc_timing_offset(mag, max_sps)
    timing_step = int(max_sps/sps)
    # Fast path: timing offset/step are applied inside the modulator.
    mod_symbs_max_sps=modulate_symbols_fast(data_symbs,mod,max_sps,timing_offset,timing_step,ebw = pulse_ebw)
    data_symbs_max_sps= np.repeat(data_symbs,max_sps)
    t_max_sps= np.arange(0,1.0*max_sps*n_symbols/samp_rate,1.0/samp_rate)
    mod_symbs_timing_err = mod_symbs_max_sps[:pkt_len]
    data_symbs_timing_err = simulate_timing_error(data_symbs_max_sps,timing_offset,timing_step, pkt_len)
    mod_raw_symbs_timing_err = modulate_symbols(data_symbs_timing_err,mod,sps = 1, ebw = None, pulse_shape = None)
    t_timing_err = simulate_timing_error(t_max_sps,timing_offset,timing_step, pkt_len)
    # Jitted symbol-period marking (pkt_len+1 long; diff below needs the extra sample).
    transition_data_timing = create_marking(max_sps,timing_step,timing_offset,pkt_len)
    marking_b_timing = transition_data_timing[:-1]
    transition_data_timing = np.abs(np.diff(transition_data_timing)).astype('int')
    # Keep raw symbol values only at transitions; NaN elsewhere.
    mod_raw_unique_symbs_timing_err = mod_raw_symbs_timing_err*transition_data_timing
    mod_raw_unique_symbs_timing_err[transition_data_timing==0]=np.nan+1j*np.nan
    if not complex_fading:
        coeff = generate_fading_taps(max_sps / timing_step, fading_spread)
        mod_symbs_timing_fading = simulate_fading_channel(mod_symbs_timing_err, coeff)
    else:
        coeff=generate_complex_fading_taps(max_sps / timing_step, fading_spread)
        mod_symbs_timing_fading = simulate_fading_channel_complex(mod_symbs_timing_err, coeff)
    # freq_in_hz: use sample indices instead of decimated timestamps as the time base.
    if not freq_in_hz:
        t_freq = t_timing_err
    else:
        t_freq = np.arange(t_timing_err.size)
    mod_symbs_timing_fading_freq_err = simulate_frequency_error(mod_symbs_timing_fading,t_freq,freq_err,phase_err)
    carrier_timing_err = simulate_frequency_error(1.0,t_freq,freq_err,phase_err)
    mod_symbs_timing_fading_freq_noise = add_noise(mod_symbs_timing_fading_freq_err,snr)
    op = mod_symbs_timing_fading_freq_noise
    comb = assign_iq2(mod_symbs_timing_fading_freq_noise)
    carrier = assign_iq2(mod_symbs_timing_fading_freq_err)
    fading = assign_iq2(mod_symbs_timing_fading)
    clean = assign_iq2(mod_symbs_timing_err)
    # One-hot (pkt_len, 2) encoding of the alternating symbol marking.
    timing = np.zeros((pkt_len,2))
    timing[range(pkt_len),marking_b_timing.astype(np.int_)] = 1
    raw = assign_iq2(mod_raw_unique_symbs_timing_err)
    return (comb ,carrier,fading,clean,timing,raw,coeff)
def assign_iq2( complex_vec):
    """Split a complex vector into a real (N, 2) array of [I, Q] columns."""
    return np.column_stack((np.real(complex_vec), np.imag(complex_vec)))
if __name__ == '__main__':
test_data_sig_parallel() | [
"frm_modulations_fast.modulate_symbols_fast",
"frm_modulations_fast.modulate_symbols",
"numpy.array",
"copy.deepcopy",
"numpy.imag",
"numpy.arange",
"numpy.repeat",
"numpy.diff",
"numpy.real",
"numpy.random.seed",
"sys.stdout.flush",
"numpy.random.choice",
"numba.jit",
"pickle.dump",
"nu... | [((9567, 9585), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (9570, 9585), False, 'from numba import jit\n'), ((2796, 2830), 'numpy.zeros', 'np.zeros', (['(n_samples, pkt_size, 2)'], {}), '((n_samples, pkt_size, 2))\n', (2804, 2830), True, 'import numpy as np\n'), ((2845, 2879), 'numpy.zeros', 'np.zeros', (['(n_samples, pkt_size, 2)'], {}), '((n_samples, pkt_size, 2))\n', (2853, 2879), True, 'import numpy as np\n'), ((2893, 2927), 'numpy.zeros', 'np.zeros', (['(n_samples, pkt_size, 2)'], {}), '((n_samples, pkt_size, 2))\n', (2901, 2927), True, 'import numpy as np\n'), ((2940, 2974), 'numpy.zeros', 'np.zeros', (['(n_samples, pkt_size, 2)'], {}), '((n_samples, pkt_size, 2))\n', (2948, 2974), True, 'import numpy as np\n'), ((2988, 3022), 'numpy.zeros', 'np.zeros', (['(n_samples, pkt_size, 2)'], {}), '((n_samples, pkt_size, 2))\n', (2996, 3022), True, 'import numpy as np\n'), ((3034, 3068), 'numpy.zeros', 'np.zeros', (['(n_samples, pkt_size, 2)'], {}), '((n_samples, pkt_size, 2))\n', (3042, 3068), True, 'import numpy as np\n'), ((3378, 3433), 'numpy.random.uniform', 'np.random.uniform', (['sps_rng[0]', 'sps_rng[1]', '(n_samples,)'], {}), '(sps_rng[0], sps_rng[1], (n_samples,))\n', (3395, 3433), True, 'import numpy as np\n'), ((3448, 3494), 'numpy.random.choice', 'np.random.choice', (['pulse_ebw_list', '(n_samples,)'], {}), '(pulse_ebw_list, (n_samples,))\n', (3464, 3494), True, 'import numpy as np\n'), ((3514, 3589), 'numpy.random.uniform', 'np.random.uniform', (['timing_offset_rng[0]', 'timing_offset_rng[1]', '(n_samples,)'], {}), '(timing_offset_rng[0], timing_offset_rng[1], (n_samples,))\n', (3531, 3589), True, 'import numpy as np\n'), ((3608, 3683), 'numpy.random.uniform', 'np.random.uniform', (['fading_spread_rng[0]', 'fading_spread_rng[1]', '(n_samples,)'], {}), '(fading_spread_rng[0], fading_spread_rng[1], (n_samples,))\n', (3625, 3683), True, 'import numpy as np\n'), ((3697, 3762), 'numpy.random.uniform', 'np.random.uniform', 
(['freq_err_rng[0]', 'freq_err_rng[1]', '(n_samples,)'], {}), '(freq_err_rng[0], freq_err_rng[1], (n_samples,))\n', (3714, 3762), True, 'import numpy as np\n'), ((3777, 3844), 'numpy.random.uniform', 'np.random.uniform', (['phase_err_rng[0]', 'phase_err_rng[1]', '(n_samples,)'], {}), '(phase_err_rng[0], phase_err_rng[1], (n_samples,))\n', (3794, 3844), True, 'import numpy as np\n'), ((4056, 4079), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4077, 4079), False, 'import datetime\n'), ((4096, 4112), 'copy.deepcopy', 'copy.deepcopy', (['a'], {}), '(a)\n', (4109, 4112), False, 'import copy\n'), ((6867, 6905), 'numpy.random.randint', 'np.random.randint', (['(0)', 'order', 'n_symbols'], {}), '(0, order, n_symbols)\n', (6884, 6905), True, 'import numpy as np\n'), ((7053, 7110), 'frm_modulations_fast.modulate_symbols', 'modulate_symbols', (['data_symbs', 'mod', 'max_sps'], {'ebw': 'pulse_ebw'}), '(data_symbs, mod, max_sps, ebw=pulse_ebw)\n', (7069, 7110), False, 'from frm_modulations_fast import modulate_symbols_fast, modulate_symbols\n'), ((7134, 7164), 'numpy.repeat', 'np.repeat', (['data_symbs', 'max_sps'], {}), '(data_symbs, max_sps)\n', (7143, 7164), True, 'import numpy as np\n'), ((7197, 7265), 'numpy.arange', 'np.arange', (['(0)', '(1.0 * max_sps * n_symbols / samp_rate)', '(1.0 / samp_rate)'], {}), '(0, 1.0 * max_sps * n_symbols / samp_rate, 1.0 / samp_rate)\n', (7206, 7265), True, 'import numpy as np\n'), ((7613, 7692), 'frm_modulations_fast.modulate_symbols', 'modulate_symbols', (['data_symbs_timing_err', 'mod'], {'sps': '(1)', 'ebw': 'None', 'pulse_shape': 'None'}), '(data_symbs_timing_err, mod, sps=1, ebw=None, pulse_shape=None)\n', (7629, 7692), False, 'from frm_modulations_fast import modulate_symbols_fast, modulate_symbols\n'), ((9376, 9398), 'numpy.zeros', 'np.zeros', (['(pkt_len, 2)'], {}), '((pkt_len, 2))\n', (9384, 9398), True, 'import numpy as np\n'), ((9657, 9693), 'numpy.zeros', 'np.zeros', (['(pkt_len + 1)'], {'dtype': 
'np.int_'}), '(pkt_len + 1, dtype=np.int_)\n', (9665, 9693), True, 'import numpy as np\n'), ((10671, 10709), 'numpy.random.randint', 'np.random.randint', (['(0)', 'order', 'n_symbols'], {}), '(0, order, n_symbols)\n', (10688, 10709), True, 'import numpy as np\n'), ((10860, 10954), 'frm_modulations_fast.modulate_symbols_fast', 'modulate_symbols_fast', (['data_symbs', 'mod', 'max_sps', 'timing_offset', 'timing_step'], {'ebw': 'pulse_ebw'}), '(data_symbs, mod, max_sps, timing_offset, timing_step,\n ebw=pulse_ebw)\n', (10881, 10954), False, 'from frm_modulations_fast import modulate_symbols_fast, modulate_symbols\n'), ((10973, 11003), 'numpy.repeat', 'np.repeat', (['data_symbs', 'max_sps'], {}), '(data_symbs, max_sps)\n', (10982, 11003), True, 'import numpy as np\n'), ((11037, 11105), 'numpy.arange', 'np.arange', (['(0)', '(1.0 * max_sps * n_symbols / samp_rate)', '(1.0 / samp_rate)'], {}), '(0, 1.0 * max_sps * n_symbols / samp_rate, 1.0 / samp_rate)\n', (11046, 11105), True, 'import numpy as np\n'), ((11312, 11391), 'frm_modulations_fast.modulate_symbols', 'modulate_symbols', (['data_symbs_timing_err', 'mod'], {'sps': '(1)', 'ebw': 'None', 'pulse_shape': 'None'}), '(data_symbs_timing_err, mod, sps=1, ebw=None, pulse_shape=None)\n', (11328, 11391), False, 'from frm_modulations_fast import modulate_symbols_fast, modulate_symbols\n'), ((12969, 12991), 'numpy.zeros', 'np.zeros', (['(pkt_len, 2)'], {}), '((pkt_len, 2))\n', (12977, 12991), True, 'import numpy as np\n'), ((13218, 13253), 'numpy.zeros', 'np.zeros', (['(complex_vec.shape[0], 2)'], {}), '((complex_vec.shape[0], 2))\n', (13226, 13253), True, 'import numpy as np\n'), ((13271, 13291), 'numpy.real', 'np.real', (['complex_vec'], {}), '(complex_vec)\n', (13278, 13291), True, 'import numpy as np\n'), ((13310, 13330), 'numpy.imag', 'np.imag', (['complex_vec'], {}), '(complex_vec)\n', (13317, 13330), True, 'import numpy as np\n'), ((2289, 2312), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2310, 
2312), False, 'import datetime\n'), ((2762, 2782), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2776, 2782), True, 'import numpy as np\n'), ((3158, 3182), 'numpy.zeros', 'np.zeros', (['(n_samples, 6)'], {}), '((n_samples, 6))\n', (3166, 3182), True, 'import numpy as np\n'), ((3208, 3249), 'numpy.zeros', 'np.zeros', (['(n_samples, 6)'], {'dtype': '"""complex"""'}), "((n_samples, 6), dtype='complex')\n", (3216, 3249), True, 'import numpy as np\n'), ((3891, 3946), 'numpy.random.uniform', 'np.random.uniform', (['snr_rng[0]', 'snr_rng[1]', '(n_samples,)'], {}), '(snr_rng[0], snr_rng[1], (n_samples,))\n', (3908, 3946), True, 'import numpy as np\n'), ((3969, 4008), 'numpy.random.choice', 'np.random.choice', (['snr_rng', '(n_samples,)'], {}), '(snr_rng, (n_samples,))\n', (3985, 4008), True, 'import numpy as np\n'), ((6080, 6103), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6101, 6103), False, 'import datetime\n'), ((6622, 6642), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6636, 6642), True, 'import numpy as np\n'), ((8737, 8765), 'numpy.arange', 'np.arange', (['t_timing_err.size'], {}), '(t_timing_err.size)\n', (8746, 8765), True, 'import numpy as np\n'), ((10426, 10446), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10440, 10446), True, 'import numpy as np\n'), ((12354, 12382), 'numpy.arange', 'np.arange', (['t_timing_err.size'], {}), '(t_timing_err.size)\n', (12363, 12382), True, 'import numpy as np\n'), ((962, 984), 'copy.deepcopy', 'copy.deepcopy', (['args_in'], {}), '(args_in)\n', (975, 984), False, 'import copy\n'), ((2400, 2427), 'pickle.dump', 'pickle.dump', (['dataset_out', 'f'], {}), '(dataset_out, f)\n', (2411, 2427), False, 'import pickle\n'), ((3850, 3867), 'numpy.array', 'np.array', (['snr_rng'], {}), '(snr_rng)\n', (3858, 3867), True, 'import numpy as np\n'), ((4908, 4931), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4929, 4931), False, 
'import datetime\n'), ((5262, 5280), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5278, 5280), False, 'import sys\n'), ((5301, 5317), 'copy.deepcopy', 'copy.deepcopy', (['b'], {}), '(b)\n', (5314, 5317), False, 'import copy\n'), ((6207, 6225), 'pickle.dump', 'pickle.dump', (['op', 'f'], {}), '(op, f)\n', (6218, 6225), False, 'import pickle\n'), ((8046, 8077), 'numpy.diff', 'np.diff', (['transition_data_timing'], {}), '(transition_data_timing)\n', (8053, 8077), True, 'import numpy as np\n'), ((11668, 11699), 'numpy.diff', 'np.diff', (['transition_data_timing'], {}), '(transition_data_timing)\n', (11675, 11699), True, 'import numpy as np\n'), ((2156, 2213), 'numpy.append', 'np.append', (['dataset_out[k1][k2]', 'dataset_i[k1][k2]'], {'axis': '(0)'}), '(dataset_out[k1][k2], dataset_i[k1][k2], axis=0)\n', (2165, 2213), True, 'import numpy as np\n')] |
import tensorflow as tf
import os
import numpy as np
import logging
logger = logging.getLogger("detect")
class Step1CNN:
def __init__(self, step1_model_dir):
self.model_dir = step1_model_dir
self.model_path = os.path.join(self.model_dir, 'frozen_inference_graph.pb')
self._graph = None
self._session = None
self._isload = False
def load(self, config):
logger.info('begin loading step1 model: {}'.format(self.model_path))
self._graph = tf.Graph()
with self._graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(self.model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
logger.info('end loading step1 graph...')
# config.gpu_options.per_process_gpu_memory_fraction = 0.5 # 占用GPU50%的显存
self._session = tf.Session(graph=self._graph, config=config)
# Definite input and output Tensors for detection_graph
self._image_tensor = self._graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self._detection_boxes = self._graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self._detection_scores = self._graph.get_tensor_by_name('detection_scores:0')
self._isload = True
def is_load(self):
return self._isload
def detect(self,image_np):
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
# Actual detection.
(boxes, scores) = self._session.run(
[self._detection_boxes, self._detection_scores],
feed_dict={self._image_tensor: image_np_expanded})
return (boxes, scores)
| [
"logging.getLogger",
"tensorflow.Graph",
"tensorflow.Session",
"os.path.join",
"tensorflow.GraphDef",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile"
] | [((78, 105), 'logging.getLogger', 'logging.getLogger', (['"""detect"""'], {}), "('detect')\n", (95, 105), False, 'import logging\n'), ((232, 289), 'os.path.join', 'os.path.join', (['self.model_dir', '"""frozen_inference_graph.pb"""'], {}), "(self.model_dir, 'frozen_inference_graph.pb')\n", (244, 289), False, 'import os\n'), ((503, 513), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (511, 513), True, 'import tensorflow as tf\n'), ((983, 1027), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self._graph', 'config': 'config'}), '(graph=self._graph, config=config)\n', (993, 1027), True, 'import tensorflow as tf\n'), ((1821, 1853), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (1835, 1853), True, 'import numpy as np\n'), ((580, 593), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (591, 593), True, 'import tensorflow as tf\n'), ((611, 648), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['self.model_path', '"""rb"""'], {}), "(self.model_path, 'rb')\n", (625, 648), True, 'import tensorflow as tf\n'), ((782, 824), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (801, 824), True, 'import tensorflow as tf\n')] |
# Visualize Reults with MatPlotlib etc...
import matplotlib.pyplot as plt
import numpy as np
class Visualization(object):
def __init__(self, dataset):
super().__init__()
self.dataset = dataset
self.estimated_state = None
self.estimated_varf = None
def getEstimatedValues(self, estimated_state, estimated_var):
assert (
estimated_state.ndim == 2
and estimated_state.shape[1] == 3
and estimated_state.shape[1] == estimated_var.shape[1]
), "There's some miss in Estimated values, Check Plz"
self.estimated_traj = estimated_state.T
self.estimated_var = estimated_var.T
print()
print(self.estimated_traj.shape)
print(self.dataset.gt_trajectory_xyz.shape)
print()
def plotGPStrajactory(self):
"""
run after generateGroundTruthSets, plot trajactory with GPS lng/lat data
"""
lons, lats, _ = self.dataset.gt_trajectory_lla
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
ax.plot(lons, lats)
plt.title("GPS trajactory")
ax.set_xlabel("longitude [deg]")
ax.set_ylabel("latitude [deg]")
ax.grid()
plt.show()
def plotXYZtrajactory(self):
"""
lla_to_enu converts lng/lat/alt to enu form
plot converted XYZ trajectory
"""
xs, ys, _ = self.dataset.gt_trajectory_xyz
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
ax.plot(xs, ys)
plt.title("XYZ trajactory")
ax.set_xlabel("X [m]")
ax.set_ylabel("Y [m]")
ax.grid()
plt.show()
def plotGTvalue(self):
"""
plot Ground-Truth Yaw angles / Yaw rates /Forward velocity
ts required for plotlib x-axis
"""
fig, ax = plt.subplots(
3, 1, gridspec_kw={"height_ratios": [1, 1, 1]}, figsize=(10, 14)
)
ax[0].plot(self.dataset.ts, self.dataset.gt_yaws)
ax[0].title.set_text("Ground-Truth yaw angles")
ax[0].set_ylabel("ground-truth yaw angle [rad]")
ax[1].plot(self.dataset.ts, self.dataset.gt_yaw_rates)
ax[1].title.set_text("Yaw Rates")
ax[1].set_ylabel("ground-truth yaw rate [rad/s]")
ax[2].plot(self.dataset.ts, self.dataset.gt_forward_velocities)
ax[2].title.set_text("Forward Velocitis")
ax[2].set_xlabel("time elapsed [sec]")
ax[2].set_ylabel("ground-truth forward velocity [m/s]")
plt.show()
# TODO No handles with labels found to put in legend.
def plotNoisyData(self):
"""
After addGaussianNoiseToGPS, plot 3 types of noisy/ground-truth data at same plot
1. GT/Noisy XYZ Traj
2. GT/Noisy Yaw Rates
3. GT/Noisy Forward Velocities
"""
fig, ax = plt.subplots(
3, 1, gridspec_kw={"height_ratios": [2, 1, 1]}, figsize=(10, 14)
)
# Plot1 GT/Noisy Traj
gt_xs, gt_ys, _ = self.dataset.gt_trajectory_xyz
noisy_xs, noisy_ys, _ = self.dataset.noisy_trajectory_xyz
ax[0].title.set_text("Traj Comparison - GT & Noisy")
ax[0].plot(gt_xs, gt_ys, lw=2, label="ground-truth trajectory")
ax[0].plot(
noisy_xs,
noisy_ys,
lw=0,
marker=".",
markersize=5,
alpha=0.4,
label="noisy trajectory",
)
ax[0].set_xlabel("X [m]")
ax[0].set_ylabel("Y [m]")
ax[0].legend()
ax[0].grid()
# Plot2 GT/Noisy Yaw Rates
ax[1].plot(
self.dataset.ts, self.dataset.gt_yaw_rates, lw=1, label="ground-truth"
)
ax[1].plot(
self.dataset.ts,
self.dataset.noisy_yaw_rates,
lw=0,
marker=".",
alpha=0.4,
label="noisy",
)
# ax[1].set_xlabel("time elapsed [sec]")
ax[1].set_ylabel("yaw rate [rad/s]")
ax[1].legend()
# Plot3 GT/Noisy Forward Velocities
ax[2].plot(
self.dataset.ts,
self.dataset.gt_forward_velocities,
lw=1,
label="ground-truth",
)
ax[2].plot(
self.dataset.ts,
self.dataset.noisy_forward_velocities,
lw=0,
marker=".",
alpha=0.4,
label="noisy",
)
ax[2].set_xlabel("time elapsed [sec]")
ax[2].set_ylabel("forward velocity [m/s]")
ax[2].legend()
plt.show()
def plotEstimatedTraj(self):
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
gt_xs, gt_ys, _ = self.dataset.gt_trajectory_xyz
ax.plot(gt_xs, gt_ys, lw=2, label="ground-truth trajectory")
noisy_xs, noisy_ys, _ = self.dataset.noisy_trajectory_xyz
ax.plot(
noisy_xs,
noisy_ys,
lw=0,
marker=".",
markersize=4,
alpha=1.0,
label="observed trajectory",
)
est_xs, ext_ys, _ = self.estimated_traj
ax.plot(est_xs, ext_ys, lw=2, label="estimated trajectory", color="r")
plt.title("Trajactory Comparison")
ax.set_xlabel("X [m]")
ax.set_ylabel("Y [m]")
ax.legend()
ax.grid()
plt.show()
def setSubPlot(self, type, ax, x_val, y_val_1, y_val_2, labels):
if type == "Position":
ax.plot(
x_val,
y_val_1,
lw=2,
label=labels[0]
)
ax.plot(
x_val,
y_val_2,
lw=1,
label=labels[1],
color="r",
)
ax.set_xlabel(labels[2])
ax.set_ylabel(labels[3])
ax.legend()
elif type == "Angle":
ax.plot(
x_val,
y_val_1,
lw=1.5,
label=labels[0],
)
ax.plot(
x_val,
np.sqrt(y_val_2),
lw=1.5,
label=labels[1],
color="darkorange",
)
ax.plot(
x_val,
-np.sqrt(y_val_2),
lw=1.5,
label=labels[2],
color="darkorange",
)
ax.set_xlabel(labels[3])
ax.set_ylabel(labels[4])
ax.legend()
return ax
def plotEstimated2DStates(self):
fig, ax = plt.subplots(2, 3, figsize=(14, 6))
# Analyze estimation error of X
ax[0, 0] = self.setSubPlot(
type="Position",
ax=ax[0, 0],
x_val=self.dataset.ts,
y_val_1=self.dataset.gt_trajectory_xyz[0],
y_val_2=self.estimated_traj[0],
labels=["ground-truth", "estimated", "time elapsed [sec]", "X [m]"]
)
ax[1, 0] = self.setSubPlot(
type="Angle",
ax=ax[1, 0],
x_val=self.dataset.ts,
y_val_1=self.estimated_traj[0] - self.dataset.gt_trajectory_xyz[0],
y_val_2=self.estimated_var[0],
labels=["estimation error", "estimated 1-sigma interval", "", "time elapsed [sec]", "X estimation error [m]"]
)
# Analyze estimation error of Y
ax[0, 1] = self.setSubPlot(
type="Position",
ax=ax[0, 1],
x_val=self.dataset.ts,
y_val_1=self.dataset.gt_trajectory_xyz[1],
y_val_2=self.estimated_traj[1],
labels=["ground-truth", "estimated", "time elapsed [sec]", "Y [m]"]
)
ax[1, 1] = self.setSubPlot(
type="Angle",
ax=ax[1, 1],
x_val=self.dataset.ts,
y_val_1=self.estimated_traj[1] - self.dataset.gt_trajectory_xyz[1],
y_val_2=self.estimated_var[1],
labels=["estimation error", "estimated 1-sigma interval", "", "time elapsed [sec]", "Y estimation error [m]"]
)
# Analyze estimation error of Theta
ax[0, 2] = self.setSubPlot(
type="Position",
ax=ax[0, 2],
x_val=self.dataset.ts,
y_val_1=self.dataset.gt_trajectory_xyz[2],
y_val_2=self.estimated_traj[2],
labels=["ground-truth", "estimated", "time elapsed [sec]", "yaw angle [rad/s]"]
)
ax[1, 2] = self.setSubPlot(
type="Angle",
ax=ax[1, 2],
x_val=self.dataset.ts,
y_val_1=self.normalize_angles(self.estimated_traj[2] - self.dataset.gt_yaws),
y_val_2=self.estimated_var[2],
labels=["estimation error", "estimated 1-sigma interval", "", "time elapsed [sec]", "yaw estimation error [m]"]
)
ax[1, 2].set_ylim(-0.5, 0.5)
plt.show()
def normalize_angles(self, angles):
"""
Args:
angles (float or numpy.array): angles in radian (= [a1, a2, ...], shape of [n,])
Returns:
numpy.array or float: angles in radians normalized b/w/ -pi and +pi (same shape w/ angles)
"""
angles = (angles + np.pi) % (2 * np.pi) - np.pi
return angles
def plot3d(self):
pass
def animePlay(self):
pass
if __name__ == "__main__":
# Unit Test Here
pass
| [
"matplotlib.pyplot.title",
"numpy.sqrt",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((1034, 1068), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (1046, 1068), True, 'import matplotlib.pyplot as plt\n'), ((1105, 1132), 'matplotlib.pyplot.title', 'plt.title', (['"""GPS trajactory"""'], {}), "('GPS trajactory')\n", (1114, 1132), True, 'import matplotlib.pyplot as plt\n'), ((1240, 1250), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1248, 1250), True, 'import matplotlib.pyplot as plt\n'), ((1469, 1503), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (1481, 1503), True, 'import matplotlib.pyplot as plt\n'), ((1536, 1563), 'matplotlib.pyplot.title', 'plt.title', (['"""XYZ trajactory"""'], {}), "('XYZ trajactory')\n", (1545, 1563), True, 'import matplotlib.pyplot as plt\n'), ((1652, 1662), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1660, 1662), True, 'import matplotlib.pyplot as plt\n'), ((1839, 1917), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'gridspec_kw': "{'height_ratios': [1, 1, 1]}", 'figsize': '(10, 14)'}), "(3, 1, gridspec_kw={'height_ratios': [1, 1, 1]}, figsize=(10, 14))\n", (1851, 1917), True, 'import matplotlib.pyplot as plt\n'), ((2519, 2529), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2527, 2529), True, 'import matplotlib.pyplot as plt\n'), ((2849, 2927), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'gridspec_kw': "{'height_ratios': [2, 1, 1]}", 'figsize': '(10, 14)'}), "(3, 1, gridspec_kw={'height_ratios': [2, 1, 1]}, figsize=(10, 14))\n", (2861, 2927), True, 'import matplotlib.pyplot as plt\n'), ((4546, 4556), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4554, 4556), True, 'import matplotlib.pyplot as plt\n'), ((4609, 4643), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 6)'}), '(1, 1, figsize=(8, 6))\n', (4621, 4643), True, 'import matplotlib.pyplot as plt\n'), ((5178, 5212), 
'matplotlib.pyplot.title', 'plt.title', (['"""Trajactory Comparison"""'], {}), "('Trajactory Comparison')\n", (5187, 5212), True, 'import matplotlib.pyplot as plt\n'), ((5322, 5332), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5330, 5332), True, 'import matplotlib.pyplot as plt\n'), ((6549, 6584), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(14, 6)'}), '(2, 3, figsize=(14, 6))\n', (6561, 6584), True, 'import matplotlib.pyplot as plt\n'), ((8866, 8876), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8874, 8876), True, 'import matplotlib.pyplot as plt\n'), ((6065, 6081), 'numpy.sqrt', 'np.sqrt', (['y_val_2'], {}), '(y_val_2)\n', (6072, 6081), True, 'import numpy as np\n'), ((6251, 6267), 'numpy.sqrt', 'np.sqrt', (['y_val_2'], {}), '(y_val_2)\n', (6258, 6267), True, 'import numpy as np\n')] |
import pymc3 as pm
import matplotlib.pyplot as pl
from matplotlib import dates
import numpy as np
import pandas as pd
from . import ephem, MPLSTYLE, PACKAGEDIR
from .cobs import read_cobs
class CometModel():
def __init__(self, comet="2019 Y4", cobs_id=None, horizons_id=None,
start="2020-01-01", stop="2020-08-01"):
self.cobs_id = comet if cobs_id is None else cobs_id
self.horizons_id = comet if horizons_id is None else horizons_id
self.comet = comet
self.start = start
self.stop = stop
# Load the observations
self.obs = read_cobs(comet=self.cobs_id, start=self.start, stop=self.stop)
# Load the ephemeris
self.ephem = ephem.get_ephemeris(self.horizons_id, self.start, self.stop)
self.sun_distance_func = ephem.create_sun_distance_func(self.ephem)
self.earth_distance_func = ephem.create_earth_distance_func(self.ephem)
# Initialize
self.model = None
self.trace = None
def sample(self, draws=500, cores=4):
if self.model is None:
self.model = self.create_pymc_model()
with self.model:
trace = pm.sample(draws=draws, cores=cores)
self.trace = trace
return trace
def traceplot(self, var_names=('n', 'h', 'beta')):
return pm.traceplot(self.trace, var_names=var_names);
def get_parameter_summary(self, params=('n', 'h', 'beta')):
result = {'comet': self.comet}
for par in params:
partrace = self.trace.get_values(par)
result[f'{par}_mean'] = np.nanmean(partrace)
result[f'{par}_std'] = np.nanstd(partrace)
for percentile in [0.1, 16, 50, 84, 0.5, 99.9]:
result[f'{par}_{percentile:.1f}p'] = np.nanpercentile(partrace, percentile)
return result
def mean_model(self, times):
trace_n = self.trace.get_values('n')
trace_h = self.trace.get_values('h')
mean_h = np.nanmean(trace_h)
mean_n = np.nanmean(trace_n)
return comet_magnitude_power_law(h=mean_h,
delta=self.earth_distance_func(times),
n=mean_n,
r=self.sun_distance_func(times))
def plot(self, title=None, show_year=False, min_elongation=None, n_samples=400):
"""Plot the model alongside the observations.
Parameters
----------
n_samples : int
Number of sample draws to show to visualize the uncertainty.
"""
if title is None:
title = f"Light curve of Comet {self.comet}"
if self.trace is None:
self.trace = self.sample()
with pl.style.context(str(MPLSTYLE)):
ax = pl.subplot(111)
times = np.arange(self.ephem.date.values[0],
self.ephem.date.values[-1] + 1_000_000_000_000_000,
np.timedelta64(12, 'h'))
# Show the observations
pl.scatter(self.obs.data[self.obs.data.visual].time,
self.obs.data[self.obs.data.visual].magnitude,
marker='+', lw=0.7, s=40, label="Visual observations (COBS)",
c='#2980b9', alpha=0.8, zorder=50)
pl.scatter(self.obs.data[~self.obs.data.visual].time,
self.obs.data[~self.obs.data.visual].magnitude,
marker='x', lw=0.7, s=30, label="CCD observations (COBS)",
c='#c0392b', alpha=0.8, zorder=50)
# Show the model fit
trace_n = self.trace.get_values('n')
trace_h = self.trace.get_values('h')
mean_h = np.nanmean(trace_h)
mean_n = np.nanmean(trace_n)
# Show the uncertainty based on n_samples
if len(trace_n) < n_samples:
step = 1
else:
step = int(len(trace_n) / n_samples)
for idx in range(0, len(trace_n), step):
model = comet_magnitude_power_law(h=trace_h[idx],
delta=self.earth_distance_func(times),
n=trace_n[idx],
r=self.sun_distance_func(times))
pl.plot(times, model, c='black', lw=1, alpha=0.02, zorder=10)
model = comet_magnitude_power_law(h=mean_h,
delta=self.earth_distance_func(times),
n=mean_n,
r=self.sun_distance_func(times))
pl.plot(times, model, ls='dashed', c='black', lw=2,
label=f"Model (H={mean_h:.1f}; n={mean_n:.1f})", zorder=20)
# Show the elongation black-out
if min_elongation is not None:
d1 = self.ephem[self.ephem.elongation < min_elongation].date.min()
d2 = self.ephem[self.ephem.elongation < min_elongation].date.max()
ax.fill_between([d1, d2], -30, 30,
label=f'Elongation < {min_elongation}°',
hatch='//////', facecolor='None',
edgecolor='#bdc3c7', zorder=5)
if show_year:
ax.xaxis.set_major_formatter(dates.DateFormatter('%-d %b %Y'))
pl.xlabel("Date")
else:
ax.xaxis.set_major_formatter(dates.DateFormatter('%-d %b'))
pl.xlabel(f"Date ({self.start[:4]})")
ax.xaxis.set_minor_locator(dates.AutoDateLocator(minticks=50, maxticks=100))
labels = ax.get_xmajorticklabels()
pl.setp(labels, rotation=45)
labels = ax.get_xminorticklabels()
pl.setp(labels, rotation=45)
pl.ylim([int(np.max(model)+3), int(np.min(model-6))])
pl.xlim([np.datetime64(self.start), np.datetime64(self.stop)])
pl.ylabel("Magnitude")
pl.title(title)
handles, labels = ax.get_legend_handles_labels()
try:
pl.legend([handles[x] for x in (1, 2, 0, 3)],
[labels[x] for x in (1, 2, 0, 3)])
except IndexError:
pl.legend()
pl.tight_layout()
return ax
def create_pymc_model(self, min_observations=0, observer_bias=False):
"""Returns a PyMC3 model."""
dfobs = self.obs.data[self.obs.data.observations > min_observations]
with pm.Model() as model:
delta = self.earth_distance_func(dfobs.time.values)
r = self.sun_distance_func(dfobs.time.values)
n = pm.Normal('n', mu=3.49, sigma=1.36) # activity parameter
h = pm.Normal('h', mu=6.66, sigma=1.98) # absolute magnitude
model_mag = comet_magnitude_power_law(h=h, n=n, delta=delta, r=r)
if observer_bias == True:
observers = dfobs.observer.unique()
for obs in observers:
mask = np.array(dfobs.observer.values == obs)
beta = pm.HalfNormal('beta_'+obs, sigma=0.5)
bias = pm.Normal('bias_'+obs, mu=0., sigma=.5)
obsmag = pm.Cauchy('obsmag_'+obs,
alpha=model_mag[mask] + bias,
beta=beta,
observed=dfobs.magnitude[mask])
else:
beta = 0.47 + pm.HalfNormal('beta', sigma=0.02)
obsmag = pm.Cauchy('obsmag', alpha=model_mag,
beta=beta, observed=dfobs.magnitude)
self.model = model
return self.model
def comet_magnitude_power_law(h=10., n=4., delta=1., r=1.):
"""The conventional power-law formula to predict a comet's brightness.
Parameters
----------
h : float
Absolute magnitude.
n : float
Activity parameter.
delta : float
Comet's geocentric distance in AU.
r : float
Comet's heliocentric distance in AU.
"""
return h + 5*np.log10(delta) + 2.5*n*np.log10(r)
def fit_all_comets():
comets = pd.read_csv(PACKAGEDIR / "data/comets.csv")
result = []
for comet in comets.itertuples():
print(comet.comet)
model = CometModel(comet=comet.comet,
cobs_id=comet.cobs_id,
horizons_id=comet.horizons_id,
start=comet.start,
stop=comet.stop)
model.sample(draws=300)
ax = model.plot()
output_fn = f"output/{comet.cobs_id}.png"
print(f"Writing {output_fn}")
ax.figure.savefig(output_fn)
pl.close()
params = model.get_parameter_summary()
result.append(params)
df = pd.DataFrame(result)
print(f"Prior for n: mean={df.n_mean.mean():.2f}, std={df.n_mean.std():.2f}")
print(f"Prior for h: mean={df.h_mean.mean():.2f}, std={df.h_mean.std():.2f}")
print(f"Prior for beta: mean={df.beta_mean.mean():.2f}, std={df.beta_mean.std():.2f}")
return df
def fit_all_comets2():
result = []
df = read_cobs()
comet_counts = df[df.date > '1990'].comet.value_counts()
well_observed_comets = comet_counts[comet_counts > 300].index
well_observed_mask = df.comet.isin(well_observed_comets)
for comet in well_observed_comets:
print(comet)
model = CometModel(comet=comet.comet,
cobs_id=comet.cobs_id,
horizons_id=comet.horizons_id,
start=comet.start,
stop=comet.stop)
model.sample(draws=300)
ax = model.plot()
output_fn = f"output/{comet.cobs_id}.png"
print(f"Writing {output_fn}")
ax.figure.savefig(output_fn)
pl.close()
params = model.get_parameter_summary()
result.append(params)
df = pd.DataFrame(result)
print(f"Prior for n: mean={df.n_mean.mean():.2f}, std={df.n_mean.std():.2f}")
print(f"Prior for h: mean={df.h_mean.mean():.2f}, std={df.h_mean.std():.2f}")
print(f"Prior for beta: mean={df.beta_mean.mean():.2f}, std={df.beta_mean.std():.2f}")
return df
| [
"numpy.log10",
"numpy.nanpercentile",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.nanmean",
"numpy.array",
"pymc3.sample",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.close",
"matplotlib.pyplot.scatter",
"numpy.datetime64",
"pandas.DataFr... | [((8380, 8423), 'pandas.read_csv', 'pd.read_csv', (["(PACKAGEDIR / 'data/comets.csv')"], {}), "(PACKAGEDIR / 'data/comets.csv')\n", (8391, 8423), True, 'import pandas as pd\n'), ((9033, 9053), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (9045, 9053), True, 'import pandas as pd\n'), ((10161, 10181), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (10173, 10181), True, 'import pandas as pd\n'), ((1333, 1378), 'pymc3.traceplot', 'pm.traceplot', (['self.trace'], {'var_names': 'var_names'}), '(self.trace, var_names=var_names)\n', (1345, 1378), True, 'import pymc3 as pm\n'), ((1988, 2007), 'numpy.nanmean', 'np.nanmean', (['trace_h'], {}), '(trace_h)\n', (1998, 2007), True, 'import numpy as np\n'), ((2025, 2044), 'numpy.nanmean', 'np.nanmean', (['trace_n'], {}), '(trace_n)\n', (2035, 2044), True, 'import numpy as np\n'), ((8936, 8946), 'matplotlib.pyplot.close', 'pl.close', ([], {}), '()\n', (8944, 8946), True, 'import matplotlib.pyplot as pl\n'), ((10064, 10074), 'matplotlib.pyplot.close', 'pl.close', ([], {}), '()\n', (10072, 10074), True, 'import matplotlib.pyplot as pl\n'), ((1178, 1213), 'pymc3.sample', 'pm.sample', ([], {'draws': 'draws', 'cores': 'cores'}), '(draws=draws, cores=cores)\n', (1187, 1213), True, 'import pymc3 as pm\n'), ((1597, 1617), 'numpy.nanmean', 'np.nanmean', (['partrace'], {}), '(partrace)\n', (1607, 1617), True, 'import numpy as np\n'), ((1653, 1672), 'numpy.nanstd', 'np.nanstd', (['partrace'], {}), '(partrace)\n', (1662, 1672), True, 'import numpy as np\n'), ((2807, 2822), 'matplotlib.pyplot.subplot', 'pl.subplot', (['(111)'], {}), '(111)\n', (2817, 2822), True, 'import matplotlib.pyplot as pl\n'), ((3077, 3283), 'matplotlib.pyplot.scatter', 'pl.scatter', (['self.obs.data[self.obs.data.visual].time', 'self.obs.data[self.obs.data.visual].magnitude'], {'marker': '"""+"""', 'lw': '(0.7)', 's': '(40)', 'label': '"""Visual observations (COBS)"""', 'c': '"""#2980b9"""', 'alpha': 
'(0.8)', 'zorder': '(50)'}), "(self.obs.data[self.obs.data.visual].time, self.obs.data[self.obs\n .data.visual].magnitude, marker='+', lw=0.7, s=40, label=\n 'Visual observations (COBS)', c='#2980b9', alpha=0.8, zorder=50)\n", (3087, 3283), True, 'import matplotlib.pyplot as pl\n'), ((3355, 3560), 'matplotlib.pyplot.scatter', 'pl.scatter', (['self.obs.data[~self.obs.data.visual].time', 'self.obs.data[~self.obs.data.visual].magnitude'], {'marker': '"""x"""', 'lw': '(0.7)', 's': '(30)', 'label': '"""CCD observations (COBS)"""', 'c': '"""#c0392b"""', 'alpha': '(0.8)', 'zorder': '(50)'}), "(self.obs.data[~self.obs.data.visual].time, self.obs.data[~self.\n obs.data.visual].magnitude, marker='x', lw=0.7, s=30, label=\n 'CCD observations (COBS)', c='#c0392b', alpha=0.8, zorder=50)\n", (3365, 3560), True, 'import matplotlib.pyplot as pl\n'), ((3773, 3792), 'numpy.nanmean', 'np.nanmean', (['trace_h'], {}), '(trace_h)\n', (3783, 3792), True, 'import numpy as np\n'), ((3814, 3833), 'numpy.nanmean', 'np.nanmean', (['trace_n'], {}), '(trace_n)\n', (3824, 3833), True, 'import numpy as np\n'), ((4754, 4870), 'matplotlib.pyplot.plot', 'pl.plot', (['times', 'model'], {'ls': '"""dashed"""', 'c': '"""black"""', 'lw': '(2)', 'label': 'f"""Model (H={mean_h:.1f}; n={mean_n:.1f})"""', 'zorder': '(20)'}), "(times, model, ls='dashed', c='black', lw=2, label=\n f'Model (H={mean_h:.1f}; n={mean_n:.1f})', zorder=20)\n", (4761, 4870), True, 'import matplotlib.pyplot as pl\n'), ((5846, 5874), 'matplotlib.pyplot.setp', 'pl.setp', (['labels'], {'rotation': '(45)'}), '(labels, rotation=45)\n', (5853, 5874), True, 'import matplotlib.pyplot as pl\n'), ((5935, 5963), 'matplotlib.pyplot.setp', 'pl.setp', (['labels'], {'rotation': '(45)'}), '(labels, rotation=45)\n', (5942, 5963), True, 'import matplotlib.pyplot as pl\n'), ((6118, 6140), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""Magnitude"""'], {}), "('Magnitude')\n", (6127, 6140), True, 'import matplotlib.pyplot as pl\n'), ((6153, 6168), 
'matplotlib.pyplot.title', 'pl.title', (['title'], {}), '(title)\n', (6161, 6168), True, 'import matplotlib.pyplot as pl\n'), ((6439, 6456), 'matplotlib.pyplot.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (6454, 6456), True, 'import matplotlib.pyplot as pl\n'), ((6686, 6696), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (6694, 6696), True, 'import pymc3 as pm\n'), ((6845, 6880), 'pymc3.Normal', 'pm.Normal', (['"""n"""'], {'mu': '(3.49)', 'sigma': '(1.36)'}), "('n', mu=3.49, sigma=1.36)\n", (6854, 6880), True, 'import pymc3 as pm\n'), ((6919, 6954), 'pymc3.Normal', 'pm.Normal', (['"""h"""'], {'mu': '(6.66)', 'sigma': '(1.98)'}), "('h', mu=6.66, sigma=1.98)\n", (6928, 6954), True, 'import pymc3 as pm\n'), ((8331, 8342), 'numpy.log10', 'np.log10', (['r'], {}), '(r)\n', (8339, 8342), True, 'import numpy as np\n'), ((1786, 1824), 'numpy.nanpercentile', 'np.nanpercentile', (['partrace', 'percentile'], {}), '(partrace, percentile)\n', (1802, 1824), True, 'import numpy as np\n'), ((3003, 3026), 'numpy.timedelta64', 'np.timedelta64', (['(12)', '"""h"""'], {}), "(12, 'h')\n", (3017, 3026), True, 'import numpy as np\n'), ((4399, 4460), 'matplotlib.pyplot.plot', 'pl.plot', (['times', 'model'], {'c': '"""black"""', 'lw': '(1)', 'alpha': '(0.02)', 'zorder': '(10)'}), "(times, model, c='black', lw=1, alpha=0.02, zorder=10)\n", (4406, 4460), True, 'import matplotlib.pyplot as pl\n'), ((5515, 5532), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""Date"""'], {}), "('Date')\n", (5524, 5532), True, 'import matplotlib.pyplot as pl\n'), ((5659, 5696), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['f"""Date ({self.start[:4]})"""'], {}), "(f'Date ({self.start[:4]})')\n", (5668, 5696), True, 'import matplotlib.pyplot as pl\n'), ((5737, 5785), 'matplotlib.dates.AutoDateLocator', 'dates.AutoDateLocator', ([], {'minticks': '(50)', 'maxticks': '(100)'}), '(minticks=50, maxticks=100)\n', (5758, 5785), False, 'from matplotlib import dates\n'), ((6263, 6348), 'matplotlib.pyplot.legend', 
'pl.legend', (['[handles[x] for x in (1, 2, 0, 3)]', '[labels[x] for x in (1, 2, 0, 3)]'], {}), '([handles[x] for x in (1, 2, 0, 3)], [labels[x] for x in (1, 2, 0, 3)]\n )\n', (6272, 6348), True, 'import matplotlib.pyplot as pl\n'), ((7747, 7820), 'pymc3.Cauchy', 'pm.Cauchy', (['"""obsmag"""'], {'alpha': 'model_mag', 'beta': 'beta', 'observed': 'dfobs.magnitude'}), "('obsmag', alpha=model_mag, beta=beta, observed=dfobs.magnitude)\n", (7756, 7820), True, 'import pymc3 as pm\n'), ((8307, 8322), 'numpy.log10', 'np.log10', (['delta'], {}), '(delta)\n', (8315, 8322), True, 'import numpy as np\n'), ((5465, 5497), 'matplotlib.dates.DateFormatter', 'dates.DateFormatter', (['"""%-d %b %Y"""'], {}), "('%-d %b %Y')\n", (5484, 5497), False, 'from matplotlib import dates\n'), ((5612, 5641), 'matplotlib.dates.DateFormatter', 'dates.DateFormatter', (['"""%-d %b"""'], {}), "('%-d %b')\n", (5631, 5641), False, 'from matplotlib import dates\n'), ((6052, 6077), 'numpy.datetime64', 'np.datetime64', (['self.start'], {}), '(self.start)\n', (6065, 6077), True, 'import numpy as np\n'), ((6079, 6103), 'numpy.datetime64', 'np.datetime64', (['self.stop'], {}), '(self.stop)\n', (6092, 6103), True, 'import numpy as np\n'), ((6415, 6426), 'matplotlib.pyplot.legend', 'pl.legend', ([], {}), '()\n', (6424, 6426), True, 'import matplotlib.pyplot as pl\n'), ((7224, 7262), 'numpy.array', 'np.array', (['(dfobs.observer.values == obs)'], {}), '(dfobs.observer.values == obs)\n', (7232, 7262), True, 'import numpy as np\n'), ((7291, 7330), 'pymc3.HalfNormal', 'pm.HalfNormal', (["('beta_' + obs)"], {'sigma': '(0.5)'}), "('beta_' + obs, sigma=0.5)\n", (7304, 7330), True, 'import pymc3 as pm\n'), ((7356, 7399), 'pymc3.Normal', 'pm.Normal', (["('bias_' + obs)"], {'mu': '(0.0)', 'sigma': '(0.5)'}), "('bias_' + obs, mu=0.0, sigma=0.5)\n", (7365, 7399), True, 'import pymc3 as pm\n'), ((7425, 7528), 'pymc3.Cauchy', 'pm.Cauchy', (["('obsmag_' + obs)"], {'alpha': '(model_mag[mask] + bias)', 'beta': 'beta', 
'observed': 'dfobs.magnitude[mask]'}), "('obsmag_' + obs, alpha=model_mag[mask] + bias, beta=beta,\n observed=dfobs.magnitude[mask])\n", (7434, 7528), True, 'import pymc3 as pm\n'), ((7688, 7721), 'pymc3.HalfNormal', 'pm.HalfNormal', (['"""beta"""'], {'sigma': '(0.02)'}), "('beta', sigma=0.02)\n", (7701, 7721), True, 'import pymc3 as pm\n'), ((6012, 6029), 'numpy.min', 'np.min', (['(model - 6)'], {}), '(model - 6)\n', (6018, 6029), True, 'import numpy as np\n'), ((5990, 6003), 'numpy.max', 'np.max', (['model'], {}), '(model)\n', (5996, 6003), True, 'import numpy as np\n')] |
"""
Serve as a convenient wrapper for validate.py.
"""
import gc
import os
import timeit
import IPython
import torch
import numpy as np
import scipy.misc as misc
import yaml
from torch.utils import data
from ptsemseg.loader import get_loader
from ptsemseg.utils import get_logger
from utils import test_parser
from validate import validate, load_model_and_preprocess
from validate_from_file import load_complete_info_from_dir, final_run_dirs
try:
import pydensecrf.densecrf as dcrf
except:
print(
"Failed to import pydensecrf,\
CRF post-processing will not work"
)
# Hard-coded repository root; every relative path below (run dirs, data)
# is resolved from here after the chdir.
ROOT = '/cvlabdata2/home/kaicheng/pycharm/pytorch-semseg'
M_ROOT = "runs/"  # directory holding the trained-run folders, relative to ROOT
os.chdir(ROOT)
def _save_output(img_path_target, outputs, resized_img, loader, cfg):
    """
    Decode one model output to a segmentation mask image and save it.

    Optionally post-processes the output with a dense CRF first (saved to a
    separate ``*_drf.png`` file).  The final mask is resized back to the
    original image size for the architectures/datasets that need it.

    NOTE(review): reads the module-level ``args`` (parsed under
    ``__main__``) instead of receiving it as a parameter — works only when
    this module is executed as a script.

    :param img_path_target: output file path for the mask.
    :param outputs: raw model output (``torch.Tensor`` of class scores or an
        already-numpy prediction array).
    :param resized_img: the (resized) input image; only used for the CRF
        pairwise term.
    :param loader: dataset loader instance (provides ``n_classes`` and
        ``decode_segmap``).
    :param cfg: experiment config dict.
    """
    if args.dcrf:
        # Build the unary term from the network's (softmax) scores.
        unary = outputs.data.cpu().numpy()
        unary = np.squeeze(unary, 0)
        unary = -np.log(unary)  # CRF expects negative log-probabilities
        unary = unary.transpose(2, 1, 0)
        w, h, c = unary.shape
        unary = unary.transpose(2, 0, 1).reshape(loader.n_classes, -1)
        unary = np.ascontiguousarray(unary)
        resized_img = np.ascontiguousarray(resized_img)
        d = dcrf.DenseCRF2D(w, h, loader.n_classes)
        d.setUnaryEnergy(unary)
        # Color-dependent pairwise term on the input image.
        d.addPairwiseBilateral(sxy=5, srgb=3, rgbim=resized_img, compat=1)
        q = d.inference(50)
        mask = np.argmax(q, axis=0).reshape(w, h).transpose(1, 0)
        decoded_crf = loader.decode_segmap(np.array(mask, dtype=np.uint8))
        dcrf_path = os.path.splitext(img_path_target)[0] + "_drf.png"
        misc.imsave(dcrf_path, decoded_crf)
        print("Dense CRF Processed Mask Saved at: {}".format(dcrf_path))
    if isinstance(outputs, torch.Tensor):
        # Raw scores: argmax over the class dim, drop the batch dim.
        pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=0)
    else:
        pred = np.squeeze(outputs, axis=0)
    if cfg['model']['arch'] in ["pspnet", "icnet", "icnetBN"]:
        pred = pred.astype(np.float32)
        # float32 with F mode, resize back to orig_size
        # NOTE(review): scipy.misc.imresize/imsave were removed in
        # SciPy >= 1.3; this code requires an old SciPy install.
        pred = misc.imresize(pred, cfg['data']['orig_size'], "nearest", mode="F")
    if cfg['data']['dataset'] == 'drive':
        pred = misc.imresize(pred, (584, 565), "nearest", mode="F")
    decoded = pred
    print("Classes found: ", np.unique(pred))
    misc.imsave(img_path_target, decoded)
    print("Segmentation Mask Saved at: {}".format(img_path_target))
def output_masks_to_files(outputs, loader, resized_img, img_name, output_dir, args, cfg):
    """
    Save the prediction mask(s) for one image into ``output_dir``.

    For recurrent models (``args.is_recurrent``) one mask per recurrent
    step is written (``<name>_step<k>.png``), otherwise a single
    ``<name>.png``.  Path separators in ``img_name`` are replaced so the
    name is a valid flat file name.

    :param outputs: model output, or a list of outputs (one per step).
    :param loader: dataset loader (needed for decoding).
    :param resized_img: resized input image (for optional CRF).
    :param img_name: image identifier; may contain '/'.
    :param output_dir: directory the masks are written to.
    :param args: parsed CLI namespace.
    :param cfg: experiment config dict.
    """
    safe_name = img_name.replace('/', '-')
    if args.is_recurrent:
        # one mask per recurrent refinement step, numbered from 1
        for step, step_output in enumerate(outputs, start=1):
            target = os.path.join(output_dir, '{}_step{}.png'.format(safe_name, step))
            _save_output(target, step_output, resized_img, loader, cfg)
    else:
        target = os.path.join(output_dir, safe_name + '.png')
        _save_output(target, outputs, resized_img, loader, cfg)
def _evaluate_from_model(model, images, args, cfg, n_classes, device):
    """
    Run ``model`` on a batch of ``images`` and return the class prediction.

    Depending on ``cfg['model']['arch']`` the model additionally receives
    an initial hidden state ``h0`` (recurrent variants) and, for the DRU
    variants, an initial segmentation state ``s0``.  When
    ``args.eval_flip`` is set, the horizontally flipped batch is also
    evaluated and the outputs of both passes are averaged before the
    argmax.

    :return: array of per-pixel class indices, or a list of such arrays
        (one per recurrent step) for recurrent models.
    """
    if args.eval_flip:
        # Flip images in numpy (not support in tensor)
        flipped_images = np.copy(images.data.cpu().numpy()[:, :, :, ::-1])
        flipped_images = torch.from_numpy(flipped_images).float().to(device)
        if cfg['model']['arch'] in ['reclast']:
            # hidden state at full input resolution
            h0 = torch.ones([images.shape[0], args.hidden_size, images.shape[2], images.shape[3]],
                            dtype=torch.float32, device=device)
            outputs = model(images, h0)
            outputs_flipped = model(flipped_images, h0)
        elif cfg['model']['arch'] in ['recmid']:
            W, H = images.shape[2], images.shape[3]
            # hidden state at 1/16 resolution (four halvings with floor)
            w = int(np.floor(np.floor(np.floor(W / 2) / 2) / 2) / 2)
            h = int(np.floor(np.floor(np.floor(H / 2) / 2) / 2) / 2)
            h0 = torch.ones([images.shape[0], args.hidden_size, w, h],
                            dtype=torch.float32, device=device)
            outputs = model(images, h0)
            outputs_flipped = model(flipped_images, h0)
        elif cfg['model']['arch'] in ['dru']:
            W, H = images.shape[2], images.shape[3]
            w = int(np.floor(np.floor(np.floor(W / 2) / 2) / 2) / 2)
            h = int(np.floor(np.floor(np.floor(H / 2) / 2) / 2) / 2)
            h0 = torch.ones([images.shape[0], args.hidden_size, w, h],
                            dtype=torch.float32, device=device)
            # DRU also recurses over a full-resolution segmentation state
            s0 = torch.ones([images.shape[0], n_classes, W, H],
                            dtype=torch.float32, device=device)
            outputs = model(images, h0, s0)
            outputs_flipped = model(flipped_images, h0, s0)
        elif cfg['model']['arch'] in ['druvgg16', 'druresnet50']:
            W, H = images.shape[2], images.shape[3]
            # vgg16 backbone downsamples 4 times, resnet50 5 times
            w, h = int(W / 2 ** 4), int(H / 2 ** 4)
            if cfg['model']['arch'] in ['druresnet50']:
                w, h = int(W / 2 ** 5), int(H / 2 ** 5)
            h0 = torch.ones([images.shape[0], args.hidden_size, w, h],
                            dtype=torch.float32, device=device)
            s0 = torch.zeros([images.shape[0], n_classes, W, H],
                             dtype=torch.float32, device=device)
            outputs = model(images, h0, s0)
            outputs_flipped = model(flipped_images, h0, s0)
        else:
            outputs = model(images)
            outputs_flipped = model(flipped_images)
        if type(outputs) is list:
            # recurrent model: average unflipped/flipped per step
            outputs_list = [output.data.cpu().numpy() for output in outputs]
            outputs_flipped_list = [output_flipped.data.cpu().numpy() for output_flipped in outputs_flipped]
            outputs_list = [(outputs + outputs_flipped[:, :, :, ::-1]) / 2.0 for
                            outputs, outputs_flipped in zip(outputs_list, outputs_flipped_list)]
            pred = [np.argmax(outputs, axis=1) for outputs in outputs_list]
        else:
            outputs = outputs.data.cpu().numpy()
            outputs_flipped = outputs_flipped.data.cpu().numpy()
            outputs = (outputs + outputs_flipped[:, :, :, ::-1]) / 2.0
            pred = np.argmax(outputs, axis=1)
    else:
        if cfg['model']['arch'] in ['reclast']:
            h0 = torch.ones([images.shape[0], args.hidden_size, images.shape[2], images.shape[3]],
                            dtype=torch.float32, device=device)
            outputs = model(images, h0)
        elif cfg['model']['arch'] in ['recmid']:
            W, H = images.shape[2], images.shape[3]
            w = int(np.floor(np.floor(np.floor(W / 2) / 2) / 2) / 2)
            h = int(np.floor(np.floor(np.floor(H / 2) / 2) / 2) / 2)
            h0 = torch.ones([images.shape[0], args.hidden_size, w, h],
                            dtype=torch.float32, device=device)
            outputs = model(images, h0)
        elif cfg['model']['arch'] in ['dru']:
            W, H = images.shape[2], images.shape[3]
            w = int(np.floor(np.floor(np.floor(W / 2) / 2) / 2) / 2)
            h = int(np.floor(np.floor(np.floor(H / 2) / 2) / 2) / 2)
            h0 = torch.ones([images.shape[0], args.hidden_size, w, h],
                            dtype=torch.float32, device=device)
            s0 = torch.ones([images.shape[0], n_classes, W, H],
                            dtype=torch.float32, device=device)
            outputs = model(images, h0, s0)
        elif cfg['model']['arch'] in ['druvgg16', 'druresnet50']:
            W, H = images.shape[2], images.shape[3]
            w, h = int(W / 2 ** 4), int(H / 2 ** 4)
            if cfg['model']['arch'] in ['druresnet50']:
                w, h = int(W / 2 ** 5), int(H / 2 ** 5)
            h0 = torch.ones([images.shape[0], args.hidden_size, w, h],
                            dtype=torch.float32, device=device)
            s0 = torch.zeros([images.shape[0], n_classes, W, H],
                             dtype=torch.float32, device=device)
            outputs = model(images, h0, s0)
        else:
            outputs = model(images)
        # NOTE(review): iterating ``outputs`` also works when it is a plain
        # tensor (it then iterates the batch dimension), so the len > 1
        # test below distinguishes "list of recurrent steps" from "single
        # tensor" only if the batch size is 1 -- confirm with the caller
        # (test_with_cfg uses batch_size=1).
        outputs_list = [output.data.cpu().numpy() for output in outputs]
        if len(outputs_list)>1:
            outputs_list = [output.data.cpu().numpy() for output in outputs]
            pred = [np.argmax(outputs, axis=1) for outputs in outputs_list]
        else:
            outputs = outputs.data.cpu().numpy()
            pred = np.argmax(outputs, axis=1)
    return pred
def test_with_cfg(cfg, args):
    """
    Evaluate one trained run on its test split and write prediction masks.

    Loads the dataset and model described by ``cfg``, runs inference on
    every test image and stores the decoded segmentation masks (in the
    original image size) in ``cfg['eval_out_path']``.

    :param cfg: experiment config dict (model, data, paths).
    :param args: parsed CLI namespace (device, eval_flip, dcrf,
        measure_time, ...).
    """
    logger = get_logger(cfg['logdir'], 'test')
    device = torch.device(args.device)
    out_path = cfg['eval_out_path']
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    # Setup image
    valid_images = [".jpg", ".gif", ".png", ".tga", ".tif", ".tiff"]  # NOTE(review): currently unused
    data_loader = get_loader(cfg['data']['dataset'])
    data_path = cfg['data']['path']
    print("Read Input Image from : {}".format(data_path))
    print(f"Save the output to : {cfg['eval_out_path']}")
    # transformed loader feeds the network ...
    loader = data_loader(
        data_path,
        is_transform=True,  # Return the original image without any augmentation.
        split=cfg['data']['test_split'],
        img_size=(cfg['data']['img_rows'],
                  cfg['data']['img_cols']),
    )
    # ... while this one keeps the untransformed image for saving/CRF.
    im_loader = data_loader(
        data_path,
        is_transform=False,  # Return the original image without any augmentation.
        split=cfg['data']['test_split'],
        img_size=(cfg['data']['img_rows'],
                  cfg['data']['img_cols']),
    )
    testloader = data.DataLoader(
        loader,
        batch_size=1,
        num_workers=2
    )
    roi_only = 'roi' in cfg['data']['dataset']
    n_classes = loader.n_classes
    # Setup Model
    model, model_path = load_model_and_preprocess(cfg, args, n_classes, device)
    logger.info(f"Loading model {cfg['model']['arch']} from {model_path}")
    with torch.no_grad():
        # For all the images in this loader.
        for i, (images, labels) in enumerate(testloader):
            (orig_img, org_lab) = im_loader[i]
            img_name = loader.files[loader.split][i]
            if type(img_name) is list:
                img_name = img_name[0]
            start_time = timeit.default_timer()
            images = images.to(device)
            n_classes = loader.n_classes
            pred = _evaluate_from_model(model, images, args, cfg, n_classes, device)
            gt = labels.numpy()
            if roi_only:
                """ Process for ROI, basically, mask the Pred based on GT"""
                # copy the void/ignore region of the GT into the prediction
                if type(pred) is list:
                    for k in range(len(pred)):
                        pred[k] = np.where(gt == loader.void_classes, loader.void_classes, pred[k])
                        if cfg['data']['dataset'] == 'drive':
                            pred[k] = pred[k] + 1
                            pred[k][gt == 250] = 2
                            pred[k][pred[k] == 2] = 0
                else:
                    pred = np.where(gt == loader.void_classes, loader.void_classes, pred)
                    if cfg['data']['dataset'] == 'drive':
                        pred = pred + 1
                        pred[gt == 250] = 2
                        pred[pred == 2] = 0
            # NOTE(review): the block below duplicates the ROI masking above
            # but runs unconditionally (also for non-ROI datasets); when
            # roi_only is set and the dataset is 'drive' it applies the
            # +1/relabel shift a *second* time. This looks like a
            # copy-paste leftover -- confirm intended behavior before
            # relying on 'drive' ROI results.
            if type(pred) is list:
                for k in range(len(pred)):
                    pred[k] = np.where(gt == loader.void_classes, loader.void_classes, pred[k])
                    if cfg['data']['dataset'] == 'drive':
                        pred[k] = pred[k] + 1
                        pred[k][gt == 250] = 2
                        pred[k][pred[k] == 2] = 0
            else:
                pred = np.where(gt == loader.void_classes, loader.void_classes, pred)
                if cfg['data']['dataset'] == 'drive':
                    pred = pred + 1
                    pred[gt == 250] = 2
                    pred[pred == 2] = 0
            output_masks_to_files(pred, loader, orig_img, img_name, out_path, args, cfg)
            # Other unrelated stuff
            if args.measure_time:
                elapsed_time = timeit.default_timer() - start_time
                if (i + 1) % 50 == 0:
                    print(
                        "Inference time \
                      (iter {0:5d}): {1:3.5f} fps".format(
                            i + 1, pred[-1].shape[0] / elapsed_time
                        )
                    )
def run_test(args, run_dirs):
    """
    Run the test pass for every run directory in ``run_dirs``.

    For each directory, the stored config/args are loaded, the model is
    evaluated via :func:`test_with_cfg`, and memory is reclaimed before
    the next run.
    """
    for run_dir in run_dirs:
        cfg, args = load_complete_info_from_dir(run_dir, args)
        test_with_cfg(cfg, args)
        # drop the config and force a GC pass so model/data memory is
        # released before loading the next run
        cfg = None
        gc.collect()
if __name__ == '__main__':
    # Parse the common test CLI arguments.
    parser = test_parser()
    args = parser.parse_args()
    # Resolve the run directories to evaluate from the CLI selection.
    run_dirs = final_run_dirs(args)
    # Drop the CLI dataset value -- presumably each run dir supplies its
    # own dataset config in load_complete_info_from_dir (TODO confirm).
    del args.dataset
    run_test(args, run_dirs)
| [
"numpy.log",
"torch.from_numpy",
"numpy.ascontiguousarray",
"numpy.array",
"scipy.misc.imresize",
"ptsemseg.loader.get_loader",
"pydensecrf.densecrf.DenseCRF2D",
"os.path.exists",
"utils.test_parser",
"numpy.where",
"ptsemseg.utils.get_logger",
"scipy.misc.imsave",
"validate_from_file.final_... | [((679, 693), 'os.chdir', 'os.chdir', (['ROOT'], {}), '(ROOT)\n', (687, 693), False, 'import os\n'), ((2340, 2377), 'scipy.misc.imsave', 'misc.imsave', (['img_path_target', 'decoded'], {}), '(img_path_target, decoded)\n', (2351, 2377), True, 'import scipy.misc as misc\n'), ((10122, 10155), 'ptsemseg.utils.get_logger', 'get_logger', (["cfg['logdir']", '"""test"""'], {}), "(cfg['logdir'], 'test')\n", (10132, 10155), False, 'from ptsemseg.utils import get_logger\n'), ((10169, 10194), 'torch.device', 'torch.device', (['args.device'], {}), '(args.device)\n', (10181, 10194), False, 'import torch\n'), ((10406, 10440), 'ptsemseg.loader.get_loader', 'get_loader', (["cfg['data']['dataset']"], {}), "(cfg['data']['dataset'])\n", (10416, 10440), False, 'from ptsemseg.loader import get_loader\n'), ((11137, 11189), 'torch.utils.data.DataLoader', 'data.DataLoader', (['loader'], {'batch_size': '(1)', 'num_workers': '(2)'}), '(loader, batch_size=1, num_workers=2)\n', (11152, 11189), False, 'from torch.utils import data\n'), ((11345, 11400), 'validate.load_model_and_preprocess', 'load_model_and_preprocess', (['cfg', 'args', 'n_classes', 'device'], {}), '(cfg, args, n_classes, device)\n', (11370, 11400), False, 'from validate import validate, load_model_and_preprocess\n'), ((14758, 14771), 'utils.test_parser', 'test_parser', ([], {}), '()\n', (14769, 14771), False, 'from utils import test_parser\n'), ((14818, 14838), 'validate_from_file.final_run_dirs', 'final_run_dirs', (['args'], {}), '(args)\n', (14832, 14838), False, 'from validate_from_file import load_complete_info_from_dir, final_run_dirs\n'), ((941, 961), 'numpy.squeeze', 'np.squeeze', (['unary', '(0)'], {}), '(unary, 0)\n', (951, 961), True, 'import numpy as np\n'), ((1151, 1178), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['unary'], {}), '(unary)\n', (1171, 1178), True, 'import numpy as np\n'), ((1202, 1235), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['resized_img'], {}), 
'(resized_img)\n', (1222, 1235), True, 'import numpy as np\n'), ((1249, 1288), 'pydensecrf.densecrf.DenseCRF2D', 'dcrf.DenseCRF2D', (['w', 'h', 'loader.n_classes'], {}), '(w, h, loader.n_classes)\n', (1264, 1288), True, 'import pydensecrf.densecrf as dcrf\n'), ((1644, 1679), 'scipy.misc.imsave', 'misc.imsave', (['dcrf_path', 'decoded_crf'], {}), '(dcrf_path, decoded_crf)\n', (1655, 1679), True, 'import scipy.misc as misc\n'), ((1893, 1920), 'numpy.squeeze', 'np.squeeze', (['outputs'], {'axis': '(0)'}), '(outputs, axis=0)\n', (1903, 1920), True, 'import numpy as np\n'), ((2094, 2160), 'scipy.misc.imresize', 'misc.imresize', (['pred', "cfg['data']['orig_size']", '"""nearest"""'], {'mode': '"""F"""'}), "(pred, cfg['data']['orig_size'], 'nearest', mode='F')\n", (2107, 2160), True, 'import scipy.misc as misc\n'), ((2218, 2270), 'scipy.misc.imresize', 'misc.imresize', (['pred', '(584, 565)', '"""nearest"""'], {'mode': '"""F"""'}), "(pred, (584, 565), 'nearest', mode='F')\n", (2231, 2270), True, 'import scipy.misc as misc\n'), ((2319, 2334), 'numpy.unique', 'np.unique', (['pred'], {}), '(pred)\n', (2328, 2334), True, 'import numpy as np\n'), ((3171, 3212), 'os.path.join', 'os.path.join', (['out_path', "(img_name + '.png')"], {}), "(out_path, img_name + '.png')\n", (3183, 3212), False, 'import os\n'), ((10243, 10267), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (10257, 10267), False, 'import os\n'), ((10277, 10298), 'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (10288, 10298), False, 'import os\n'), ((11691, 11706), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11704, 11706), False, 'import torch\n'), ((14576, 14616), 'validate_from_file.load_complete_info_from_dir', 'load_complete_info_from_dir', (['r_dir', 'args'], {}), '(r_dir, args)\n', (14603, 14616), False, 'from validate_from_file import load_complete_info_from_dir, final_run_dirs\n'), ((14703, 14715), 'gc.collect', 'gc.collect', ([], {}), '()\n', (14713, 
14715), False, 'import gc\n'), ((979, 992), 'numpy.log', 'np.log', (['unary'], {}), '(unary)\n', (985, 992), True, 'import numpy as np\n'), ((1534, 1564), 'numpy.array', 'np.array', (['mask'], {'dtype': 'np.uint8'}), '(mask, dtype=np.uint8)\n', (1542, 1564), True, 'import numpy as np\n'), ((3731, 3853), 'torch.ones', 'torch.ones', (['[images.shape[0], args.hidden_size, images.shape[2], images.shape[3]]'], {'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], args.hidden_size, images.shape[2], images.\n shape[3]], dtype=torch.float32, device=device)\n', (3741, 3853), False, 'import torch\n'), ((6496, 6522), 'numpy.argmax', 'np.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (6505, 6522), True, 'import numpy as np\n'), ((6599, 6721), 'torch.ones', 'torch.ones', (['[images.shape[0], args.hidden_size, images.shape[2], images.shape[3]]'], {'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], args.hidden_size, images.shape[2], images.\n shape[3]], dtype=torch.float32, device=device)\n', (6609, 6721), False, 'import torch\n'), ((8728, 8754), 'numpy.argmax', 'np.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (8737, 8754), True, 'import numpy as np\n'), ((12097, 12119), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (12117, 12119), False, 'import timeit\n'), ((1586, 1619), 'os.path.splitext', 'os.path.splitext', (['img_path_target'], {}), '(img_path_target)\n', (1602, 1619), False, 'import os\n'), ((4230, 4323), 'torch.ones', 'torch.ones', (['[images.shape[0], args.hidden_size, w, h]'], {'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], args.hidden_size, w, h], dtype=torch.float32,\n device=device)\n', (4240, 4323), False, 'import torch\n'), ((6222, 6248), 'numpy.argmax', 'np.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (6231, 6248), True, 'import numpy as np\n'), ((7042, 7135), 'torch.ones', 'torch.ones', (['[images.shape[0], args.hidden_size, w, h]'], 
{'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], args.hidden_size, w, h], dtype=torch.float32,\n device=device)\n', (7052, 7135), False, 'import torch\n'), ((8590, 8616), 'numpy.argmax', 'np.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (8599, 8616), True, 'import numpy as np\n'), ((13685, 13747), 'numpy.where', 'np.where', (['(gt == loader.void_classes)', 'loader.void_classes', 'pred'], {}), '(gt == loader.void_classes, loader.void_classes, pred)\n', (13693, 13747), True, 'import numpy as np\n'), ((4698, 4791), 'torch.ones', 'torch.ones', (['[images.shape[0], args.hidden_size, w, h]'], {'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], args.hidden_size, w, h], dtype=torch.float32,\n device=device)\n', (4708, 4791), False, 'import torch\n'), ((4833, 4920), 'torch.ones', 'torch.ones', (['[images.shape[0], n_classes, W, H]'], {'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], n_classes, W, H], dtype=torch.float32, device=\n device)\n', (4843, 4920), False, 'import torch\n'), ((7454, 7547), 'torch.ones', 'torch.ones', (['[images.shape[0], args.hidden_size, w, h]'], {'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], args.hidden_size, w, h], dtype=torch.float32,\n device=device)\n', (7464, 7547), False, 'import torch\n'), ((7589, 7676), 'torch.ones', 'torch.ones', (['[images.shape[0], n_classes, W, H]'], {'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], n_classes, W, H], dtype=torch.float32, device=\n device)\n', (7599, 7676), False, 'import torch\n'), ((13019, 13081), 'numpy.where', 'np.where', (['(gt == loader.void_classes)', 'loader.void_classes', 'pred'], {}), '(gt == loader.void_classes, loader.void_classes, pred)\n', (13027, 13081), True, 'import numpy as np\n'), ((13377, 13442), 'numpy.where', 'np.where', (['(gt == loader.void_classes)', 'loader.void_classes', 'pred[k]'], {}), '(gt == loader.void_classes, loader.void_classes, pred[k])\n', (13385, 13442), 
True, 'import numpy as np\n'), ((14110, 14132), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (14130, 14132), False, 'import timeit\n'), ((1440, 1460), 'numpy.argmax', 'np.argmax', (['q'], {'axis': '(0)'}), '(q, axis=0)\n', (1449, 1460), True, 'import numpy as np\n'), ((3613, 3645), 'torch.from_numpy', 'torch.from_numpy', (['flipped_images'], {}), '(flipped_images)\n', (3629, 3645), False, 'import torch\n'), ((5348, 5441), 'torch.ones', 'torch.ones', (['[images.shape[0], args.hidden_size, w, h]'], {'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], args.hidden_size, w, h], dtype=torch.float32,\n device=device)\n', (5358, 5441), False, 'import torch\n'), ((5483, 5571), 'torch.zeros', 'torch.zeros', (['[images.shape[0], n_classes, W, H]'], {'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], n_classes, W, H], dtype=torch.float32, device\n =device)\n', (5494, 5571), False, 'import torch\n'), ((8044, 8137), 'torch.ones', 'torch.ones', (['[images.shape[0], args.hidden_size, w, h]'], {'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], args.hidden_size, w, h], dtype=torch.float32,\n device=device)\n', (8054, 8137), False, 'import torch\n'), ((8179, 8267), 'torch.zeros', 'torch.zeros', (['[images.shape[0], n_classes, W, H]'], {'dtype': 'torch.float32', 'device': 'device'}), '([images.shape[0], n_classes, W, H], dtype=torch.float32, device\n =device)\n', (8190, 8267), False, 'import torch\n'), ((12687, 12752), 'numpy.where', 'np.where', (['(gt == loader.void_classes)', 'loader.void_classes', 'pred[k]'], {}), '(gt == loader.void_classes, loader.void_classes, pred[k])\n', (12695, 12752), True, 'import numpy as np\n'), ((4113, 4128), 'numpy.floor', 'np.floor', (['(W / 2)'], {}), '(W / 2)\n', (4121, 4128), True, 'import numpy as np\n'), ((4182, 4197), 'numpy.floor', 'np.floor', (['(H / 2)'], {}), '(H / 2)\n', (4190, 4197), True, 'import numpy as np\n'), ((6925, 6940), 'numpy.floor', 'np.floor', (['(W / 2)'], 
{}), '(W / 2)\n', (6933, 6940), True, 'import numpy as np\n'), ((6994, 7009), 'numpy.floor', 'np.floor', (['(H / 2)'], {}), '(H / 2)\n', (7002, 7009), True, 'import numpy as np\n'), ((4581, 4596), 'numpy.floor', 'np.floor', (['(W / 2)'], {}), '(W / 2)\n', (4589, 4596), True, 'import numpy as np\n'), ((4650, 4665), 'numpy.floor', 'np.floor', (['(H / 2)'], {}), '(H / 2)\n', (4658, 4665), True, 'import numpy as np\n'), ((7337, 7352), 'numpy.floor', 'np.floor', (['(W / 2)'], {}), '(W / 2)\n', (7345, 7352), True, 'import numpy as np\n'), ((7406, 7421), 'numpy.floor', 'np.floor', (['(H / 2)'], {}), '(H / 2)\n', (7414, 7421), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
@author: <NAME> <<EMAIL>>
Date: Oct 2017
"""
import numba as nb
from numba import jit, njit, float64, int32
import numpy as np
nb.NUMBA_DISABLE_JIT = 0  # 0 -> JIT enabled; set to 1 to debug in pure Python
GLOB_NOGIL = True  # release the GIL in all jitted functions below
GLOB_PARALLEL = True  # flag for functions where parallel=... may be enabled
# %% Simulation environment processing
# f.i. port and part flow processing, material properties etc..
# %%% Simulation Env. port updating
@jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def upd_p_arr(ports_all, port_ids, values, _port_own_idx):
    """
    Copy one part's port values into the global ports array.

    For each port of the part, the value at the flattened index
    ``_port_own_idx[i]`` of ``values`` is written to position
    ``port_ids[i]`` of ``ports_all``.  Only the ports of a single part
    are updated per call.
    """
    # zip over the own-index/target-id pairs; flat indexing supports
    # both 1D and 2D value arrays
    for own_idx, trgt_id in zip(_port_own_idx, port_ids):
        ports_all[trgt_id] = values.flat[own_idx]
@njit(nogil=GLOB_NOGIL, cache=True)
def _upddate_ports_interm(ports_all, trgt_indices, ports_src, source=0):
    """
    Push intermediate step results to the global ports array.

    ``ports_src[source]`` holds the intermediate values of the current
    solver step; ``source`` selects which intermediate result to take
    (0 is the first intermediate step).  Each value's first element is
    written to its target index in ``ports_all``.
    """
    for i, val in enumerate(ports_src[source]):
        ports_all[trgt_indices[i]] = val[0]
@njit(nogil=GLOB_NOGIL, cache=True)
def _upddate_ports_result(
    ports_all, trgt_indices, ports_src, stepnum, src_list
):
    """
    Push final step results to the global ports array.

    For each source array in ``ports_src``, the element at offset
    ``stepnum * src_list[i][0] + src_list[i][1]`` is copied to
    ``ports_all[trgt_indices[i]]``.
    """
    for i, src in enumerate(ports_src):
        offset = stepnum * src_list[i][0] + src_list[i][1]
        ports_all[trgt_indices[i]] = src[offset]
@njit(nogil=GLOB_NOGIL, cache=True)  # parallel=GLOB_PARALLEL useful
def _port_values_to_idx(ports_all, port_link_idx, port_own_idx, out):
    """
    Copy requested port values into a non-contiguous output array
    (values only live at the specific flat locations in
    ``port_own_idx``).
    """
    for own, link in zip(port_own_idx, port_link_idx):
        out.flat[own] = ports_all[link]
@nb.njit(nogil=GLOB_NOGIL, cache=True)  # parallel=GLOB_PARALLEL useful
def _port_values_to_cont(ports_all, port_link_idx, out):
    """
    Copy requested port values into a contiguous output array.
    """
    for i, link in enumerate(port_link_idx):
        out.flat[i] = ports_all[link]
# %%% Simulation Env. in-part flow processing:
@njit(nogil=GLOB_NOGIL, cache=True)  # parallel=GLOB_PARALLEL useful
def _process_flow_invar(
    process_flows, dm_io, dm_top, dm_bot, dm_port, stepnum, res_dm
):
    """
    Process massflows for parts with an invariant massflow.

    A single scalar massflow ``dm_io[0]`` is distributed over the
    cell-cell borders: a positive value flows in from the top, a
    negative value from the bottom (stored with inverted sign so that
    all border/port massflows are positive).  The step's massflow is
    logged to ``res_dm`` and ``False`` is returned so the caller can
    disable flow processing until the next step.
    """
    if process_flows[0]:
        flow = dm_io[0]
        if flow >= 0.0:  # massflow enters at the top
            dm_top[1:] = flow  # flow through the top cell-cell borders
            dm_bot[:-1] = 0.0  # no flow coming from the bottom
            dm_port[0] = flow  # only the inflow port carries the flow
            dm_port[-1] = 0.0
        else:  # massflow enters at the bottom
            dm_bot[:-1] = -flow  # invert sign -> positive border flow
            dm_top[1:] = 0.0  # no flow coming from the top
            dm_port[-1] = -flow  # inflow is at the bottom port here
            dm_port[0] = 0.0
        res_dm[stepnum[0]] = flow  # log this step's massflow
    # disable further flow processing until the next step
    return False
@njit(nogil=GLOB_NOGIL, cache=True)  # parallel=GLOB_PARALLEL useful
def _process_flow_invar_fast(
    process_flows, dm_io, dm_top, dm_bot, stepnum, res_dm
):
    """
    Process massflows for parts with invariant flows.

    Massflows are being processed for parts where the massflow is defined as
    invariant.  Works like ``_process_flow_invar`` but allocates and
    returns the port massflow array instead of filling a preallocated one.

    Returns
    -------
    tuple
        ``(False, dm_port)``: ``False`` disables flow processing until
        the next step; ``dm_port`` holds the (in, out) port massflows.
    """
    # BUGFIX: allocate dm_port before the branch. It is returned on every
    # call, but was previously only assigned inside the
    # ``process_flows[0]`` branch, leaving it unbound (a compile/runtime
    # error) when flows had already been processed in this step.
    dm_port = np.empty(2)
    if process_flows[0]:
        flow = dm_io[0]
        if flow >= 0.0:  # massflow from the top
            # get massflow though top cell-cell border:
            dm_top[1:] = flow
            dm_bot[:-1] = 0.0  # set massflow from the bottom to zero
            dm_port[0] = flow  # positive inflow enters at the in port
            dm_port[-1] = 0.0
        else:  # massflow from the bottom:
            # get massflow though bottom cell-cell border (and *-1!):
            dm_bot[:-1] = -flow
            dm_top[1:] = 0.0  # set massflow from the top to zero
            dm_port[0] = 0.0
            dm_port[-1] = -flow  # port massflows are always positive
        res_dm[stepnum[0]] = flow  # log this step's massflow
    # return process flows bool to disable processing flows until next step
    return False, dm_port
@njit(nogil=GLOB_NOGIL, cache=True)  # parallel=GLOB_PARALLEL useful
def _process_flow_var(
    process_flows,
    dm_io,
    dm,
    dm_top,
    dm_bot,
    dm_port,
    port_own_idx,
    stepnum,
    res_dm,
):
    """
    Process massflows for parts with variant flows.

    Massflows are being processed for parts where the massflow is defined as
    variant, i.e. each cell may carry a different massflow.  The port
    massflows in ``dm_io`` are acquired by update_FlowNet beforehand; the
    per-cell massflow ``dm`` is built as the cumulative sum of the port
    in/out flows, distributed over the cell ranges between consecutive
    ports.  Returns ``False`` so the caller can disable flow processing
    until the next step.
    """
    if process_flows[0]:
        # massflow through ports is aquired by update_FlowNet
        # get massflow through each cell (sum up in/out dm of ports and then
        # run cumulative sum over all cells)
        # copy I/O flows to NOT alter the I/O array during calculations:
        # this is the procedure for collapsed and flattened dm_io.
        dm[:] = 0.0
        cs = np.cumsum(dm_io)  # cumulated port flows along the part
        # distribute each cumulated flow over the cells between two ports:
        for i in range(port_own_idx.size - 1):
            dm[port_own_idx[i] : port_own_idx[i + 1]] += cs[i]
        # get port values
        dm_port[:] = dm_io
        # get massflow though top cell-cell border:
        dm_top[1:] = dm[:-1]
        # get massflow though bottom cell-cell border (and *-1!):
        dm_bot[:-1] = -dm[:-1]
        # remove negative values:
        dm_top[dm_top < 0] = 0.0
        dm_bot[dm_bot < 0] = 0.0
        dp = dm_port.ravel()  # flattened view to ports for 2D indexing
        dp[dp < 0.0] = 0.0  # ports keep only the positive inflow
        # set last value of dm to be the same as the value of the previous cell
        # to avoid having 0-massflow in it due to cumsum:
        dm[-1] = dm[-2]
        res_dm[stepnum[0]] = dm  # log this step's per-cell massflows
    # return process flows bool to disable processing flows until next step
    return False
@njit(nogil=GLOB_NOGIL, cache=True)  # parallel=GLOB_PARALLEL useful
def _process_flow_multi_flow(
    process_flows, dm_io, dm_top, dm_bot, dm_port, stepnum, res_dm
):
    """
    Process massflows for parts with multiple separated flow channels.

    Each channel's massflow must be invariant within the channel.  The
    per-channel port massflows in ``dm_io`` are acquired by
    update_FlowNet.  ``dm_port`` holds two cells per channel (in, out);
    border and port massflows are always stored as positive values.
    Returns ``False`` to disable flow processing until the next step.
    """
    if process_flows[0]:
        for ch in range(dm_io.size):
            flow = dm_io[ch]
            if flow >= 0.0:  # flow enters at the channel's in port (top)
                dm_top[1:, ch] = flow  # top cell-cell borders carry it
                dm_bot[:-1, ch] = 0.0  # nothing from the bottom
                dm_port[2 * ch] = flow  # in port
                dm_port[2 * ch + 1] = 0.0  # out port
            else:  # flow enters at the channel's out port (bottom)
                dm_bot[:-1, ch] = -flow  # sign flip -> positive value
                dm_top[1:, ch] = 0.0  # nothing from the top
                dm_port[2 * ch] = 0.0  # in port
                dm_port[2 * ch + 1] = -flow  # out port, kept positive
        res_dm[stepnum[0]] = dm_io  # log this step's channel massflows
    # disable further flow processing until the next step
    return False
# %%% Simulation Env. in-part port temperatures processing:
@nb.njit(cache=True, nogil=GLOB_NOGIL)
def _process_ports_collapsed(
    ports_all,
    port_link_idx,
    port_own_idx,
    T,
    mcp,
    UA_port,
    UA_port_wll,
    A_p_fld_mean,
    port_gsp,
    grid_spacing,
    lam_T,
    cp_port,
    lam_port_fld,
    T_port,
):
    """
    Values of requested ports are saved to results array. Only works for parts
    which use collapsed port arrays.

    For each port, the connected part's temperature is read from
    ``ports_all``, the temperature dependent fluid properties at the port
    are refreshed, and the total port heat conductance ``UA`` (fluid
    half-cells in series, wall conductance added in parallel) is updated.
    Returns the rate of temperature change of each port's own cell caused
    by port heat conduction.
    """
    dT_cond_port = np.zeros(port_own_idx.shape)
    for i in range(port_link_idx.size):
        p_val = ports_all[port_link_idx[i]]  # temperature of connected port
        idx = port_own_idx[i]  # flat index of the own cell at this port
        # collapsed arrays only take index i:
        T_port.flat[i] = p_val
        cp_port.flat[i] = cp_water(p_val)
        lam_port_fld.flat[i] = lambda_water(p_val)
        # lam_fld_own_p[i] =
        # get total port heat conduction: two fluid half-cells in series
        # (port side + own side) plus the wall conductance:
        UA_port.flat[i] = (
            A_p_fld_mean[i]
            / (
                +(port_gsp[i] / (2 * lam_port_fld[i]))
                + (grid_spacing / (2 * lam_T.flat[idx]))
            )
            + UA_port_wll[i]
        )
        # temperature change rate of the own cell due to this port:
        dT_cond_port.flat[i] = (
            UA_port.flat[i] * (p_val - T.flat[idx]) / mcp.flat[idx]
        )
    return dT_cond_port
@njit(nogil=GLOB_NOGIL, cache=True)  # parallel=GLOB_PARALLEL useful
def _process_ports(
    ports_all,
    port_link_idx,
    port_own_idx,
    T,
    mcp,
    UA_port,
    UA_port_wll,
    A_p_fld_mean,
    port_gsp,
    grid_spacing,
    lam_T,
    cp_port,
    lam_port_fld,
    T_port,
):
    """
    Values of requested ports are saved to results array.

    Non-collapsed variant of ``_process_ports_collapsed``: the port
    arrays are indexed at the port's own cell index ``idx`` instead of
    the running port number ``i``.  Refreshes the port fluid properties
    and the total port heat conductance ``UA`` (fluid half-cells in
    series, wall conductance in parallel) and returns the rate of
    temperature change per port caused by port heat conduction.
    """
    dT_cond_port = np.zeros(port_own_idx.shape)
    for i in range(port_link_idx.size):
        p_val = ports_all[port_link_idx[i]]  # temperature of connected port
        idx = port_own_idx[i]  # flat index of the own cell at this port
        T_port.flat[idx] = p_val
        cp_port.flat[idx] = cp_water(p_val)
        # non-collapsed arrays are indexed at the own cell index idx:
        lam_port_fld.flat[idx] = lambda_water(p_val)
        # lam_fld_own_p[i] =
        # get total port heat conduction: two fluid half-cells in series
        # plus the wall conductance:
        UA_port.flat[idx] = (
            A_p_fld_mean.flat[idx]
            / (
                +(port_gsp.flat[idx] / (2 * lam_port_fld.flat[idx]))
                + (grid_spacing / (2 * lam_T.flat[idx]))
            )
            + UA_port_wll.flat[idx]
        )
        # result array is collapsed (one entry per port) -> index i:
        dT_cond_port.flat[i] = UA_port.flat[idx] * (p_val - T[idx]) / mcp[idx]
    return dT_cond_port
# %%% Simulation Env. in-part material properties processing:
@njit(nogil=GLOB_NOGIL, cache=True)
def water_mat_props_ext_view(T_ext, cp_T, lam_T, rho_T, ny_T):
    """
    Get the relevant temperature dependent material properties of water for
    parts which use the extended array format:
        cp: Specific heat capacity in [J/(kg K)]
        lam: Heat conductivity in [W/(m K)]
        rho: Density in [kg/m^3]
        ny: Kinematic viscosity in [Pa/m^2]

    All result arrays are filled in-place.  Only ``cp_T`` is evaluated on
    the full extended grid, since top/bottom views of it are needed for
    advection; the other properties are evaluated on the inner cells.
    """
    get_cp_water(T_ext, cp_T)  # extended array for top/bot views in adv.
    get_lambda_water(T_ext[1:-1], lam_T)  # non-ext. array for other mat.
    get_rho_water(T_ext[1:-1], rho_T)  # props. since no views needed here
    get_ny_water(T_ext[1:-1], ny_T)
@njit(nogil=GLOB_NOGIL, cache=True)
def water_mat_props_ext(T_ext):
    """
    Return temperature dependent material properties of water for parts
    using the extended array format.

    Returns a tuple of:
    cp: Specific heat capacity in [J/(kg K)], extended shape (incl. ghost
        cells for top/bottom views in advection)
    lam: Heat conductivity in [W/(m K)], non-extended shape
    rho: Density in [kg/m^3], non-extended shape
    ny: Kinematic viscosity in [Pa/m^2], non-extended shape
    """
    # only cp needs the ghost cells; all other properties act on the
    # core cells, since no views are needed for them:
    T_core = T_ext[1:-1]
    cp = cp_water(T_ext)
    lam = lambda_water(T_core)
    rho = rho_water(T_core)
    ny = ny_water(T_core)
    return cp, lam, rho, ny
@njit(nogil=GLOB_NOGIL, cache=True)
def water_mat_props(T, cp_T, lam_T, rho_T, ny_T):
    """
    Get the relevant temperature dependent material properties of water:
    cp: Specific heat capacity in [J/(kg K)]
    lam: Heat conductivity in [W/(m K)]
    rho: Density in [kg/m^3]
    ny: Kinematic viscosity in [Pa/m^2]

    All results are written in-place to the passed output arrays for the
    full (non-extended) temperature array `T`.
    """
    get_cp_water(T, cp_T)
    get_lambda_water(T, lam_T)  # non-ext. array for mat.
    get_rho_water(T, rho_T)  # props. since no views needed here
    get_ny_water(T, ny_T)
@njit(nogil=GLOB_NOGIL, cache=True)
def cell_temp_props_ext(T_ext, V_cell, cp_T, rho_T, mcp_wll, rhocp, mcp, ui):
    """
    Compute each cell's temperature dependent properties in-place for parts
    which use the extended array format:
    rho*cp: Volume specific heat capacity in [J / (K m^3)]
    m*cp: heat capacity (of fluid AND wall) in [J / K]
    u_i: mass specific inner energy in [J / kg]
    """
    # cp of the core (non-ghost) cells is needed twice, hoist the view:
    cp_core = cp_T[1:-1]
    # volume specific heat capacity:
    rhocp[:] = rho_T * cp_core
    # heat capacity of the fluid volume plus the wall:
    mcp[:] = V_cell * rhocp + mcp_wll
    # mass specific inner energy:
    ui[:] = cp_core * T_ext[1:-1]
@njit(nogil=GLOB_NOGIL, cache=True)
def cell_temp_props_fld(
    T_ext_fld, V_cell, cp_T, rho_T, rhocp_fld, mcp_fld, ui_fld
):
    """
    Compute each cell's fluid-only temperature dependent properties in-place
    for parts which use the extended array format:
    rho*cp: Volume specific heat capacity in [J / (K m^3)]
    m*cp: heat capacity (of fluid) in [J / K]
    u_i: mass specific inner energy in [J / kg]
    """
    # cp of the core (non-ghost) cells is needed twice, hoist the view:
    cp_core = cp_T[1:-1]
    # volume specific heat capacity:
    rhocp_fld[:] = rho_T * cp_core
    # heat capacity of the fluid alone (no wall contribution):
    mcp_fld[:] = V_cell * rhocp_fld
    # mass specific inner energy:
    ui_fld[:] = cp_core * T_ext_fld[1:-1]
@njit(nogil=GLOB_NOGIL, cache=True)
def specific_inner_energy_wll(T_wll, cp_wll, ui):
    """
    Calculate the wall cells' mass specific inner energy.

    u_i: mass specific inner energy in [J / kg], written in-place to `ui`
    as the product of the wall's specific heat capacity `cp_wll` and the
    wall temperature `T_wll`.
    """
    # mass specific inner energy:
    ui[:] = cp_wll * T_wll
@njit(nogil=GLOB_NOGIL, cache=True)
def cell_temp_props(T, V_cell, cp_T, rho_T, mcp_wll, rhocp, mcp, ui):
    """
    Compute each cell's temperature dependent properties in-place:
    rho*cp: Volume specific heat capacity in [J / (K m^3)]
    m*cp: heat capacity (of fluid AND wall) in [J / K]
    u_i: mass specific inner energy in [J / kg]
    """
    # volume specific heat capacity of the fluid:
    rhocp[:] = cp_T * rho_T
    # total heat capacity: fluid volume share plus the wall:
    mcp[:] = rhocp * V_cell + mcp_wll
    # mass specific inner energy:
    ui[:] = T * cp_T
@njit(nogil=GLOB_NOGIL, cache=True)
def _lambda_mean_view(lam_T, out):
    """
    Write the mean heat conductivity of each pair of neighbouring cells
    along the first axis of an n-dimensional grid into `out`.

    This is **NOT** the arithmetic mean: two heat conductivities in a series
    circuit are averaged by adding their inverses. For `lam_1=40` and
    `lam_2=80`, each over a length `L=0.2`, the mean is
    `lam_mean = 2*L / (L/lam_1 + L/lam_2) = 2 / (1/lam_1 + 1/lam_2)`,
    where the right hand side only holds for equally spaced grids.
    """
    lam_lo = lam_T[:-1]  # conductivity of cell i
    lam_hi = lam_T[1:]  # conductivity of cell i+1
    out[:] = 2 * lam_lo * lam_hi / (lam_lo + lam_hi)
@njit(nogil=GLOB_NOGIL, cache=True)
def _lambda_mean(lam_T):
    """
    Return the mean heat conductivity of each pair of neighbouring cells
    along the first axis of an n-dimensional grid.

    This is **NOT** the arithmetic mean: two heat conductivities in a series
    circuit are averaged by adding their inverses. For `lam_1=40` and
    `lam_2=80`, each over a length `L=0.2`, the mean is
    `lam_mean = 2*L / (L/lam_1 + L/lam_2) = 2 / (1/lam_1 + 1/lam_2)`,
    where the right hand side only holds for equally spaced grids.
    """
    lam_lo = lam_T[:-1]  # conductivity of cell i
    lam_hi = lam_T[1:]  # conductivity of cell i+1
    return 2 * lam_lo * lam_hi / (lam_lo + lam_hi)
# %% U*A values calculation:
@njit(nogil=GLOB_NOGIL, cache=True)
def UA_plate_tb(A_cell, grid_spacing, lam_mean, UA_tb_wll, out):
    """
    Write the UA-value between neighbouring cells of a plate-like geometry
    (for example in a pipe or TES) into `out`.

    Fluid conduction and the wall UA-value form a parallel circuit and are
    added. `out` is an extended array so that views can be used downstream;
    only the core cells are written.
    """
    # conduction through the fluid between two neighbouring cells:
    UA_fld = A_cell / grid_spacing * lam_mean
    # add the wall UA-value (parallel circuit):
    out[1:-1] = UA_fld + UA_tb_wll
@njit(nogil=GLOB_NOGIL, cache=True)
def UA_plate_tb_fld(A_cell, grid_spacing, lam_mean, out):
    """
    Write the fluid-only UA-value between neighbouring cells of a plate-like
    geometry (for example in a pipe or TES) into `out`.

    `out` is an extended array so that views can be used downstream; only
    the core cells are written. No wall contribution is added here.
    """
    # conduction through the fluid between two neighbouring cells:
    UA_fld = A_cell / grid_spacing * lam_mean
    out[1:-1] = UA_fld
@njit(nogil=GLOB_NOGIL, cache=True)
def UA_plate_tb_wll(UA_tb_wll, out):
    """
    Write the wall-only UA-value between neighbouring cells of a plate-like
    geometry (for example in a pipe or TES) into `out`.

    `out` is an extended array so that views can be used downstream; only
    the core cells are written.
    """
    # broadcast the constant wall UA-value into the core cells:
    core = out[1:-1]
    core[:] = UA_tb_wll
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def buoyancy_byNusselt(T, ny, d_i, lam_mean):
    """
    Calculate the buoyancy driven heat flow by Nusselt approximation.
    Calculate the buoyancy driven heat flow inside a vertically stratified
    thermal energy storage tank by using Nusselt approximations to calculate a
    correction factor for the heat conductivity.

    `lam_mean` is scaled in-place where the Nusselt number exceeds 1; the
    function returns None. If no temperature inversion exists, nothing is
    changed.
    """
    # get temperature difference for all cells (temperature below last cell
    # is 0, thus don't use the last cell):
    # T_diff = T_bot[:-1] - T[:-1]  # replaced with stencil operation below:
    T_diff = T[1:] - T[:-1]
    # if there is no temperature inversion, skip this function:
    if np.all(T_diff <= 0):
        return
    # only use the positive difference (inverted cells):
    T_diff[T_diff < 0] = 0
    # buoyancy correction factor to get the buoyant flow from fluid-fluid
    # instead of a solid-fluid horizontal plate:
    # NOTE(review): empirical factor, no source given in code — verify.
    corr_f = 20
    # preallocate arrays:
    shape = T.shape[0] - 1
    Nu = np.zeros(shape)
    # free convection over a horizontal plate, VDI F2 3.1:
    # get material properties for all bottom cells:
    Pr = Pr_water_return(T[1:])
    beta = beta_water_return(T[1:])
    # to deal with the minimum in water density at 4°C, just set negative
    # values to pos.
    beta[beta < 0] *= -1
    # get characteristic length:
    L = d_i / 4
    # get Rayleigh number
    Ra = Pr * 9.81 * L ** 3 * beta * T_diff / ny[1:] ** 2
    # get Rayleigh number with Prandtl function, VDI F2 eq (9):
    Ra_f2 = Ra * (1 + (0.322 / Pr) ** (11 / 20)) ** (-20 / 11)
    # get bool index for laminar or turbulent convection:
    turb = Ra_f2 > 7e4
    # get Nusselt number, following VDI Wärmeatlas 2013 F2 eq (7) and (8):
    Nu[~turb] = 0.766 * (Ra_f2[~turb]) ** 0.2
    Nu[turb] = 0.15 * (Ra_f2[turb]) ** (1 / 3)
    # get bool index for Nusselt number > 1 to ignore lower values
    Nu_idx = Nu >= 1
    # multiplicate lambda value between cells with the Nusselt number. The
    # calculation of the alpha value is implemented in the calculation of
    # the UA value.
    lam_mean[Nu_idx] *= Nu[Nu_idx] * corr_f
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def buoyancy_AixLib(T, cp, rho, ny, grid_spacing, lam_mean):
    """
    Calculate the buoyancy driven heat flow by conductivity plus.
    Calculate the buoyancy driven heat flow inside a vertically stratified
    thermal energy storage tank by using AixLib based epmirical relations for
    an additional heat conductivity [1]_.

    `lam_mean` is increased in-place by the empirical surplus conductivity;
    the function returns None. If no temperature inversion exists, nothing
    is changed. Note that `ny` is accepted for signature parity but is not
    used in this relation.

    Sources:
    [1] : https://github.com/RWTH-EBC/AixLib/blob/master/AixLib/Fluid/Storage/BaseClasses/Bouyancy.mo
    """
    # get temperature difference for all cells (temperature below last cell
    # is 0, thus don't use the last cell):
    # T_diff = T_bot[:-1] - T[:-1]  # replaced with stencil operation below:
    T_diff = T[1:] - T[:-1]
    # if there is no temperature inversion, skip this function:
    if np.all(T_diff <= 0):
        return
    # only use the positive difference (inverted cells):
    T_diff[T_diff < 0] = 0
    # kappa is assumed to be constant at 0.4, g at 9.81
    kappa = 0.4
    g = 9.81
    # get material properties for all bottom cells:
    beta = beta_water_return(T[1:])
    # to deal with the minimum in water density at 4°C, just set negative
    # values to pos.
    beta[beta < 0] *= -1
    # calculate lambda surplus due to buoyancy
    lambda_plus = (
        2
        / 3
        * rho
        * cp
        * kappa
        * grid_spacing ** 2
        * np.sqrt(np.abs(-g * beta * T_diff / grid_spacing))
    )
    # add up to lambda mean
    lam_mean += lambda_plus
# %% Simulation Env. in-part von Neumann stability calculation:
@njit(nogil=GLOB_NOGIL, cache=True)
def _vonNeumann_stability_invar(
    part_id,
    stability_breaches,
    UA_tb,
    UA_port,
    UA_amb_shell,
    dm_io,
    rho_T,
    rhocp,
    grid_spacing,
    port_subs_gsp,
    A_cell,
    A_port,
    A_shell,  # areas to backcalc diffusivity from UA
    r_total,
    V_cell,
    step_stable,  # check_vN, , # system wide bools
    vN_max_step,
    max_factor,
    stepnum,
    timestep,  # system wide vars
):
    r"""
    Check for L2/von Neumann stability for diffusion and massflows.
    Massflows are checked for parts where the massflow is defined NOT
    invariant, that means where all cells in the part share the same massflow!
    Returns ``step_stable``, ``vN_max_step`` and ``max_factor`` (updated
    in-place on a violation); ``stability_breaches`` is incremented in-place.
    Notes
    -----
    Von Neumann stability for conduction:
    .. math::
        r = \frac{\alpha \Delta t}{(\Delta x)^2} \leq \frac{1}{2} \\
        \text{With the thermal diffusivity: } \alpha = \frac{
        \lambda}{\rho c_{p}}\\
        \text{and } \lambda = \frac{U\cdot A}{A} \cdot \Delta x \\
        \text{yields } r = \frac{(UA)}{\rho c_{p}} \frac{\Delta t}{A \Delta x}
    Von Neumann stability for advection:
    """
    # save von Neumann stability values for cells by multiplying the cells
    # relevant total x-gridspacing with the maximum UA-value (this gives a
    # substitue heat conduction to get a total diffusion coefficient) and
    # the inverse maximum rho*cp value (of all cells! this may result in a
    # worst-case-result with a security factor of up to about 4.2%) to get
    # the substitute diffusion coefficient and then mult. with step and
    # div. by gridspacing**2 (not **2 since this is cut out with mult. by
    # it to get substitute diffusion from UA) and save to array:
    vN_diff = np.empty(3)
    # rhocpmax = rhocp.max()
    # For calculation see docstring
    # replaced old and faulty calculations with missing Area
    # vN_diff[0] = (UA_tb.max() / rhocpmax) * timestep / grid_spacing
    vN_diff[0] = (
        np.max(UA_tb[1:-1] / rhocp[1:]) * timestep / (A_cell * grid_spacing)
    )
    # for the next two with non-constant gridspacing, find max of UA/gsp:
    # vN_diff[1] = (UA_port / port_subs_gsp).max() / rhocpmax * timestep
    vN_diff[1] = (
        np.max(UA_port / (A_port * port_subs_gsp)) * timestep / rhocp.max()
    )
    # vN_diff[2] = UA_amb_shell.max() / r_total / rhocpmax * timestep
    vN_diff[2] = np.max(UA_amb_shell / rhocp) * timestep / (A_shell * r_total)
    # get maximum:
    vN_diff_max = vN_diff.max()
    # for massflow:
    # get maximum cfl number (this is the von Neumann stability condition
    # for massflow through cells), again with total max. of rho to get a
    # small security factor for worst case:
    Vcellrhomax = V_cell * rho_T.max()
    vN_dm_max = np.abs(dm_io).max() * timestep / Vcellrhomax
    # get maximum von Neumann stability condition values:
    # get dividers for maximum stable timestep to increase or decrease
    # stepsize:
    vN_diff_mult = vN_diff_max / 0.5
    vN_dm_mult = vN_dm_max / 1
    # get biggest divider:
    vN_div_max = max(vN_diff_mult, vN_dm_mult)
    # check if any L2 stability conditions are violated:
    if vN_div_max > 1:
        # only do something if von Neumann checking is active, else just
        # print an error but go on with the calculation:
        # if check_vN:
        if True:  # placeholder for the disabled check_vN flag above
            # if not stable, set stable step bool to False
            step_stable[0] = False
            # NOTE(review): the in-place add only propagates to the caller
            # if `stability_breaches` is array-valued — confirm call sites.
            stability_breaches += 1  # increase breaches counter for this part
            # calculate required timestep to make this part stable with a
            # security factor of 0.95:
            local_vN_max_step = timestep / vN_div_max * 0.95
            # if this is the smallest step of all parts needed to make all
            # parts stable save it to maximum von Neumann step:
            if vN_max_step[0] > local_vN_max_step:
                vN_max_step[0] = local_vN_max_step
            # # increase error weight of this part by the factor of 1.1 to
            # # avoid running into this error again:
            # self._trnc_err_cell_weight *= 1.1  # NOT good
            # adjust max factor if vN was violated:
            if max_factor[0] > 1.05:
                max_factor[0] = max_factor[0] ** 0.99
                if max_factor[0] < 1.05:  # clip to 1.05
                    max_factor[0] = 1.05
        else:
            print(
                '\nVon Neumann stability violated at step',
                stepnum,
                'and part with id',
                part_id,
                '!',
            )
            raise ValueError
    # return new values (or old values if unchanged):
    return step_stable, vN_max_step, max_factor
@njit(nogil=GLOB_NOGIL, cache=True)
def _vonNeumann_stability_invar_hexnum(
    part_id,
    stability_breaches,
    UA_dim1,
    UA_dim2,
    UA_port,
    dm_io,
    rho_T,
    rhocp,
    grid_spacing,
    port_subs_gsp,
    A_channel,
    A_plate_eff,
    A_port,
    V_cell,
    step_stable,  # check_vN, , # system wide bools
    vN_max_step,
    max_factor,
    stepnum,
    timestep,  # system wide vars
):
    r"""
    Check for L2/von Neumann stability for diffusion and massflows.
    Special method for numeric Heat Exchanger calculation with two-dimensional
    heat flow and two separated flow regimes.
    Returns ``step_stable``, ``vN_max_step`` and ``max_factor`` (updated
    in-place on a violation); ``stability_breaches`` is incremented in-place.
    Notes
    -----
    Von Neumann stability for conduction:
    .. math::
        r = \frac{\alpha \Delta t}{(\Delta x)^2} \leq \frac{1}{2} \\
        \text{With the thermal diffusivity: } \alpha = \frac{
        \lambda}{\rho c_{p}}\\
        \text{and } \lambda = \frac{U\cdot A}{A} \cdot \Delta x \\
        \text{yields } r = \frac{(UA)}{\rho c_{p}} \frac{\Delta t}{A \Delta x}
    Von Neumann stability for advection:
    """
    # save von Neumann stability values for cells by multiplying the cells
    # relevant total x-gridspacing with the maximum UA-value (this gives a
    # substitue heat conduction to get a total diffusion coefficient) and
    # the inverse maximum rho*cp value (of all cells! this may result in a
    # worst-case-result with a security factor of up to about 4.2%) to get
    # the substitute diffusion coefficient and then mult. with step and
    # div. by gridspacing**2 (not **2 since this is cut out with mult. by
    # it to get substitute diffusion from UA) and save to array:
    vN_diff = np.empty(3)
    rhocpmax = rhocp.max()
    # heat conduction in flow direction:
    vN_diff[0] = (  # intermediate solution with Area but not detailed max.
        (UA_dim1.max() / rhocpmax) * timestep / (A_channel * grid_spacing)
    )
    # old version without area
    # vN_diff[0] = (UA_dim1.max() / rhocpmax) * timestep / grid_spacing
    # new version with detailed checks, but not validated yet, thus
    # replaced with the intermediate solution
    # vN_diff[0] = (
    #     np.max(UA_dim1[1:-1] / rhocp[1:])
    #     * timestep / (A_channel * grid_spacing))
    # heat conduction perpendicular to flow direction (fluid-plate-fluid):
    vN_diff[1] = (
        (UA_dim2.max() / rhocpmax) * timestep / (A_plate_eff * grid_spacing)
    )
    # vN_diff[1] = (UA_dim2.max() / rhocpmax) * timestep / grid_spacing
    # vN_diff[1] = (
    #     np.max(UA_dim2[1:-1] / rhocp[1:])
    #     * timestep / (A_plate_eff * grid_spacing))
    # for the next two with non-constant gridspacing, find max of UA/gsp:
    vN_diff[2] = (
        (UA_port / (A_port * port_subs_gsp)).max() / rhocpmax * timestep
    )
    # vN_diff[2] = (UA_port / port_subs_gsp).max() / rhocpmax * timestep
    # vN_diff[2] = (
    #     np.max(UA_port / (A_port * port_subs_gsp)) * timestep / rhocp.max())
    # get maximum:
    vN_diff_max = vN_diff.max()
    # for massflow:
    # get maximum cfl number (this is the von Neumann stability condition
    # for massflow through cells), again with total max. of rho to get a
    # small security factor for worst case:
    Vcellrhomax = V_cell * rho_T.max()
    vN_dm_max = np.abs(dm_io).max() * timestep / Vcellrhomax
    # get maximum von Neumann stability condition values:
    # get dividers for maximum stable timestep to increase or decrease
    # stepsize:
    vN_diff_mult = vN_diff_max / 0.5
    vN_dm_mult = vN_dm_max / 1
    # get biggest divider:
    vN_div_max = max(vN_diff_mult, vN_dm_mult)
    # check if any L2 stability conditions are violated:
    if vN_div_max > 1:
        # only do something if von Neumann checking is active, else just
        # print an error but go on with the calculation:
        # if check_vN:
        if True:  # placeholder for the disabled check_vN flag above
            # if not stable, set stable step bool to False
            step_stable[0] = False
            # NOTE(review): the in-place add only propagates to the caller
            # if `stability_breaches` is array-valued — confirm call sites.
            stability_breaches += 1  # increase breaches counter for this part
            # calculate required timestep to make this part stable with a
            # security factor of 0.95:
            local_vN_max_step = timestep / vN_div_max * 0.95
            # if this is the smallest step of all parts needed to make all
            # parts stable save it to maximum von Neumann step:
            if vN_max_step[0] > local_vN_max_step:
                vN_max_step[0] = local_vN_max_step
            # # increase error weight of this part by the factor of 1.1 to
            # # avoid running into this error again:
            # self._trnc_err_cell_weight *= 1.1  # NOT good
            # adjust max factor if vN was violated:
            if max_factor[0] > 1.05:
                max_factor[0] = max_factor[0] ** 0.99
                if max_factor[0] < 1.05:  # clip to 1.05
                    max_factor[0] = 1.05
        else:
            print(
                '\nVon Neumann stability violated at step',
                stepnum,
                'and part with id',
                part_id,
                '!',
            )
            raise ValueError
    # return new values (or old values if unchanged):
    return step_stable, vN_max_step, max_factor
@njit(nogil=GLOB_NOGIL, cache=True)
def _vonNeumann_stability_var(
    part_id,
    stability_breaches,
    UA_tb,
    UA_port,
    UA_amb_shell,
    dm_top,
    dm_bot,
    dm_port,
    rho_T,
    rhocp,
    grid_spacing,
    port_subs_gsp,
    A_cell,
    A_port,
    A_shell,  # areas to backcalc diffusivity from UA
    r_total,
    V_cell,
    step_stable,  # check_vN, , # system wide bools
    vN_max_step,
    max_factor,
    stepnum,
    timestep,  # system wide vars
):
    r"""
    Check for L2/von Neumann stability for diffusion and massflows.
    Massflows are checked for parts where the massflow is defined as NOT
    invariant, that means where all cells in the part may have different
    massflow!
    Returns ``step_stable``, ``vN_max_step`` and ``max_factor`` (updated
    in-place on a violation); ``stability_breaches`` is incremented in-place.
    Notes
    -----
    Von Neumann stability for conduction:
    .. math::
        r = \frac{\alpha \Delta t}{(\Delta x)^2} \leq \frac{1}{2} \\
        \text{With the thermal diffusivity: } \alpha = \frac{
        \lambda}{\rho c_{p}}\\
        \text{and } \lambda = \frac{U\cdot A}{A} \cdot \Delta x \\
        \text{yields } r = \frac{(UA)}{\rho c_{p}} \frac{\Delta t}{A \Delta x}
    Von Neumann stability for advection:
    """
    # save von Neumann stability values for cells by multiplying the cells
    # relevant total x-gridspacing with the maximum UA-value (this gives a
    # substitue heat conduction to get a total diffusion coefficient) and
    # the inverse maximum rho*cp value (of all cells! this may result in a
    # worst-case-result with a security factor of up to about 4.2%) to get
    # the substitute diffusion coefficient and then mult. with step and
    # div. by gridspacing**2 (not **2 since this is cut out with mult. by
    # it to get substitute diffusion from UA) and save to array:
    vN_diff = np.empty(3)
    # rhocpmax = rhocp.max()
    # For calculation see docstring
    # replaced old and faulty calculations with missing Area
    # vN_diff[0] = (UA_tb.max() / rhocpmax) * timestep / grid_spacing
    vN_diff[0] = (
        np.max(UA_tb[1:-1] / rhocp[1:]) * timestep / (A_cell * grid_spacing)
    )
    # for the next two with non-constant gridspacing, find max of UA/gsp:
    # vN_diff[1] = (UA_port / port_subs_gsp).max() / rhocpmax * timestep
    vN_diff[1] = (
        np.max(UA_port / (A_port * port_subs_gsp)) * timestep / rhocp.max()
    )
    # vN_diff[2] = UA_amb_shell.max() / r_total / rhocpmax * timestep
    vN_diff[2] = np.max(UA_amb_shell / rhocp) * timestep / (A_shell * r_total)
    # get maximum:
    vN_diff_max = vN_diff.max()
    # for massflow:
    # get maximum cfl number (this is the von Neumann stability condition
    # for massflow through cells), again with total max. of rho to get a
    # small security factor for worst case:
    Vcellrhomax = V_cell * rho_T.max()
    # NO checking for dims, since error probability of only having a critical
    # massflow sum at the port inflow cell and NOT at the next cell border is
    # extremely low AND this calculation would require either complicated
    # generated_jit functions OR keepdims support in sum! Thus just simple
    # check.
    # if UA_port.ndim == 1:
    vN_dm_max = (
        max(dm_top.max(), dm_bot.max(), np.abs(dm_port).max())
        * timestep
        / Vcellrhomax
    )
    # else:
    #     vN_dm = (
    #         max(dm_top.max(), dm_bot.max(),
    #             np.abs(dm_port.sum(axis=0, keepdims=True)).max())
    #         * timestep / Vcellrhomax)
    # get maximum von Neumann stability condition values:
    # get dividers for maximum stable timestep to increase or decrease
    # stepsize:
    vN_diff_mult = vN_diff_max / 0.5
    vN_dm_mult = vN_dm_max / 1
    # get biggest divider:
    vN_div_max = max(vN_diff_mult, vN_dm_mult)
    # check if any L2 stability conditions are violated:
    if vN_div_max > 1.0:
        # only do something if von Neumann checking is active, else just
        # print an error but go on with the calculation:
        # if check_vN:
        if True:  # placeholder for the disabled check_vN flag above
            # if not stable, set stable step bool to False
            step_stable[0] = False
            # NOTE(review): the in-place add only propagates to the caller
            # if `stability_breaches` is array-valued — confirm call sites.
            stability_breaches += 1  # increase breaches counter for this part
            # calculate required timestep to make this part stable with a
            # security factor of 0.95:
            local_vN_max_step = timestep / vN_div_max * 0.95
            # if this is the smallest step of all parts needed to make all
            # parts stable save it to maximum von Neumann step:
            if vN_max_step[0] > local_vN_max_step:
                vN_max_step[0] = local_vN_max_step
            # # increase error weight of this part by the factor of 1.1 to
            # # avoid running into this error again:
            # self._trnc_err_cell_weight *= 1.1  # NOT good
            # adjust max factor if vN was violated:
            if max_factor[0] > 1.05:
                max_factor[0] = max_factor[0] ** 0.99
                if max_factor[0] < 1.05:  # clip to 1.05
                    max_factor[0] = 1.05
        else:
            print(
                '\nVon Neumann stability violated at step',
                stepnum,
                'and part with id',
                part_id,
                '!',
            )
            raise ValueError
    # return new values (or old values if unchanged):
    return step_stable, vN_max_step, max_factor
# %% Simulation Env. part specific differential functions:
@njit(nogil=GLOB_NOGIL, cache=True)
def pipe1D_diff(
    T_ext,
    T_port,
    T_s,
    T_amb,
    ports_all,  # temperatures
    dm_io,
    dm_top,
    dm_bot,
    dm_port,
    res_dm,  # flows
    cp_T,
    lam_T,
    rho_T,
    ny_T,
    lam_mean,
    cp_port,
    lam_port_fld,
    mcp,
    rhocp,
    lam_wll,
    lam_ins,
    mcp_wll,
    ui,  # material properties.
    alpha_i,
    alpha_inf,  # alpha values
    UA_tb,
    UA_tb_wll,
    UA_amb_shell,
    UA_port,
    UA_port_wll,  # UA values
    port_own_idx,
    port_link_idx,  # indices
    grid_spacing,
    port_gsp,
    port_subs_gsp,
    d_i,
    cell_dist,  # lengths
    flow_length,
    r_total,
    r_ln_wll,
    r_ln_ins,
    r_rins,  # lengths
    A_cell,
    V_cell,
    A_shell_i,
    A_shell_ins,
    A_p_fld_mean,  # areas and vols
    process_flows,
    vertical,
    step_stable,  # bools
    part_id,
    stability_breaches,
    vN_max_step,
    max_factor,  # misc.
    stepnum,  # step information
    dT_cond,
    dT_adv,
    dT_total,  # differentials
    timestep,
):
    """
    Calculate the temperature differential of a 1D pipe-like part.

    Processes the part's massflows, updates the temperature dependent
    material properties, the cell-to-cell/wall/ambient UA values and the
    inner and outer alpha values, processes the ports, checks von Neumann
    stability and finally computes the conductive (`dT_cond`), advective
    (`dT_adv`) and total (`dT_total`) temperature differentials in-place.
    Returns `dT_total`.
    """
    # process massflows only once per step; returns the updated flag:
    process_flows[0] = _process_flow_invar(
        process_flows=process_flows,
        dm_io=dm_io,
        dm_top=dm_top,
        dm_bot=dm_bot,
        dm_port=dm_port,
        stepnum=stepnum,
        res_dm=res_dm,
    )
    # update temperature dependent water properties in-place:
    water_mat_props_ext_view(
        T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
    )
    # get mean lambda value between cells:
    _lambda_mean_view(lam_T=lam_T, out=lam_mean)
    UA_plate_tb(
        A_cell=A_cell,
        grid_spacing=grid_spacing,
        lam_mean=lam_mean,
        UA_tb_wll=UA_tb_wll,
        out=UA_tb,
    )
    # for conduction between current cell and ambient:
    # get outer pipe (insulation) surface temperature using a linearized
    # approach assuming steady state (assuming surface temperature = const.
    # for t -> infinity) and for cylinder shell (lids are omitted)
    surface_temp_steady_state_inplace(
        T=T_ext[1:-1],
        T_inf=T_amb[0],
        A_s=A_shell_ins,
        alpha_inf=alpha_inf,
        UA=UA_amb_shell,
        T_s=T_s,
    )
    # get inner alpha value between fluid and wall from nusselt equations:
    pipe_alpha_i(
        dm_io, T_ext[1:-1], rho_T, ny_T, lam_T, A_cell, d_i, cell_dist, alpha_i
    )
    # get outer alpha value between insulation and surrounding air:
    cylinder_alpha_inf(  # for a cylinder
        T_s=T_s,
        T_inf=T_amb[0],
        flow_length=flow_length,
        vertical=vertical,
        r_total=r_total,
        alpha_inf=alpha_inf,
    )
    # get resulting UA to ambient:
    UA_fld_wll_ins_amb_cyl(
        A_i=A_shell_i,
        r_ln_wll=r_ln_wll,
        r_ln_ins=r_ln_ins,
        r_rins=r_rins,
        alpha_i=alpha_i,
        alpha_inf=alpha_inf,
        lam_wll=lam_wll,
        lam_ins=lam_ins,
        out=UA_amb_shell,
    )
    # precalculate values which are needed multiple times:
    cell_temp_props_ext(
        T_ext=T_ext,
        V_cell=V_cell,
        cp_T=cp_T,
        rho_T=rho_T,
        mcp_wll=mcp_wll,
        rhocp=rhocp,
        mcp=mcp,
        ui=ui,
    )
    # update port arrays and get per-port conductive differentials:
    dT_cond_port = _process_ports_collapsed(
        ports_all=ports_all,
        port_link_idx=port_link_idx,
        port_own_idx=port_own_idx,
        T=T_ext[1:-1],
        mcp=mcp,
        UA_port=UA_port,
        UA_port_wll=UA_port_wll,
        A_p_fld_mean=A_p_fld_mean,
        port_gsp=port_gsp,
        grid_spacing=grid_spacing,
        lam_T=lam_T,
        cp_port=cp_port,
        lam_port_fld=lam_port_fld,
        T_port=T_port,
    )
    # check von Neumann stability; may shrink the admissible timestep:
    step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
        part_id=part_id,
        stability_breaches=stability_breaches,
        UA_tb=UA_tb,
        UA_port=UA_port,
        UA_amb_shell=UA_amb_shell,
        dm_io=dm_io,
        rho_T=rho_T,
        rhocp=rhocp,
        grid_spacing=grid_spacing,
        port_subs_gsp=port_subs_gsp,
        A_cell=A_cell,
        A_port=A_p_fld_mean,
        A_shell=A_shell_ins,
        r_total=r_total,
        V_cell=V_cell,
        step_stable=step_stable,
        vN_max_step=vN_max_step,
        max_factor=max_factor,
        stepnum=stepnum,
        timestep=timestep,
    )
    # CALCULATE DIFFERENTIALS
    # calculate heat transfer by conduction
    dT_cond[:] = (
        +UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
        + UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
        # + UA_port * (T_port - T_ext[1:-1])
        + UA_amb_shell * (T_amb[0] - T_ext[1:-1])
    ) / mcp
    # calculate heat transfer by advection
    dT_adv[:] = (
        +dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
        + dm_bot * (cp_T[2:] * T_ext[2:] - ui)
    ) / mcp
    # sum up heat conduction and advection for port values:
    for i in range(port_own_idx.size):
        idx = port_own_idx[i]  # idx of port values at temperature/diff array
        # conduction
        dT_cond[idx] += dT_cond_port[i]
        # advection
        dT_adv[idx] += (
            dm_port[i] * (cp_port[i] * T_port[i] - ui[idx]) / mcp[idx]
        )
    dT_total[:] = dT_cond + dT_adv
    return dT_total
@njit(nogil=GLOB_NOGIL, cache=True)
def pipe1D_diff_fullstructarr(
    T_ext,
    ports_all,  # temperatures
    dm_io,
    res_dm,
    cp_T,
    lam_mean,
    UA_tb,
    port_link_idx,
    port_subs_gsp,
    step_stable,  # bools
    vN_max_step,
    max_factor,
    process_flows,
    vertical,  # misc.
    stepnum,
    ra1,
    ra2,
    ra5,
    timestep,
):
    """
    This function uses as many structured arrays as possible to reduce the
    time needed for calls to typeof_pyval. ra1, ra2, and ra5 are the
    structured arrays. ra1 contains single floats, ra2 all values of the shape
    of the port arrays and ra5 all non-extended value array sized arrays.
    The current speedup over using a list of args is so small, if any, that
    the easy approach if lists is preferred.
    As soon as structured arrays of variable shaped sub arrays is supported,
    this may become interesting.

    Structured-array variant of `pipe1D_diff`: same processing pipeline
    (flows, material props, UA/alpha values, ports, von Neumann check,
    differentials), returning ``ra5['dT_total']``.
    """
    # process massflows only once per step; returns the updated flag:
    process_flows[0] = _process_flow_invar(
        process_flows=process_flows,
        dm_io=dm_io,
        dm_top=ra5['dm_top'],
        dm_bot=ra5['dm_bot'],
        dm_port=ra2['dm_port'],
        stepnum=stepnum,
        res_dm=res_dm,
    )
    # update temperature dependent water properties in-place:
    water_mat_props_ext_view(
        T_ext=T_ext,
        cp_T=cp_T,
        lam_T=ra5['lam_T'],
        rho_T=ra5['rho_T'],
        ny_T=ra5['ny_T'],
    )
    # get mean lambda value between cells:
    _lambda_mean_view(lam_T=ra5['lam_T'], out=lam_mean)
    UA_plate_tb(
        A_cell=ra1['A_cell'],
        grid_spacing=ra1['grid_spacing'],
        lam_mean=lam_mean,
        UA_tb_wll=ra1['UA_tb_wll'],
        out=UA_tb,
    )
    # for conduction between current cell and ambient:
    # get outer pipe (insulation) surface temperature using a linearized
    # approach assuming steady state (assuming surface temperature = const.
    # for t -> infinity) and for cylinder shell (lids are omitted)
    surface_temp_steady_state_inplace(
        T=T_ext[1:-1],
        T_inf=ra1['T_amb'][0],
        A_s=ra1['A_shell_ins'],
        alpha_inf=ra5['alpha_inf'],
        UA=ra5['UA_amb_shell'],
        T_s=ra5['T_s'],
    )
    # get inner alpha value between fluid and wall from nusselt equations:
    pipe_alpha_i(
        dm_io,
        T_ext[1:-1],
        ra5['rho_T'],
        ra5['ny_T'],
        ra5['lam_T'],
        ra1['A_cell'],
        ra1['d_i'],
        ra5['cell_dist'],
        ra5['alpha_i'],
    )
    # get outer alpha value between insulation and surrounding air:
    cylinder_alpha_inf(  # for a cylinder
        T_s=ra5['T_s'],
        T_inf=ra1['T_amb'][0],
        flow_length=ra1['flow_length'],
        vertical=vertical,
        r_total=ra1['r_total'],
        alpha_inf=ra5['alpha_inf'],
    )
    # get resulting UA to ambient:
    UA_fld_wll_ins_amb_cyl(
        A_i=ra1['A_shell_i'],
        r_ln_wll=ra1['r_ln_wll'],
        r_ln_ins=ra1['r_ln_ins'],
        r_rins=ra1['r_rins'],
        alpha_i=ra5['alpha_i'],
        alpha_inf=ra5['alpha_inf'],
        lam_wll=ra1['lam_wll'],
        lam_ins=ra1['lam_ins'],
        out=ra5['UA_amb_shell'],
    )
    # precalculate values which are needed multiple times:
    cell_temp_props_ext(
        T_ext=T_ext,
        V_cell=ra1['V_cell'],
        cp_T=cp_T,
        rho_T=ra5['rho_T'],
        mcp_wll=ra1['mcp_wll'],
        rhocp=ra5['rhocp'],
        mcp=ra5['mcp'],
        ui=ra5['ui'],
    )
    # update port arrays and get per-port conductive differentials:
    dT_cond_port = _process_ports_collapsed(
        ports_all=ports_all,
        port_link_idx=port_link_idx,
        port_own_idx=ra2['port_own_idx'],
        T=T_ext[1:-1],
        mcp=ra5['mcp'],
        UA_port=ra2['UA_port'],
        UA_port_wll=ra2['UA_port_wll'],
        A_p_fld_mean=ra2['A_p_fld_mean'],
        port_gsp=ra2['port_gsp'],
        grid_spacing=ra1['grid_spacing'][0],
        lam_T=ra5['lam_T'],
        cp_port=ra2['cp_port'],
        lam_port_fld=ra2['lam_port_fld'],
        T_port=ra2['T_port'],
    )
    # check von Neumann stability; may shrink the admissible timestep:
    step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
        part_id=ra1['part_id'],
        stability_breaches=ra1['stability_breaches'],
        UA_tb=UA_tb,
        UA_port=ra2['UA_port'],
        UA_amb_shell=ra5['UA_amb_shell'],
        dm_io=dm_io,
        rho_T=ra5['rho_T'],
        rhocp=ra5['rhocp'],
        grid_spacing=ra1['grid_spacing'][0],
        port_subs_gsp=port_subs_gsp,
        A_cell=ra1['A_cell'],
        A_port=ra2['A_p_fld_mean'],
        A_shell=ra1['A_shell_ins'],
        r_total=ra1['r_total'][0],
        V_cell=ra1['V_cell'][0],
        step_stable=step_stable,
        vN_max_step=vN_max_step,
        max_factor=max_factor,
        stepnum=stepnum,
        timestep=timestep,
    )
    # CALCULATE DIFFERENTIALS
    # calculate heat transfer by conduction
    ra5['dT_cond'][:] = (
        +UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
        + UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
        # + UA_port * (T_port - T_ext[1:-1])
        + ra5['UA_amb_shell'] * (ra1['T_amb'][0] - T_ext[1:-1])
    ) / ra5['mcp']
    # dT_cond[0] += dT_cond_port[0]
    # dT_cond[-1] += dT_cond_port[-1]
    # calculate heat transfer by advection
    ra5['dT_adv'][:] = (
        (
            +ra5['dm_top'] * (cp_T[:-2] * T_ext[:-2] - ra5['ui'])
            + ra5['dm_bot'] * (cp_T[2:] * T_ext[2:] - ra5['ui'])
        )
        # + dm_port * (cp_port * T_port - ui))
        / ra5['mcp']
    )
    # dT_adv[0] += dm_port[0] * (cp_port[0] * T_port[0] - ui[0]) / mcp[0]
    # dT_adv[-1] += dm_port[-1] * (cp_port[-1] * T_port[-1] - ui[-1]) / mcp[-1]
    # T_port and cp_port NOT collapsed
    # for i in range(port_own_idx.size):
    #     idx = port_own_idx[i]
    #     dT_cond[idx] += dT_cond_port[i]
    #     dT_adv[idx] += (
    #         dm_port[idx] * (cp_port[idx] * T_port[idx] - ui[idx])
    #         / mcp[idx])
    # all (except dm_port) collapsed:
    for i in range(ra2['port_own_idx'].size):
        idx = ra2['port_own_idx'][i]
        ra5['dT_cond'][idx] += dT_cond_port[i]
        # dT_adv[idx] += (  # dm_port like T
        #     dm_port[idx] * (cp_port[i] * T_port[i] - ui[idx])
        #     / mcp[idx])
        ra5['dT_adv'][idx] += (  # dm port only 2 cells
            ra2['dm_port'][i]
            * (ra2['cp_port'][i] * ra2['T_port'][i] - ra5['ui'][idx])
            / ra5['mcp'][idx]
        )
    ra5['dT_total'][:] = ra5['dT_cond'] + ra5['dT_adv']
    return ra5['dT_total']
@njit(nogil=GLOB_NOGIL, cache=True)
def pipe1D_diff_structarr(
    T_ext,
    T_port,
    T_s,
    T_amb,
    ports_all,  # temperatures
    dm_io,
    dm_top,
    dm_bot,
    dm_port,
    res_dm,  # flows
    cp_T,
    lam_T,
    rho_T,
    ny_T,
    lam_mean,
    cp_port,
    lam_port_fld,  # mat. props.
    mcp,
    rhocp,
    ui,  # material properties.
    alpha_i,
    alpha_inf,  # alpha values
    UA_tb,
    UA_amb_shell,
    UA_port,
    UA_port_wll,  # UA values
    port_own_idx,
    port_link_idx,  # indices
    port_gsp,
    port_subs_gsp,
    cell_dist,  # lengths
    A_p_fld_mean,  # areas and vols
    process_flows,
    step_stable,  # bools
    stability_breaches,
    vN_max_step,
    max_factor,  # misc.
    stepnum,  # step information
    dT_cond,
    dT_adv,
    dT_total,  # differentials
    sar,  # structarr
    vertical,
    part_id,
    timestep,
):
    """Compute the temperature differential of a 1D pipe part.

    Variant of the pipe differential calculation where the scalar
    per-part parameters (cell geometry, wall/insulation properties)
    are read from the structured array ``sar`` instead of being passed
    as separate arguments.

    The function works by in-place mutation of the passed arrays:
    flows are processed into ``dm_top``/``dm_bot``/``dm_port``, fluid
    material properties, alpha and UA values are updated, a von
    Neumann stability check is performed, and finally the conduction
    and advection differentials are accumulated into ``dT_cond``,
    ``dT_adv`` and summed into ``dT_total``, which is also returned.

    NOTE(review): ``step_stable``/``vN_max_step``/``max_factor`` are
    rebound locally by the stability check but not returned; the
    stability state is presumably tracked via ``stability_breaches``
    — confirm against the solver loop.
    """
    process_flows[0] = _process_flow_invar(
        process_flows=process_flows,
        dm_io=dm_io,
        dm_top=dm_top,
        dm_bot=dm_bot,
        dm_port=dm_port,
        stepnum=stepnum,
        res_dm=res_dm,
    )
    water_mat_props_ext_view(
        T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
    )
    # get mean lambda value between cells:
    _lambda_mean_view(lam_T=lam_T, out=lam_mean)
    UA_plate_tb(
        A_cell=sar['A_cell'][0],
        grid_spacing=sar['grid_spacing'][0],
        lam_mean=lam_mean,
        UA_tb_wll=sar['UA_tb_wll'][0],
        out=UA_tb,
    )
    # for conduction between current cell and ambient:
    # get outer pipe (insulation) surface temperature using a linearized
    # approach assuming steady state (assuming surface temperature = const.
    # for t -> infinity) and for cylinder shell (lids are omitted)
    surface_temp_steady_state_inplace(
        T=T_ext[1:-1],
        T_inf=T_amb[0],
        A_s=sar['A_shell_ins'][0],
        alpha_inf=alpha_inf,
        UA=UA_amb_shell,
        T_s=T_s,
    )
    # get inner alpha value between fluid and wall from nusselt equations:
    pipe_alpha_i(
        dm_io,
        T_ext[1:-1],
        rho_T,
        ny_T,
        lam_T,
        sar['A_cell'][0],
        sar['d_i'][0],
        cell_dist,
        alpha_i,
    )
    # get outer alpha value between insulation and surrounding air:
    cylinder_alpha_inf(  # for a cylinder
        T_s=T_s,
        T_inf=T_amb[0],
        flow_length=sar['flow_length'][0],
        vertical=vertical,
        r_total=sar['r_total'][0],
        alpha_inf=alpha_inf,
    )
    # get resulting UA to ambient:
    UA_fld_wll_ins_amb_cyl(
        A_i=sar['A_shell_i'][0],
        r_ln_wll=sar['r_ln_wll'][0],
        r_ln_ins=sar['r_ln_ins'][0],
        r_rins=sar['r_rins'][0],
        alpha_i=alpha_i,
        alpha_inf=alpha_inf,
        lam_wll=sar['lam_wll'][0],
        lam_ins=sar['lam_ins'][0],
        out=UA_amb_shell,
    )
    # precalculate values which are needed multiple times:
    cell_temp_props_ext(
        T_ext=T_ext,
        V_cell=sar['V_cell'][0],
        cp_T=cp_T,
        rho_T=rho_T,
        mcp_wll=sar['mcp_wll'][0],
        rhocp=rhocp,
        mcp=mcp,
        ui=ui,
    )
    dT_cond_port = _process_ports_collapsed(
        ports_all=ports_all,
        port_link_idx=port_link_idx,
        port_own_idx=port_own_idx,
        T=T_ext[1:-1],
        mcp=mcp,
        UA_port=UA_port,
        UA_port_wll=UA_port_wll,
        A_p_fld_mean=A_p_fld_mean,
        port_gsp=port_gsp,
        grid_spacing=sar['grid_spacing'][0],
        lam_T=lam_T,
        cp_port=cp_port,
        lam_port_fld=lam_port_fld,
        T_port=T_port,
    )
    step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
        part_id=part_id,
        stability_breaches=stability_breaches,
        UA_tb=UA_tb,
        UA_port=UA_port,
        UA_amb_shell=UA_amb_shell,
        dm_io=dm_io,
        rho_T=rho_T,
        rhocp=rhocp,
        grid_spacing=sar['grid_spacing'][0],
        port_subs_gsp=port_subs_gsp,
        A_cell=sar['A_cell'],
        A_port=A_p_fld_mean,
        A_shell=sar['A_shell_ins'],
        r_total=sar['r_total'][0],
        V_cell=sar['V_cell'][0],
        step_stable=step_stable,
        vN_max_step=vN_max_step,
        max_factor=max_factor,
        stepnum=stepnum,
        timestep=timestep,
    )
    # CALCULATE DIFFERENTIALS
    # calculate heat transfer by conduction
    dT_cond[:] = (
        +UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
        + UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
        # + UA_port * (T_port - T_ext[1:-1])
        + UA_amb_shell * (T_amb[0] - T_ext[1:-1])
    ) / mcp
    # dT_cond[0] += dT_cond_port[0]
    # dT_cond[-1] += dT_cond_port[-1]
    # calculate heat transfer by advection
    dT_adv[:] = (
        (
            +dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
            + dm_bot * (cp_T[2:] * T_ext[2:] - ui)
        )
        # + dm_port * (cp_port * T_port - ui))
        / mcp
    )
    # dT_adv[0] += dm_port[0] * (cp_port[0] * T_port[0] - ui[0]) / mcp[0]
    # dT_adv[-1] += dm_port[-1] * (cp_port[-1] * T_port[-1] - ui[-1]) / mcp[-1]
    # T_port and cp_port NOT collapsed
    # for i in range(port_own_idx.size):
    #     idx = port_own_idx[i]
    #     dT_cond[idx] += dT_cond_port[i]
    #     dT_adv[idx] += (
    #         dm_port[idx] * (cp_port[idx] * T_port[idx] - ui[idx])
    #         / mcp[idx])
    # all (except dm_port) collapsed:
    for i in range(port_own_idx.size):
        idx = port_own_idx[i]
        dT_cond[idx] += dT_cond_port[i]
        #        dT_adv[idx] += (  # dm_port like T
        #            dm_port[idx] * (cp_port[i] * T_port[i] - ui[idx])
        #            / mcp[idx])
        dT_adv[idx] += (  # dm port only 2 cells
            dm_port[i] * (cp_port[i] * T_port[i] - ui[idx]) / mcp[idx]
        )
    dT_total[:] = dT_cond + dT_adv
    return dT_total
@njit(nogil=GLOB_NOGIL, cache=True)
def pipe1D_branched_diff(
    T_ext,
    T_port,
    T_s,
    T_amb,
    ports_all,  # temperatures
    dm_io,
    dm,
    dm_top,
    dm_bot,
    dm_port,
    res_dm,  # flows
    cp_T,
    lam_T,
    rho_T,
    ny_T,
    lam_mean,
    cp_port,
    lam_port_fld,
    mcp,
    rhocp,
    lam_wll,
    lam_ins,
    mcp_wll,
    ui,  # material properties.
    alpha_i,
    alpha_inf,  # alpha values
    UA_tb,
    UA_tb_wll,
    UA_amb_shell,
    UA_port,
    UA_port_wll,  # UA values
    port_own_idx,
    port_link_idx,  # indices
    grid_spacing,
    port_gsp,
    port_subs_gsp,
    d_i,
    cell_dist,  # lengths
    flow_length,
    r_total,
    r_ln_wll,
    r_ln_ins,
    r_rins,  # lengths
    A_cell,
    V_cell,
    A_shell_i,
    A_shell_ins,
    A_p_fld_mean,  # areas and vols
    process_flows,
    vertical,
    step_stable,  # bools
    part_id,
    stability_breaches,
    vN_max_step,
    max_factor,  # misc.
    stepnum,  # step information
    dT_cond,
    dT_adv,
    dT_total,  # differentials
    timestep,
):
    """Compute the temperature differential of a branched 1D pipe part.

    In contrast to the plain pipe variant this uses the *variable* flow
    processing (``_process_flow_var``), which also resolves the
    cell-wise massflow array ``dm`` from the port flows, so alpha values
    are calculated with the per-cell massflows.

    All heavy lifting happens through in-place mutation of the passed
    arrays: material properties, UA values, alpha values and the
    conduction/advection differentials ``dT_cond``/``dT_adv`` are
    updated, port contributions are added at ``port_own_idx`` cells and
    the summed differential is stored in ``dT_total`` and returned.
    """
    process_flows[0] = _process_flow_var(
        process_flows=process_flows,
        dm_io=dm_io,
        dm=dm,
        dm_top=dm_top,
        dm_bot=dm_bot,
        dm_port=dm_port,
        port_own_idx=port_own_idx,
        stepnum=stepnum,
        res_dm=res_dm,
    )
    water_mat_props_ext_view(
        T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
    )
    # get mean lambda value between cells:
    _lambda_mean_view(lam_T=lam_T, out=lam_mean)
    UA_plate_tb(
        A_cell=A_cell,
        grid_spacing=grid_spacing,
        lam_mean=lam_mean,
        UA_tb_wll=UA_tb_wll,
        out=UA_tb,
    )
    # for conduction between current cell and ambient:
    # get outer pipe (insulation) surface temperature using a linearized
    # approach assuming steady state (assuming surface temperature = const.
    # for t -> infinity) and for cylinder shell (lids are omitted)
    surface_temp_steady_state_inplace(
        T=T_ext[1:-1],
        T_inf=T_amb[0],
        A_s=A_shell_ins,
        alpha_inf=alpha_inf,
        UA=UA_amb_shell,
        T_s=T_s,
    )
    # get inner alpha value between fluid and wall from nusselt equations:
    pipe_alpha_i(
        dm, T_ext[1:-1], rho_T, ny_T, lam_T, A_cell, d_i, cell_dist, alpha_i
    )
    # get outer alpha value between insulation and surrounding air:
    cylinder_alpha_inf(  # for a cylinder
        T_s=T_s,
        T_inf=T_amb[0],
        flow_length=flow_length,
        vertical=vertical,
        r_total=r_total,
        alpha_inf=alpha_inf,
    )
    # get resulting UA to ambient:
    UA_fld_wll_ins_amb_cyl(
        A_i=A_shell_i,
        r_ln_wll=r_ln_wll,
        r_ln_ins=r_ln_ins,
        r_rins=r_rins,
        alpha_i=alpha_i,
        alpha_inf=alpha_inf,
        lam_wll=lam_wll,
        lam_ins=lam_ins,
        out=UA_amb_shell,
    )
    # precalculate values which are needed multiple times:
    cell_temp_props_ext(
        T_ext=T_ext,
        V_cell=V_cell,
        cp_T=cp_T,
        rho_T=rho_T,
        mcp_wll=mcp_wll,
        rhocp=rhocp,
        mcp=mcp,
        ui=ui,
    )
    dT_cond_port = _process_ports_collapsed(
        ports_all=ports_all,
        port_link_idx=port_link_idx,
        port_own_idx=port_own_idx,
        T=T_ext[1:-1],
        mcp=mcp,
        UA_port=UA_port,
        UA_port_wll=UA_port_wll,
        A_p_fld_mean=A_p_fld_mean,
        port_gsp=port_gsp,
        grid_spacing=grid_spacing,
        lam_T=lam_T,
        cp_port=cp_port,
        lam_port_fld=lam_port_fld,
        T_port=T_port,
    )
    step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
        part_id=part_id,
        stability_breaches=stability_breaches,
        UA_tb=UA_tb,
        UA_port=UA_port,
        UA_amb_shell=UA_amb_shell,
        dm_io=dm_io,
        rho_T=rho_T,
        rhocp=rhocp,
        grid_spacing=grid_spacing,
        port_subs_gsp=port_subs_gsp,
        A_cell=A_cell,
        A_port=A_p_fld_mean,
        A_shell=A_shell_ins,
        r_total=r_total,
        V_cell=V_cell,
        step_stable=step_stable,
        vN_max_step=vN_max_step,
        max_factor=max_factor,
        stepnum=stepnum,
        timestep=timestep,
    )
    # CALCULATE DIFFERENTIALS
    # calculate heat transfer by conduction
    dT_cond[:] = (
        +UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
        + UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
        # + UA_port * (T_port - T_ext[1:-1])
        + UA_amb_shell * (T_amb[0] - T_ext[1:-1])
    ) / mcp
    # calculate heat transfer by advection
    dT_adv[:] = (
        +dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
        + dm_bot * (cp_T[2:] * T_ext[2:] - ui)
    ) / mcp
    # sum up heat conduction and advection for port values:
    for i in range(port_own_idx.size):
        idx = port_own_idx[i]  # idx of port values at temperature/diff array
        # conduction
        dT_cond[idx] += dT_cond_port[i]
        # advection
        dT_adv[idx] += (
            dm_port[i] * (cp_port[i] * T_port[i] - ui[idx]) / mcp[idx]
        )
    dT_total[:] = dT_cond + dT_adv
    return dT_total
@njit(nogil=GLOB_NOGIL, cache=True)
def heatedpipe1D_diff(
    T_ext,
    T_port,
    T_s,
    T_amb,
    ports_all,  # temperatures
    dm_io,
    dm_top,
    dm_bot,
    dm_port,
    dQ_heating,
    res_dm,
    res_dQ,  # flows
    cp_T,
    lam_T,
    rho_T,
    ny_T,
    lam_mean,
    cp_port,
    lam_port_fld,  # mat. props.
    mcp,
    mcp_heated,
    rhocp,
    lam_wll,
    lam_ins,
    mcp_wll,
    ui,  # material properties.
    alpha_i,
    alpha_inf,  # alpha values
    UA_tb,
    UA_tb_wll,
    UA_amb_shell,
    UA_port,
    UA_port_wll,  # UA values
    port_own_idx,
    port_link_idx,
    heat_mult,  # indices
    grid_spacing,
    port_gsp,
    port_subs_gsp,
    d_i,
    cell_dist,  # lengths
    flow_length,
    r_total,
    r_ln_wll,
    r_ln_ins,
    r_rins,  # lengths
    A_cell,
    V_cell,
    A_shell_i,
    A_shell_ins,
    A_p_fld_mean,  # areas and vols
    process_flows,
    vertical,
    step_stable,  # bools
    part_id,
    stability_breaches,
    vN_max_step,
    max_factor,  # misc.
    stepnum,
    timestep,  # step information
    dT_cond,
    dT_adv,
    dT_heat,
    dT_heated,  # differentials
    emergency_shutdown=110.0,
):
    """Compute the temperature differential of a heated 1D pipe part.

    Extends the plain pipe differential with an internal heat source:
    the heating power ``dQ_heating`` (scaled by ``heat_mult`` and
    divided by ``mcp_heated``) is written to ``dT_heated`` and
    ``dT_heat`` is added to the returned total differential.

    If any cell temperature in ``T_ext`` reaches ``emergency_shutdown``
    (default 110.0, presumably degrees Celsius — confirm with unit
    conventions of the module), ``dQ_heating`` is zeroed in place
    before processing. The heating power is also logged to ``res_dQ``
    once per step.

    Returns a tuple of
    ``(dT_total, process_flows, step_stable, vN_max_step, max_factor)``.
    Note that ``dT_total`` is a freshly allocated array here, not an
    in-place updated output array as in the unheated pipe variants.
    """
    # shutdown gasboiler immediately if any temperatures are exceeding
    # emergency_shutdown-value
    if np.any(T_ext >= emergency_shutdown):
        dQ_heating[:] = 0.0
    # save rate of heat flow to result array
    if process_flows[0]:  # only if flows not already processed
        res_dQ[stepnum] = dQ_heating
    process_flows[0] = _process_flow_invar(
        process_flows=process_flows,
        dm_io=dm_io,
        dm_top=dm_top,
        dm_bot=dm_bot,
        dm_port=dm_port,
        stepnum=stepnum,
        res_dm=res_dm,
    )
    water_mat_props_ext_view(
        T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
    )
    # get mean lambda value between cells:
    _lambda_mean_view(lam_T=lam_T, out=lam_mean)
    UA_plate_tb(
        A_cell=A_cell,
        grid_spacing=grid_spacing,
        lam_mean=lam_mean,
        UA_tb_wll=UA_tb_wll,
        out=UA_tb,
    )
    # for conduction between current cell and ambient:
    # get outer pipe (insulation) surface temperature using a linearized
    # approach assuming steady state (assuming surface temperature = const.
    # for t -> infinity) and for cylinder shell (lids are omitted)
    surface_temp_steady_state_inplace(
        T=T_ext[1:-1],
        T_inf=T_amb[0],
        A_s=A_shell_ins,
        alpha_inf=alpha_inf,
        UA=UA_amb_shell,
        T_s=T_s,
    )
    # get inner alpha value between fluid and wall from nusselt equations:
    pipe_alpha_i(
        dm_io, T_ext[1:-1], rho_T, ny_T, lam_T, A_cell, d_i, cell_dist, alpha_i
    )
    # get outer alpha value between insulation and surrounding air:
    cylinder_alpha_inf(  # for a cylinder
        T_s=T_s,
        T_inf=T_amb[0],
        flow_length=flow_length,
        vertical=vertical,
        r_total=r_total,
        alpha_inf=alpha_inf,
    )
    # get resulting UA to ambient:
    UA_fld_wll_ins_amb_cyl(
        A_i=A_shell_i,
        r_ln_wll=r_ln_wll,
        r_ln_ins=r_ln_ins,
        r_rins=r_rins,
        alpha_i=alpha_i,
        alpha_inf=alpha_inf,
        lam_wll=lam_wll,
        lam_ins=lam_ins,
        out=UA_amb_shell,
    )
    # precalculate values which are needed multiple times:
    cell_temp_props_ext(
        T_ext=T_ext,
        V_cell=V_cell,
        cp_T=cp_T,
        rho_T=rho_T,
        mcp_wll=mcp_wll,
        rhocp=rhocp,
        mcp=mcp,
        ui=ui,
    )
    dT_cond_port = _process_ports_collapsed(
        ports_all=ports_all,
        port_link_idx=port_link_idx,
        port_own_idx=port_own_idx,
        T=T_ext[1:-1],
        mcp=mcp,
        UA_port=UA_port,
        UA_port_wll=UA_port_wll,
        A_p_fld_mean=A_p_fld_mean,
        port_gsp=port_gsp,
        grid_spacing=grid_spacing,
        lam_T=lam_T,
        cp_port=cp_port,
        lam_port_fld=lam_port_fld,
        T_port=T_port,
    )
    step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
        part_id=part_id,
        stability_breaches=stability_breaches,
        UA_tb=UA_tb,
        UA_port=UA_port,
        UA_amb_shell=UA_amb_shell,
        dm_io=dm_io,
        rho_T=rho_T,
        rhocp=rhocp,
        grid_spacing=grid_spacing,
        port_subs_gsp=port_subs_gsp,
        A_cell=A_cell,
        A_port=A_p_fld_mean,
        A_shell=A_shell_ins,
        r_total=r_total,
        V_cell=V_cell,
        step_stable=step_stable,
        vN_max_step=vN_max_step,
        max_factor=max_factor,
        stepnum=stepnum,
        timestep=timestep,
    )
    # CALCULATE DIFFERENTIALS
    # calculate heat transfer by internal heat sources
    dT_heated[:] = dQ_heating * heat_mult / mcp_heated
    # calculate heat transfer by conduction
    dT_cond[:] = (
        +UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
        + UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
        + UA_amb_shell * (T_amb[0] - T_ext[1:-1])
    ) / mcp
    # calculate heat transfer by advection
    dT_adv[:] = (
        +dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
        + dm_bot * (cp_T[2:] * T_ext[2:] - ui)
    ) / mcp
    # sum up heat conduction and advection for port values:
    for i in range(port_own_idx.size):
        idx = port_own_idx[i]  # idx of port values at temperature/diff array
        # conduction
        dT_cond[idx] += dT_cond_port[i]
        # advection
        dT_adv[idx] += (
            dm_port[i] * (cp_port[i] * T_port[i] - ui[idx]) / mcp[idx]
        )
    dT_total = dT_cond + dT_adv + dT_heat
    return dT_total, process_flows, step_stable, vN_max_step, max_factor
@njit(nogil=GLOB_NOGIL, cache=True)
def tes_diff(
    T_ext,
    T_port,
    T_s,
    T_s_lid,
    T_amb,
    ports_all,  # temperatures
    dm_io,
    dm,
    dm_top,
    dm_bot,
    dm_port,
    res_dm,  # flows
    cp_T,
    lam_T,
    rho_T,
    ny_T,
    lam_mean,
    cp_port,
    lam_port_fld,
    mcp,
    rhocp,
    lam_wll,
    lam_ins,
    mcp_wll,
    ui,  # mat. props.
    alpha_i,
    alpha_inf,  # alpha_inf_lid, # alpha values
    UA_tb,
    UA_tb_wll,
    UA_amb_shell,
    UA_amb_lid,
    UA_port,
    UA_port_wll,  # UA values
    port_own_idx,
    port_link_idx,  # indices
    grid_spacing,
    port_gsp,
    port_subs_gsp,
    d_i,
    cell_dist,
    flow_length,
    flow_length_lid,
    r_total,
    r_ln_wll,
    r_ln_ins,
    r_rins,
    s_wll,
    s_ins,  # lengths
    A_cell,
    V_cell,
    A_shell_i,
    A_shell_ins,
    A_p_fld_mean,  # areas and vols
    process_flows,
    vertical,
    vertical_lid,
    lid_top,
    step_stable,  # bools
    part_id,
    stability_breaches,
    vN_max_step,
    max_factor,  # misc.
    stepnum,  # step information
    dT_cond,
    dT_adv,
    dT_total,  # differentials
    # T, T_top, T_bot,  # T+bot/top NOT NEEDED ANYMORE
    # cp_top, cp_bot,  # cp_top/botT NOT NEEDED ANYMORE
    timestep,
):
    """Compute the temperature differential of a thermal energy storage.

    Differs from the pipe variants in the following ways:
      * buoyancy is modelled via a Nusselt correction
        (``buoyancy_byNusselt``),
      * heat losses through the top and bottom lids are included via
        ``UA_amb_lid`` and a plane alpha value ``alpha_inf_lid``,
      * flows are processed with the *variable* flow routine and the
        von Neumann stability check uses the per-direction massflows
        (``_vonNeumann_stability_var``),
      * multiple ports per cell are supported: port conduction and
        advection contributions are added per port element, with
        ``dm_port.flat`` indexing covering both 1D and 2D port arrays.

    All outputs are produced by in-place mutation; the summed
    differential is stored in ``dT_total``. Returns
    ``(dT_total, process_flows, step_stable, vN_max_step, max_factor,
    alpha_inf_lid)``.
    """
    process_flows[0] = _process_flow_var(
        process_flows=process_flows,
        dm_io=dm_io,
        dm=dm,
        dm_top=dm_top,
        dm_bot=dm_bot,
        dm_port=dm_port,
        port_own_idx=port_own_idx,
        stepnum=stepnum,
        res_dm=res_dm,
    )
    water_mat_props_ext_view(
        T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
    )
    # get mean lambda value between cells:
    _lambda_mean_view(lam_T=lam_T, out=lam_mean)
    # calculate buoyancy with Nusselt correction:
    buoyancy_byNusselt(T=T_ext[1:-1], ny=ny_T, d_i=d_i, lam_mean=lam_mean)
    UA_plate_tb(
        A_cell=A_cell,
        grid_spacing=grid_spacing,
        lam_mean=lam_mean,
        UA_tb_wll=UA_tb_wll,
        out=UA_tb,
    )
    # for conduction between current cell and ambient:
    # get outer pipe (insulation) surface temperature using a linearized
    # approach assuming steady state (assuming surface temperature = const.
    # for t -> infinity) and for cylinder shell (lids are omitted)
    surface_temp_steady_state_inplace(
        T=T_ext[1:-1],
        T_inf=T_amb[0],
        A_s=A_shell_ins,
        alpha_inf=alpha_inf,
        UA=UA_amb_shell,
        T_s=T_s,
    )
    # get inner alpha value between fluid and wall from nusselt equations:
    pipe_alpha_i(
        dm=dm,
        T=T_ext[1:-1],
        rho=rho_T,
        ny=ny_T,
        lam_fld=lam_T,
        A=A_cell,
        d_i=d_i,
        x=cell_dist,
        alpha=alpha_i,
    )
    # get outer alpha value between insulation and surrounding air:
    cylinder_alpha_inf(  # for a cylinder
        T_s=T_s,
        T_inf=T_amb[0],
        flow_length=flow_length,
        vertical=vertical,
        r_total=r_total,
        alpha_inf=alpha_inf,
    )
    alpha_inf_lid = plane_alpha_inf(
        T_s=T_s_lid,
        T_inf=T_amb[0],
        flow_length=flow_length_lid,
        vertical=vertical_lid,
        top=lid_top,
    )
    # get resulting UA to ambient:
    UA_fld_wll_ins_amb_cyl(
        A_i=A_shell_i,
        r_ln_wll=r_ln_wll,
        r_ln_ins=r_ln_ins,
        r_rins=r_rins,
        alpha_i=alpha_i,
        alpha_inf=alpha_inf,
        lam_wll=lam_wll,
        lam_ins=lam_ins,
        out=UA_amb_shell,
    )
    UA_amb_lid[:] = UA_fld_wll_ins_amb_plate(
        A=A_cell,
        s_wll=s_wll,
        s_ins=s_ins,  # alpha_i FIRST AND LAST element! alpha_fld=alpha_i[0],
        alpha_fld=alpha_i[:: alpha_i.size - 1],
        alpha_inf=alpha_inf_lid,
        lam_wll=lam_wll,
        lam_ins=lam_ins,
    )
    # precalculate values which are needed multiple times:
    cell_temp_props_ext(
        T_ext=T_ext,
        V_cell=V_cell,
        cp_T=cp_T,
        rho_T=rho_T,
        mcp_wll=mcp_wll,
        rhocp=rhocp,
        mcp=mcp,
        ui=ui,
    )
    # port conduction differentials are recalculated explicitly in the
    # port loop below, thus the return value is discarded here:
    _ = _process_ports_collapsed(
        ports_all=ports_all,
        port_link_idx=port_link_idx,
        port_own_idx=port_own_idx,
        T=T_ext[1:-1],
        mcp=mcp,
        UA_port=UA_port,
        UA_port_wll=UA_port_wll,
        A_p_fld_mean=A_p_fld_mean,
        port_gsp=port_gsp,
        grid_spacing=grid_spacing,
        lam_T=lam_T,
        cp_port=cp_port,
        lam_port_fld=lam_port_fld,
        T_port=T_port,
    )
    step_stable, vN_max_step, max_factor = _vonNeumann_stability_var(
        part_id=part_id,
        stability_breaches=stability_breaches,
        UA_tb=UA_tb,
        UA_port=UA_port,
        UA_amb_shell=UA_amb_shell,
        dm_top=dm_top,
        dm_bot=dm_bot,
        dm_port=dm_port,
        rho_T=rho_T,
        rhocp=rhocp,
        grid_spacing=grid_spacing,
        port_subs_gsp=port_subs_gsp,
        A_cell=A_cell,
        A_port=A_p_fld_mean,
        A_shell=A_shell_ins,
        r_total=r_total,
        V_cell=V_cell,
        step_stable=step_stable,
        vN_max_step=vN_max_step,
        max_factor=max_factor,
        stepnum=stepnum,
        timestep=timestep,
    )
    # CALCULATE DIFFERENTIALS
    # NOTE: the former implementation branched on `T_port.ndim == 1` here,
    # but both branches were textually identical, so the dead duplicate
    # branch was removed. The ndim cases only differ in the port loop
    # below, which `dm_port.flat` already handles for both shapes.
    # calculate heat transfer by conduction
    dT_cond[:] = (  # EXTENDED ARRAY VERSION
        +UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
        + UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
        + UA_amb_shell * (T_amb[0] - T_ext[1:-1])
    ) / mcp
    # add losses through top and bottom lid:
    dT_cond[0] += (  # EXTENDED ARRAY VERSION
        UA_amb_lid[0] * (T_amb[0] - T_ext[1]) / mcp[0]
    )
    dT_cond[-1] += UA_amb_lid[-1] * (T_amb[0] - T_ext[-2]) / mcp[-1]
    # calculate heat transfer by advection
    dT_adv[:] = (  # EXTENDED ARRAY VERSION
        +dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
        + dm_bot * (cp_T[2:] * T_ext[2:] - ui)
    ) / mcp
    for i in range(T_port.size):
        idx = port_own_idx[i]
        # heat conduction over ports (T_ext[idx+1] since index is not extended)
        dT_cond[idx] += UA_port[i] * (T_port[i] - T_ext[idx + 1]) / mcp[idx]
        # heat advection through ports
        dT_adv[idx] += (
            dm_port.flat[i]
            * (cp_port[i] * T_port[i] - ui[idx])  # collapsed dmport
            / mcp[idx]
        )
    # sum up all differentials
    dT_total[:] = dT_cond + dT_adv
    return (
        dT_total,
        process_flows,
        step_stable,
        vN_max_step,
        max_factor,
        alpha_inf_lid,
    )
@njit(nogil=GLOB_NOGIL, cache=True)
def chp_core_diff(
    T_ext,
    T_port,
    T_s,
    T_amb,
    ports_all,  # temperatures
    dm_io,
    dm_top,
    dm_bot,
    dm_port,
    dQ_heating,
    res_dm,
    res_dQ,  # flows
    cp_T,
    lam_T,
    rho_T,
    ny_T,
    lam_mean,
    cp_port,
    lam_port_fld,  # mat. props.
    mcp,
    mcp_heated,
    rhocp,
    lam_wll,
    lam_ins,
    mcp_wll,
    ui,  # material properties.
    alpha_i,
    alpha_inf,  # alpha values
    UA_tb,
    UA_tb_wll,
    UA_amb_shell,
    UA_port,
    UA_port_wll,  # UA values
    port_own_idx,
    port_link_idx,
    heat_mult,  # indices
    grid_spacing,
    port_gsp,
    port_subs_gsp,
    d_i,
    cell_dist,  # lengths
    flow_length,
    r_total,
    r_ln_wll,
    r_ln_ins,
    r_rins,  # lengths
    A_cell,
    V_cell,
    A_shell_i,
    A_shell_ins,
    A_p_fld_mean,  # areas and vols
    process_flows,
    vertical,
    step_stable,  # bools
    part_id,
    stability_breaches,
    vN_max_step,
    max_factor,  # misc.
    stepnum,
    timestep,  # step information
    dT_cond,
    dT_adv,
    dT_heat,
    dT_heated,  # differentials
):
    """Compute the temperature differential of a CHP plant core part.

    Structurally identical to ``heatedpipe1D_diff`` except that no
    emergency-shutdown temperature check is performed: the internal
    heating power ``dQ_heating`` is always applied (scaled by
    ``heat_mult`` and divided by ``mcp_heated`` into ``dT_heated``)
    and logged to ``res_dQ`` once per step.

    Returns a tuple of
    ``(dT_total, process_flows, step_stable, vN_max_step, max_factor)``,
    where ``dT_total`` is a newly allocated array.
    """
    # save rate of heat flow to result array
    if process_flows[0]:  # only if flows not already processed
        res_dQ[stepnum] = dQ_heating
    process_flows[0] = _process_flow_invar(
        process_flows=process_flows,
        dm_io=dm_io,
        dm_top=dm_top,
        dm_bot=dm_bot,
        dm_port=dm_port,
        stepnum=stepnum,
        res_dm=res_dm,
    )
    water_mat_props_ext_view(
        T_ext=T_ext, cp_T=cp_T, lam_T=lam_T, rho_T=rho_T, ny_T=ny_T
    )
    # get mean lambda value between cells:
    _lambda_mean_view(lam_T=lam_T, out=lam_mean)
    UA_plate_tb(
        A_cell=A_cell,
        grid_spacing=grid_spacing,
        lam_mean=lam_mean,
        UA_tb_wll=UA_tb_wll,
        out=UA_tb,
    )
    # for conduction between current cell and ambient:
    # get outer pipe (insulation) surface temperature using a linearized
    # approach assuming steady state (assuming surface temperature = const.
    # for t -> infinity) and for cylinder shell (lids are omitted)
    surface_temp_steady_state_inplace(
        T=T_ext[1:-1],
        T_inf=T_amb[0],
        A_s=A_shell_ins,
        alpha_inf=alpha_inf,
        UA=UA_amb_shell,
        T_s=T_s,
    )
    # get inner alpha value between fluid and wall from nusselt equations:
    pipe_alpha_i(
        dm_io, T_ext[1:-1], rho_T, ny_T, lam_T, A_cell, d_i, cell_dist, alpha_i
    )
    # get outer alpha value between insulation and surrounding air:
    cylinder_alpha_inf(  # for a cylinder
        T_s=T_s,
        T_inf=T_amb[0],
        flow_length=flow_length,
        vertical=vertical,
        r_total=r_total,
        alpha_inf=alpha_inf,
    )
    # get resulting UA to ambient:
    UA_fld_wll_ins_amb_cyl(
        A_i=A_shell_i,
        r_ln_wll=r_ln_wll,
        r_ln_ins=r_ln_ins,
        r_rins=r_rins,
        alpha_i=alpha_i,
        alpha_inf=alpha_inf,
        lam_wll=lam_wll,
        lam_ins=lam_ins,
        out=UA_amb_shell,
    )
    # precalculate values which are needed multiple times:
    cell_temp_props_ext(
        T_ext=T_ext,
        V_cell=V_cell,
        cp_T=cp_T,
        rho_T=rho_T,
        mcp_wll=mcp_wll,
        rhocp=rhocp,
        mcp=mcp,
        ui=ui,
    )
    dT_cond_port = _process_ports_collapsed(
        ports_all=ports_all,
        port_link_idx=port_link_idx,
        port_own_idx=port_own_idx,
        T=T_ext[1:-1],
        mcp=mcp,
        UA_port=UA_port,
        UA_port_wll=UA_port_wll,
        A_p_fld_mean=A_p_fld_mean,
        port_gsp=port_gsp,
        grid_spacing=grid_spacing,
        lam_T=lam_T,
        cp_port=cp_port,
        lam_port_fld=lam_port_fld,
        T_port=T_port,
    )
    step_stable, vN_max_step, max_factor = _vonNeumann_stability_invar(
        part_id=part_id,
        stability_breaches=stability_breaches,
        UA_tb=UA_tb,
        UA_port=UA_port,
        UA_amb_shell=UA_amb_shell,
        dm_io=dm_io,
        rho_T=rho_T,
        rhocp=rhocp,
        grid_spacing=grid_spacing,
        port_subs_gsp=port_subs_gsp,
        A_cell=A_cell,
        A_port=A_p_fld_mean,
        A_shell=A_shell_ins,
        r_total=r_total,
        V_cell=V_cell,
        step_stable=step_stable,
        vN_max_step=vN_max_step,
        max_factor=max_factor,
        stepnum=stepnum,
        timestep=timestep,
    )
    # CALCULATE DIFFERENTIALS
    # calculate heat transfer by internal heat sources
    dT_heated[:] = dQ_heating * heat_mult / mcp_heated
    # calculate heat transfer by conduction
    dT_cond[:] = (
        +UA_tb[:-1] * (T_ext[:-2] - T_ext[1:-1])
        + UA_tb[1:] * (T_ext[2:] - T_ext[1:-1])
        + UA_amb_shell * (T_amb[0] - T_ext[1:-1])
    ) / mcp
    # calculate heat transfer by advection
    dT_adv[:] = (
        +dm_top * (cp_T[:-2] * T_ext[:-2] - ui)
        + dm_bot * (cp_T[2:] * T_ext[2:] - ui)
    ) / mcp
    # sum up heat conduction and advection for port values:
    for i in range(port_own_idx.size):
        idx = port_own_idx[i]  # idx of port values at temperature/diff array
        # conduction
        dT_cond[idx] += dT_cond_port[i]
        # advection
        dT_adv[idx] += (
            dm_port[i] * (cp_port[i] * T_port[i] - ui[idx]) / mcp[idx]
        )
    dT_total = dT_cond + dT_adv + dT_heat
    return dT_total, process_flows, step_stable, vN_max_step, max_factor
@njit(nogil=GLOB_NOGIL, cache=True)
def hexnum_diff(
    T_ext,
    T_port,
    T_amb,
    ports_all,  # temperatures
    dm_io,
    dm_top,
    dm_bot,
    dm_port,
    res_dm,  # flows
    cp_T,
    lam_fld,
    rho_T,
    ny_T,
    lam_mean,
    cp_port,
    lam_port_fld,
    mcp,
    rhocp,
    cp_wll,
    lam_wll,
    ui,  # material properties.
    alpha_i,  # alpha values
    UA_dim1,
    UA_dim2,
    UA_dim1_wll,
    UA_port,
    UA_port_wll,  # UA values
    port_own_idx,
    port_link_idx,  # indices
    grid_spacing,
    port_gsp,
    port_subs_gsp,
    d_h,
    s_plate,
    cell_dist,
    dist_min,  # lengths
    A_channel,
    V_cell_fld,
    A_plate_eff,
    A_p_fld_mean,  # areas and vols
    channel_divisor,
    corr_Re,
    process_flows,
    step_stable,  # bools
    part_id,
    stability_breaches,
    vN_max_step,
    max_factor,  # misc.
    stepnum,  # step information
    dT_cond,
    dT_adv,
    dT_total,  # differentials
    timestep,
):
    """Compute the temperature differential of a numeric plate heat exchanger.

    The temperature array is 2D: axis 0 is the flow direction, axis 1
    holds the columns [supply fluid, wall, demand fluid] (fluid columns
    are at odd indices of the extended array ``T_ext``). Conduction is
    calculated in both dimensions (``UA_dim1`` along the flow,
    ``UA_dim2`` across fluid-wall-fluid), advection only along axis 0
    in the fluid columns, which is finally divided by
    ``channel_divisor`` to account for parallel channels.

    NOTE(review): the von Neumann stability results are bound to
    underscored throwaway names here, unlike the pipe variants which
    rebind and return them — presumably intentional since only
    ``dT_total`` is returned, but confirm against the solver loop.

    Returns ``dT_total`` (also updated in place).
    """
    # generate views needed to make calculations easier:
    T_sup = T_ext[1:-1, 1]  # view to supply side
    T_dmd = T_ext[1:-1, 3]  # view to demand side
    # T_wll = T_ext[1:-1, 2]  # view to wall temperature
    dm_sup = dm_io[:1]  # view to supply side massflow
    dm_dmd = dm_io[1:]  # view to demand side massflow
    process_flows[0] = _process_flow_multi_flow(
        process_flows=process_flows,
        dm_io=dm_io,
        dm_top=dm_top,
        dm_bot=dm_bot,
        dm_port=dm_port,
        stepnum=stepnum,
        res_dm=res_dm,
    )
    flow_per_channel = np.abs(dm_io / channel_divisor)
    water_mat_props_ext_view(  # only pass fluid columns to T_ext
        T_ext=T_ext[:, 1::2], cp_T=cp_T, lam_T=lam_fld, rho_T=rho_T, ny_T=ny_T
    )
    _lambda_mean_view(lam_T=lam_fld, out=lam_mean)
    UA_plate_tb_fld(  # only pass the fluid columns to out
        A_cell=A_channel,
        grid_spacing=grid_spacing,
        lam_mean=lam_mean,
        out=UA_dim1[:, ::2],
    )
    UA_plate_tb_wll(  # only pass the wall column to out
        UA_tb_wll=UA_dim1_wll, out=UA_dim1[:, 1]
    )
    phex_alpha_i_wll_sep_discretized(
        dm=dm_sup / channel_divisor[0],
        T_fld=T_sup,
        T_wll=T_sup,
        rho=rho_T[:, 0],
        ny=ny_T[:, 0],
        lam_fld=lam_fld[:, 0],
        A=A_channel,
        d_h=d_h,
        x=cell_dist,
        corr_Re=corr_Re,
        alpha=alpha_i[:, 0],
    )
    phex_alpha_i_wll_sep_discretized(
        dm=dm_dmd / channel_divisor[1],
        T_fld=T_dmd,
        T_wll=T_dmd,
        rho=rho_T[:, 1],
        ny=ny_T[:, 1],
        lam_fld=lam_fld[:, 1],
        A=A_channel,
        d_h=d_h,
        x=cell_dist,
        corr_Re=corr_Re,
        alpha=alpha_i[:, 1],
    )
    UA_dim2[:, 1:3] = UA_fld_wll_plate(
        A=A_plate_eff, s_wll=s_plate / 2, alpha_fld=alpha_i, lam_wll=lam_wll
    )
    cell_temp_props_fld(
        T_ext_fld=T_ext[:, 1::2],
        V_cell=V_cell_fld,
        cp_T=cp_T,
        rho_T=rho_T,
        rhocp_fld=rhocp[:, ::2],
        mcp_fld=mcp[:, ::2],
        ui_fld=ui[:, ::2],
    )
    specific_inner_energy_wll(T_wll=T_ext[1:-1, 2], cp_wll=cp_wll, ui=ui[:, 1])
    dT_cond_port = _process_ports_collapsed(
        ports_all=ports_all,
        port_link_idx=port_link_idx,
        port_own_idx=port_own_idx,
        T=T_ext[1:-1, 1:-1],
        mcp=mcp,
        UA_port=UA_port,
        UA_port_wll=UA_port_wll,
        A_p_fld_mean=A_p_fld_mean,
        port_gsp=port_gsp,
        grid_spacing=grid_spacing,
        lam_T=lam_fld,
        cp_port=cp_port,
        lam_port_fld=lam_port_fld,
        T_port=T_port,
    )
    (
        _step_stable,
        _vN_max_step,
        _max_factor,
    ) = _vonNeumann_stability_invar_hexnum(
        part_id=part_id,
        stability_breaches=stability_breaches,
        UA_dim1=UA_dim1,
        UA_dim2=UA_dim2,
        UA_port=UA_port,
        dm_io=flow_per_channel,
        rho_T=rho_T,
        rhocp=rhocp,
        grid_spacing=grid_spacing,
        port_subs_gsp=port_subs_gsp,
        A_channel=A_channel,
        A_plate_eff=A_plate_eff,
        A_port=A_p_fld_mean,
        V_cell=V_cell_fld,
        step_stable=step_stable,
        vN_max_step=vN_max_step,
        max_factor=max_factor,
        stepnum=stepnum,
        timestep=timestep,
    )
    # heat conduction to ambient is disabled for the hex:
    UA_amb_shell = 0.0
    dT_cond[:] = (
        # heat conduction in first dimension (axis 0), top -> bottom:
        (
            +UA_dim1[:-1] * (T_ext[:-2, 1:-1] - T_ext[1:-1, 1:-1])
            # heat conduction in first dimension (axis 0), bottom -> top:
            + UA_dim1[1:] * (T_ext[2:, 1:-1] - T_ext[1:-1, 1:-1])
            # heat conduction in second dimension (axis 1), left -> right:
            + UA_dim2[:, :-1] * (T_ext[1:-1, :-2] - T_ext[1:-1, 1:-1])
            # heat conduction in second dimension (axis 1), right -> left:
            + UA_dim2[:, 1:] * (T_ext[1:-1, 2:] - T_ext[1:-1, 1:-1])
            # heat conduction to ambient (currently set to 0):
            + UA_amb_shell * (T_amb - T_ext[1:-1, 1:-1])
        )
        / mcp
    )
    # calculate heat transfer by advection in the fluid channels
    dT_adv[:, ::2] = (
        # advective heat transport (only axis 0), top -> bottom:
        (
            +dm_top * (cp_T[:-2] * T_ext[:-2, 1::2] - ui[:, ::2])
            # advective heat transport (only axis 0), bottom -> top:
            + dm_bot * (cp_T[2:] * T_ext[2:, 1::2] - ui[:, ::2])
        )
        / mcp[:, ::2]
    )
    # sum up heat conduction and advection for port values:
    for i in range(port_own_idx.size):
        idx = port_own_idx[i]  # idx of port values at temperature/diff array
        # conduction
        dT_cond.flat[idx] += dT_cond_port[i]
        # advection
        dT_adv.flat[idx] += (
            dm_port[i]
            * (cp_port[i] * T_port[i] - ui.flat[idx])
            / mcp.flat[idx]
        )
    # divide advective transfer by the number of channels:
    dT_adv[:, ::2] /= channel_divisor
    # sum up the differentials for conduction and advection
    dT_total[:] = dT_cond + dT_adv
    return dT_total
@nb.njit(cache=True)
def condensing_hex_solve(
    T,
    T_port,
    ports_all,
    res,
    res_dm,
    dm_io,
    dm_port,
    port_own_idx,
    port_link_idx,
    X_pred,
    flow_scaling,
    water_dm_range,
    gas_dv_range,
    int_comb_idx,
    nvars_per_ftr,
    pca_mean,
    pca_components,
    lm_intercept,
    lm_coef,
    stepnum,
):
    """Calculate condensing flue gas HEX by using a PCA-transformed polynome LR.

    The heat exchanger outlet temperatures are predicted by a regression
    model: the input vector is polynomial-transformed, projected onto
    principal components and passed through a linear model. Below a
    water massflow threshold of 0.1 (scaled units) a quadratic polynome
    fallback (`condensing_hex_quad_poly`) is used; if either flow is
    zero, outlet temperatures are set equal to the inlet temperatures.

    Parameters
    ----------
    T : np.ndarray
        Temperature result array, updated in place. Layout per the
        assignments below: ``T[0, 0]``/``T[1, 0]`` water in/out,
        ``T[0, 1]``/``T[1, 1]`` flue gas in/out.
    T_port : np.ndarray
        Port temperature array, filled from ``ports_all`` via
        ``port_link_idx``. Even elements supply the inflowing water and
        flue gas temperatures.
    ports_all : np.ndarray
        Temperatures of all ports of the simulation environment.
    res : np.ndarray
        Result array for temperatures, written at ``stepnum[0]``.
    res_dm : np.ndarray
        Result array for massflows, written at ``stepnum[0]``.
    dm_io : np.ndarray
        Water massflow and flue gas volume flow (unscaled).
    dm_port : np.ndarray
        Port massflow array (not modified here).
    port_own_idx : np.ndarray
        Part-local port indices (not used in the calculation here).
    port_link_idx : np.ndarray
        Indices linking ``ports_all`` to ``T_port``.
    X_pred : np.ndarray
        Regression input row, filled in place:
        ``[T_water_in, T_fluegas_in, scaled_water_dm, scaled_gas_dv]``.
    flow_scaling : np.ndarray
        Scaling factors applied to ``dm_io`` before prediction.
    water_dm_range : np.ndarray
        (lower, upper) bounds of the scaled water massflow. Flow below
        the lower bound is rounded to 0 or the bound; flow above the
        upper bound is bypassed around the HEX and remixed afterwards.
    gas_dv_range : np.ndarray
        (lower, upper) bounds of the scaled flue gas volume flow.
    int_comb_idx : np.ndarray
        Polynomial feature combination indices.
    nvars_per_ftr : TYPE
        Number of variables per polynomial feature.
    pca_mean : np.ndarray
        PCA mean vector.
    pca_components : np.ndarray
        PCA component matrix.
    lm_intercept : TYPE
        Linear model intercept.
    lm_coef : np.ndarray
        Linear model coefficients.
    stepnum : np.ndarray
        Current step number; element 0 indexes the result arrays.

    Raises
    ------
    ValueError
        If the scaled flue gas volume flow exceeds its upper bound.

    Returns
    -------
    None. All results are written in place to ``T``, ``res`` and
    ``res_dm``.
    """
    _port_values_to_cont(
        ports_all=ports_all, port_link_idx=port_link_idx, out=T_port
    )
    # extract inflowing temperatures for water (idx 0) and flue gas (idx 2)
    X_pred[:, :2] = T_port[::2]
    # extract water massflow (cell 3) and flue gas volume flow (cell 4) and
    # scale with scaling factors
    X_pred[:, 2:] = dm_io / flow_scaling
    # make some flow checks:
    # check for water/flue gas massflow bounds. only do something if violated
    bypass = False  # bypass is initialized to False
    if (water_dm_range[0] != 0.0) and (X_pred[0, 2] < water_dm_range[0]):
        # if flow smaller than lower bound, decide if using 0 or lower bound
        # by rounding to the closer value:
        X_pred[0, 2] = (
            round(X_pred[0, 2] / water_dm_range[0]) * water_dm_range[0]
        )
    elif X_pred[0, 2] > water_dm_range[1]:
        # bypassing excess mass flow, to avoid huge power output
        # when outside HEX heat meters are calculated with unclipped flows:
        # backup full flow for calculations:
        # water_dm_full = X_pred[0, 2]  # not needed anymore
        # get excess flow over max. range. this amount is bypassed
        water_dm_excess = X_pred[0, 2] - water_dm_range[1]
        bypass = True  # set bypassing to true
        # clip the amount of water over the hex to the range
        X_pred[0, 2] = water_dm_range[1]
    if (gas_dv_range[0] != 0.0) and (X_pred[0, 3] < gas_dv_range[0]):
        # if flow smaller than lower bound, decide if using 0 or lower bound
        # by rounding to the closer value:
        X_pred[0, 3] = round(X_pred[0, 3] / gas_dv_range[0]) * gas_dv_range[0]
    elif X_pred[0, 3] > gas_dv_range[1]:
        print(
            '\nFluegas volume flow in condensing HEX exceeded. The '
            'following value was encountered:'
        )
        print(X_pred[0, 3])
        raise ValueError
    # calculate results. but only if NO massflow is 0
    if np.all(X_pred[0, 2:] != 0):
        dm_water_thresh = 0.1  # threshhold below which no regr. preds. exist
        n_samples = 1  # this is always one for this function
        # only if water massflow greater 10%, else quad polynome
        if X_pred[0, 2] > dm_water_thresh:
            # transform input data to polynome, then to principal components
            X_pf = transform_to_poly_nb(
                X_pred, int_comb_idx, nvars_per_ftr, n_samples
            )
            X_PC = transform_pca_nb(X_pf, pca_mean, pca_components)
            # predict
            T_pred = poly_tranfs_pred(X_PC, lm_intercept, lm_coef)
            # save results to temperature array
            T[0, 0] = X_pred[0, 0]  # t w in
            T[1, 0] = T_pred[0, 0]  # t w out
            T[0, 1] = X_pred[0, 1]  # t fg in
            T[1, 1] = T_pred[0, 1]  # t fg out
        else:  # for massflow below thresh, use quad polynome
            T_pred_below_thresh = condensing_hex_quad_poly(
                X_pred,  # X vector
                int_comb_idx,
                nvars_per_ftr,  # polynomial transf.
                pca_mean,
                pca_components,  # PCA transformation
                lm_intercept,
                lm_coef,  # linear model transformation
                dm_water_thresh=0.1,
                dx=0.01,
            )
            # save results to temperature array
            T[0, 0] = X_pred[0, 0]  # t w in
            T[1, 0] = T_pred_below_thresh[0, 0]  # t w out
            T[0, 1] = X_pred[0, 1]  # t fg in
            T[1, 1] = T_pred_below_thresh[0, 1]  # t fg out
    else:  # if ANY massflow is 0, all output temps are equal to input temps
        T[:, 0] = X_pred[0, 0]  # t w in & t w out = t w in
        T[:, 1] = X_pred[0, 1]  # t fg in & t fg out = t fg in
    # if bypassing the hex with a part of the water flow:
    if bypass:
        # get heat capacity rates for bypassing, hex traversing and
        # outflowing (mixed) water. hcr is dimensionless, since flows have been
        # scaled before, thus value is same as leveraged Cp in unit J/kg/K
        hcr_bypass = cp_water(X_pred[0, 0]) * water_dm_excess
        hcr_hex_out = cp_water(T[1, 0]) * water_dm_range[1]
        hcr_out = hcr_bypass + hcr_hex_out
        # calculate outflowing (mix of bypass and hex traversing water) temp:
        T_out = (hcr_bypass * X_pred[0, 0] + hcr_hex_out * T[1, 0]) / hcr_out
        # set to the temperature result array:
        T[1, 0] = T_out  # t w out
    res[stepnum[0]] = T
    res_dm[stepnum[0]] = dm_io
# %% Simulation Env. implicit/explicit specific functions and tests:
@njit(nogil=GLOB_NOGIL, cache=True)
def hexnum_diff_impl(
    T_ext,
    T_port,
    T_amb,
    ports_all,  # temperatures
    dm_io,
    dm_top,
    dm_bot,
    dm_port,
    res_dm,  # flows
    cp_T,
    lam_fld,
    rho_T,
    ny_T,
    lam_mean,
    cp_port,
    lam_port_fld,
    mcp,
    rhocp,
    cp_wll,
    lam_wll,
    ui,  # material properties.
    alpha_i,  # alpha values
    UA_dim1,
    UA_dim2,
    UA_dim1_wll,
    UA_port,
    UA_port_wll,  # UA values
    port_own_idx,
    port_link_idx,  # indices
    grid_spacing,
    port_gsp,
    d_h,
    s_plate,
    cell_dist,
    dist_min,  # lengths
    A_channel,
    V_cell_fld,
    A_plate_eff,
    A_p_fld_mean,  # areas and vols
    channel_divisor,
    corr_Re,
    process_flows,  # bools
    stepnum,  # step information
    dT_cond,
    dT_adv,
    dT_total,  # differentials
):
    """
    Temperature differential of the numeric plate heat exchanger model,
    variant used by the implicit solvers.

    Refreshes flows, water material properties, alpha values and UA values
    in place, then computes the conductive (``dT_cond``), advective
    (``dT_adv``) and total (``dT_total``) temperature differentials and
    returns ``dT_total``.

    NOTE(review): ``T_ext`` appears to be the cell temperature array
    extended by one ghost row/column on each side (all core accesses use
    ``[1:-1, ...]``); column 1 is the supply fluid, column 2 the wall and
    column 3 the demand fluid -- confirm against the part's array layout.
    """
    # generate views needed to make calculations easier:
    T_sup = T_ext[1:-1, 1]  # view to supply side
    T_dmd = T_ext[1:-1, 3]  # view to demand side
    # T_wll = T_ext[1:-1, 2]  # view to wall temperature
    dm_sup = dm_io[:1]  # view to supply side massflow
    dm_dmd = dm_io[1:]  # view to demand side massflow
    process_flows[0] = _process_flow_multi_flow(
        process_flows=process_flows,
        dm_io=dm_io,
        dm_top=dm_top,
        dm_bot=dm_bot,
        dm_port=dm_port,
        stepnum=stepnum,
        res_dm=res_dm,
    )
    water_mat_props_ext_view(  # only pass fluid columns to T_ext
        T_ext=T_ext[:, 1::2], cp_T=cp_T, lam_T=lam_fld, rho_T=rho_T, ny_T=ny_T
    )
    _lambda_mean_view(lam_T=lam_fld, out=lam_mean)
    UA_plate_tb_fld(  # only pass the fluid columns to out
        A_cell=A_channel,
        grid_spacing=grid_spacing,
        lam_mean=lam_mean,
        out=UA_dim1[:, ::2],
    )
    UA_plate_tb_wll(  # only pass the wall column to out
        UA_tb_wll=UA_dim1_wll, out=UA_dim1[:, 1]
    )
    # convective heat transfer coefficient on the supply side channels:
    phex_alpha_i_wll_sep_discretized(
        dm=dm_sup / channel_divisor[0],
        T_fld=T_sup,
        T_wll=T_sup,
        rho=rho_T[:, 0],
        ny=ny_T[:, 0],
        lam_fld=lam_fld[:, 0],
        A=A_channel,
        d_h=d_h,
        x=cell_dist,
        corr_Re=corr_Re,
        alpha=alpha_i[:, 0],
    )
    # convective heat transfer coefficient on the demand side channels:
    phex_alpha_i_wll_sep_discretized(
        dm=dm_dmd / channel_divisor[1],
        T_fld=T_dmd,
        T_wll=T_dmd,
        rho=rho_T[:, 1],
        ny=ny_T[:, 1],
        lam_fld=lam_fld[:, 1],
        A=A_channel,
        d_h=d_h,
        x=cell_dist,
        corr_Re=corr_Re,
        alpha=alpha_i[:, 1],
    )
    UA_dim2[:, 1:3] = UA_fld_wll_plate(
        A=A_plate_eff, s_wll=s_plate / 2, alpha_fld=alpha_i, lam_wll=lam_wll
    )
    cell_temp_props_fld(
        T_ext_fld=T_ext[:, 1::2],
        V_cell=V_cell_fld,
        cp_T=cp_T,
        rho_T=rho_T,
        rhocp_fld=rhocp[:, ::2],
        mcp_fld=mcp[:, ::2],
        ui_fld=ui[:, ::2],
    )
    specific_inner_energy_wll(T_wll=T_ext[1:-1, 2], cp_wll=cp_wll, ui=ui[:, 1])
    dT_cond_port = _process_ports_collapsed(
        ports_all=ports_all,
        port_link_idx=port_link_idx,
        port_own_idx=port_own_idx,
        T=T_ext[1:-1, 1:-1],
        mcp=mcp,
        UA_port=UA_port,
        UA_port_wll=UA_port_wll,
        A_p_fld_mean=A_p_fld_mean,
        port_gsp=port_gsp,
        grid_spacing=grid_spacing,
        lam_T=lam_fld,
        cp_port=cp_port,
        lam_port_fld=lam_port_fld,
        T_port=T_port,
    )
    # heat loss to ambient currently disabled:
    UA_amb_shell = 0.0
    dT_cond[:] = (
        # heat conduction in first dimension (axis 0), top -> bottom:
        (
            +UA_dim1[:-1] * (T_ext[:-2, 1:-1] - T_ext[1:-1, 1:-1])
            # heat conduction in first dimension (axis 0), bottom -> top:
            + UA_dim1[1:] * (T_ext[2:, 1:-1] - T_ext[1:-1, 1:-1])
            # heat conduction in second dimension (axis 1), left -> right:
            + UA_dim2[:, :-1] * (T_ext[1:-1, :-2] - T_ext[1:-1, 1:-1])
            # heat conduction in second dimension (axis 1), right -> left:
            + UA_dim2[:, 1:] * (T_ext[1:-1, 2:] - T_ext[1:-1, 1:-1])
            # heat conduction to ambient (currently set to 0):
            + UA_amb_shell * (T_amb - T_ext[1:-1, 1:-1])
        )
        / mcp
    )
    # calculate heat transfer by advection in the fluid channels
    dT_adv[:, ::2] = (
        # advective heat transport (only axis 0), top -> bottom:
        (
            +dm_top * (cp_T[:-2] * T_ext[:-2, 1::2] - ui[:, ::2])
            # advective heat transport (only axis 0), bottom -> top:
            + dm_bot * (cp_T[2:] * T_ext[2:, 1::2] - ui[:, ::2])
        )
        / mcp[:, ::2]
    )
    # sum up heat conduction and advection for port values:
    for i in range(port_own_idx.size):
        idx = port_own_idx[i]  # idx of port values at temperature/diff array
        # conduction
        dT_cond.flat[idx] += dT_cond_port[i]
        # advection
        dT_adv.flat[idx] += (
            dm_port[i]
            * (cp_port[i] * T_port[i] - ui.flat[idx])
            / mcp.flat[idx]
        )
    # divide advective transfer by the number of channels:
    dT_adv[:, ::2] /= channel_divisor
    # sum up the differentials for conduction and advection
    dT_total[:] = dT_cond + dT_adv
    return dT_total
@nb.njit(cache=True, nogil=GLOB_NOGIL)
def euler_forward(diff, diff_input_args, yprev, _h):
    """Single explicit (forward) Euler step: ``y_new = y_prev + h * f(...)``."""
    dydt = diff(*diff_input_args)
    return yprev + _h * dydt
@nb.njit(cache=True, nogil=GLOB_NOGIL)
def hexnum_imp_root_diff(y, yprev, h, input_args):
    """
    Backward Euler residual ``f(y) = y - y_prev - h * f'(y)`` (flattened);
    its root is the implicit step solution. Writes ``y`` into the interior
    of ``input_args[0]`` as a side effect.
    """
    inner = input_args[0][1:-1, 1:-1]
    inner[:] = y.reshape(inner.shape)
    dydt_flat = hexnum_diff_impl(*input_args).ravel()
    return y - yprev - h * dydt_flat
@nb.njit
def hexnum_imp_fixedpoint(y, y_prev, h, input_args):  # fixed point function
    """
    Fixed point form of the implicit (backward) Euler step of the hex
    numeric model: returns ``g(y) = y_prev + h * f'(y)`` flattened, so the
    implicit solution is the fixed point ``y = g(y)``. Writes ``y`` into
    the interior of ``input_args[0]`` as a side effect.

    Warning: the original docstring was cut off here ("Fixed point
    iteration may be several..."); presumably it warned that fixed point
    iteration may be several times slower than a root finder or may not
    converge at all -- TODO confirm intended warning.
    """
    input_args[0][1:-1, 1:-1] = y.reshape(input_args[0][1:-1, 1:-1].shape)
    return (
        y_prev.reshape(input_args[0][1:-1, 1:-1].shape)
        + h * hexnum_diff_impl(*input_args)
    ).ravel()
@nb.njit
def fixed_point_to_root(y, fp_fun, y_prev, h, input_args):
    """Transform a fixed point map ``g`` into root form ``f(y) = y - g(y)``."""
    g_of_y = fp_fun(y, y_prev, h, input_args)
    return y - g_of_y
@nb.njit(cache=True, nogil=GLOB_NOGIL)
def hexnum_imp_fixedp_diff(y, yprev, h, input_args):
    """
    Fixed point map of the backward Euler step (non-flattened variant):
    returns ``y_prev + h * f'(y)`` after writing ``y`` into the interior of
    ``input_args[0]``.
    """
    input_args[0][1:-1, 1:-1] = y
    dydt = hexnum_diff_impl(*input_args)
    return yprev + h * dydt
# @nb.njit(cache=True, nogil=GLOB_NOGIL)
def hexnum_imp_newt_diff(yprev, _h, input_args, rtol=1e-6):
    """
    Solve the backward Euler step of the hex numeric model with a
    secant-style Newton iteration.

    Starts from one explicit Euler step as initial guess, then iterates
    ``y <- y - f(y) / slope`` on the residual
    ``f(y) = y - yprev - h * f'(y)`` until the relative change per
    iteration is below ``rtol`` everywhere. Returns the converged ``y``.
    Writes intermediate iterates into the interior of ``input_args[0]``.

    NOTE(review): elements of ``y_lastiter`` equal to zero (or stagnating
    iterates, ``y == y_lastiter``) cause division by zero in the relative
    error / slope -- pre-existing limitation, confirm inputs exclude this.

    References:
    https://math.stackexchange.com/questions/152159/how-to-correctly-apply-newton-raphson-method-to-backward-euler-method
    https://scicomp.stackexchange.com/questions/5042/how-to-implement-newtons-method-for-solving-the-algebraic-equations-in-the-back
    """
    input_args[0][1:-1, 1:-1] = yprev
    y_lastiter = yprev.copy()
    err = 1.0
    # initial guess: one explicit Euler step
    diff = hexnum_diff_impl(*input_args)
    y = yprev + _h * diff
    f = np.zeros_like(y)
    # bugfix: compare the *magnitude* of the relative change against rtol.
    # the previous signed comparison (err > rtol) terminated the iteration
    # as soon as all changes were negative, even if still far from
    # convergence.
    while np.any(np.abs(err) > rtol):
        input_args[0][1:-1, 1:-1] = y
        diff = hexnum_diff_impl(*input_args)
        f_lastiter = f
        # residual of the backward Euler step:
        f = y - yprev - _h * diff
        nz = f != 0.0  # make a mask with non zero values
        # secant slope from the last two residual evaluations:
        slope = (f[nz] - f_lastiter[nz]) / (y[nz] - y_lastiter[nz])
        y[nz] = y_lastiter[nz] - f[nz] / slope
        # relative change per iteration as convergence measure:
        err = (y - y_lastiter) / y_lastiter
        y_lastiter = y.copy()
    return y
# %% Simulation Env. old (mostly deprecated) solve methods:
@njit(nogil=GLOB_NOGIL, cache=True)
def solve_connector_3w_overload(arglist):
    """Unpack a pre-built argument tuple and call ``solve_connector_3w``."""
    solve_connector_3w(*arglist)
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def solve_connector_3w(T, ports_all, cp_T, dm, port_link_idx, res, stepnum):
    """
    Solve a 3-way connector part.

    Depending on the signs of the port massflows in ``dm`` the connector
    acts as a mixer (two inflows), a splitter (two outflows) or, for zero
    flows, passes mean temperatures through for heat conduction. Port
    temperatures are updated in ``T`` and logged to ``res[stepnum[0]]``.
    """
    # depending on the flow conditions this 3w connector acts as a flow
    # mixing or splitting device. This state has to be determined by
    # checking the direction of the massflows through the ports.
    # A negative sign means that the massflow is exiting through the
    # respective port, a positive sign is an ingoing massflow.
    # get connected port temperatures:
    # get port array:
    _port_values_to_cont(
        ports_all=ports_all, port_link_idx=port_link_idx, out=T
    )
    # get cp-values of all temperatures:
    get_cp_water(T, cp_T)
    # save bool indices of massflows greater (in) and less (out) than 0:
    # (using dm as massflow array only works since it is a view of _dm_io!)
    dm_in = np.greater(dm, 0)
    dm_out = np.less(dm, 0)
    # if 2 ports > 0 are True, 3w connector is mixer:
    if np.sum(dm_in) == 2:
        # get cp of outflowing massflow (error of mean temp is <<0.5% compared
        # to a heat cap. ratio calculation, thus negligible and ok):
        cp_out = cp_water(np.sum(T[dm_in]) / 2)
        # calc T_out by mixing the inflowing massflows (*-1 since outgoing
        # massflows have a negative sign):
        T_out = np.sum(dm[dm_in] * cp_T[dm_in] * T[dm_in]) / (
            cp_out * -1 * dm[dm_out]
        )
        # pass on port values by switching temperatures:
        # set old T_out to both in-ports
        T[dm_in] = T[dm_out]
        # set calculated T_out to out-port
        T[dm_out] = T_out
    # if 2 ports < 0 are True, 3w connector is splitter:
    elif np.sum(dm_out) == 2:
        # no real calculation has to be done here, just switching
        # temperatures and passing them on to opposite ports
        # calc the temp which will be shown at the inflowing port as a mean
        # of the temps of outflowing ports (at in port connected part will
        # see a mean value of both temps for heat conduction):
        T_in = T[dm_out].sum() / 2
        # pass inflowing temp to outflowing ports:
        T[dm_out] = T[dm_in]
        # pass mean out temp to in port:
        T[dm_in] = T_in
    # if one port has 0 massflow, sum of dm_in == 1:
    elif np.sum(dm_in) == 1:
        # get port with 0 massflow:
        dm0 = np.equal(dm, 0)
        # this port 'sees' a mean of the other two temperatures:
        T[dm0] = T[~dm0].sum() / 2
        # the out ports heat flow is dominated by convection, thus it
        # only 'sees' the in flow temperature but not the 0 flow temp:
        T[dm_out] = T[dm_in]
        # the in ports heat flow is also dominated by convection, but here
        # it is easy to implement the 0-flow port influence, since heat
        # flow by convection of part connected to in port is not affected
        # by connected temperature, thus also get a mean value:
        T[dm_in] = T[~dm_in].sum() / 2
    # if all ports have 0 massflow:
    else:
        # here all ports see a mean of the other ports:
        # bkp 2 ports
        T0 = (T[1] + T[2]) / 2
        T1 = (T[0] + T[2]) / 2
        # save means to port values:
        T[2] = (T[0] + T[1]) / 2
        T[0] = T0
        T[1] = T1
    # save results:
    res[stepnum[0]] = T
# @nb.jit((float64[:,:], float64[:,:], float64[:], float64[:], float64[:,:,:],
# float64[:,:], float64[:,:],
# float64[:], float64[:], float64[:], float64[:], float64[:,:],
# float64[:], float64[:], float64[:],
# int32[:], int32[:],
# float64, float64, float64, float64, float64[:],
# int32, int32, int32, float64, int32))
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def solve_platehex(
    T,
    T_port,
    T_mean,
    ports_all,
    res,
    dm_io,
    dm_port,
    cp_mean,
    lam_mean,
    rho_mean,
    ny_mean,
    lam_wll,
    alpha_i,
    UA_fld_wll,
    UA_fld_wll_fld,
    port_own_idx,
    port_link_idx,
    A_plate_eff,
    A_channel,
    d_h,
    s_plate,
    cell_dist,
    num_A,
    num_channels_sup,
    num_channels_dmd,
    corr_Re,
    stepnum,
):
    """
    Solve a plate heat exchanger with the NTU (number of transfer units)
    effectiveness method.

    Reads connected port temperatures and massflows, computes supply and
    demand side outlet temperatures (full calculation only when both sides
    have a non-zero flow), updates ``T`` depending on the flow directions
    and logs the result to ``res[stepnum[0]]``.
    """
    # get temperatures of connected ports:
    _port_values_to_cont(
        ports_all=ports_all, port_link_idx=port_link_idx, out=T_port
    )
    # get massflows
    # only positive flows for side supply and set entry temperature
    # depending on flow direction:
    if dm_io[0] >= 0:
        dm_sup = dm_io[0]  # positive sup side flow
        T_sup_in = T_port[0]  # entry temp. sup is sup_in
        T_sup_out = T_port[1]  # out temp. sup is sup_out
        dm_port[0] = dm_io[0]
        dm_port[1] = 0.0
    else:
        dm_sup = -dm_io[0]  # positive sup side flow
        T_sup_in = T_port[1]  # entry temp. sup is sup_out
        T_sup_out = T_port[0]  # out temp. sup is sup_in
        dm_port[0] = 0.0
        dm_port[1] = -dm_io[0]
    # only positive flows for side demand and set entry temperature
    # depending on flow direction:
    if dm_io[1] >= 0:
        dm_dmd = dm_io[1]  # positive dmd side flow
        T_dmd_in = T_port[2]  # entry temp. dmd is dmd_in
        T_dmd_out = T_port[3]  # out temp. dmd is dmd_out
        dm_port[2] = dm_io[1]
        dm_port[3] = 0.0
    else:
        dm_dmd = -dm_io[1]  # positive dmd side flow
        T_dmd_in = T_port[3]  # entry temp. dmd is dmd_out
        T_dmd_out = T_port[2]  # out temp. dmd is dmd_in
        dm_port[2] = 0.0
        dm_port[3] = -dm_io[1]
    # do all the calculations only if both massflows are not 0
    if dm_sup != 0 and dm_dmd != 0:
        # get mean temperature of both fluid sides as a mean of the neighboring
        # port temperatures which is a good approximation when there is a flow
        # through the HEX (without flow no calc. will be done anyways):
        T_mean[0] = (T_sup_in + T_sup_out) / 2  # sup side
        T_mean[1] = (T_dmd_in + T_dmd_out) / 2  # dmd side
        # get thermodynamic properties of water
        # for mean cell temp:
        water_mat_props(T_mean, cp_mean, lam_mean, rho_mean, ny_mean)
        # for conduction between fluid cells and wall:
        # get inner alpha value between fluid and wall from nusselt equations:
        # supply side:
        phex_alpha_i_wll_sep(
            dm_sup / num_channels_sup,
            T_mean[0],
            T_mean[0],
            rho_mean[0],
            ny_mean[0],
            lam_mean[0],
            A_channel,
            d_h,
            cell_dist,
            corr_Re,
            alpha_i[0:1],
        )
        # demand side:
        phex_alpha_i_wll_sep(
            dm_dmd / num_channels_dmd,
            T_mean[1],
            T_mean[1],
            rho_mean[1],
            ny_mean[1],
            lam_mean[1],
            A_channel,
            d_h,
            cell_dist,
            corr_Re,
            alpha_i[1:2],
        )
        # get resulting UA from both fluid sides, assuming same values in all
        # channels of one pass, to the midpoint (-> /2) of the separating wall.
        # index [1, 1] for lam_wll selects own lam_wll to avoid overwriting by
        # _get_port_connections method of simenv.
        UA_fld_wll[:] = UA_fld_wll_plate(
            A_plate_eff, s_plate / 2, alpha_i, lam_wll[0]
        )
        # get total UA value from fluid to fluid (in VDI Wärmeatlas this is kA)
        # by calculating the series circuit of the UA fluid wall values with
        # the number of effective heat transfer areas (num plates - 2)
        UA_fld_wll_fld[0] = (
            series_circuit_UA(UA_fld_wll[0], UA_fld_wll[1]) * num_A
        )
        # Heat exchanger dimensionless coefficients:
        # heat capacity flows (ok, this is not dimensionless...)
        dC_sup = dm_sup * cp_mean[0]
        dC_dmd = dm_dmd * cp_mean[1]
        # calculate NTU value of the supply side:
        if dC_sup != 0:
            NTU_sup = UA_fld_wll_fld[0] / dC_sup
        else:
            NTU_sup = np.inf
        # calculate heat capacity flow ratio for the supply to demand side:
        if dC_dmd != 0:
            R_sup = dC_sup / dC_dmd
        else:
            R_sup = np.inf
        # get dimensionless change in temperature
        rs_ntus = (R_sup - 1) * NTU_sup  # precalc. for speed
        # for the supply side
        if (
            R_sup != 1 and rs_ntus < 100  # heat cap flow ratio not 1 and valid
        ):  # range for exp
            P_sup = (1 - np.exp(rs_ntus)) / (1 - R_sup * np.exp(rs_ntus))
        elif rs_ntus > 100:  # if exp in not-defined range
            P_sup = 1 / R_sup  # largely only depending on 1/R
            # above a specific value. for float64 everything above around
            # 50 to 100 is cut of due to float precision and quite exactly
            # equal 1/R.
        else:  # heat cap flow ratio equal 1
            P_sup = NTU_sup / (1 + NTU_sup)
        # for the demand side:
        P_dmd = P_sup * R_sup
        # if P_sup has a NaN value, for example when a flow is zero or very
        # close to zero (NaN check is: Number is not equal to itself!):
        if P_sup != P_sup:
            P_sup = 0
            P_dmd = 0
        # calculate supply and demand outlet temperatures from this and
        # overwrite the estimate value taken from ports:
        T_sup_out = T_sup_in - P_sup * (  # supply side outlet temp.
            T_sup_in - T_dmd_in
        )
        T_dmd_out = T_dmd_in + P_dmd * (  # demand side outlet temp.
            T_sup_in - T_dmd_in
        )
        # calculate heat flow from supply fluid to wall and demand fluid:
        # dQ = dC_sup * (T_sup_in - T_sup_out)
    else:
        # else if at least one side is zero.
        # fill with the values of connected ports where the flow is 0 (this
        # is already done automatically in the beginning where temperature
        # values are set depending on the flow direction, so do nothing
        # for zero flow).
        # pass on the value where the flow is not 0.
        if dm_sup != 0:  # check supply side for flow not zero
            T_sup_out = T_sup_in  # pass on if sup flow not 0
        elif dm_dmd != 0:  # if sup flow not zero
            T_dmd_out = T_dmd_in  # pass on if dmd flow not 0
    # set new values to array for port interaction with other parts,
    # depending on flow direction:
    if dm_io[0] >= 0:  # sup side normal flow
        T[0] = T_sup_in  # - 273.15
        T[1] = T_sup_out  # - 273.15
    else:  # sup side inversed flow
        T[1] = T_sup_in  # - 273.15
        T[0] = T_sup_out  # - 273.15
    # only positive flows for side demand and set entry temperature
    # depending on flow direction:
    if dm_io[1] >= 0:  # dmd side normal flow
        T[2] = T_dmd_in  # - 273.15
        T[3] = T_dmd_out  # - 273.15
    else:  # dmd side inversed flow
        T[3] = T_dmd_in  # - 273.15
        T[2] = T_dmd_out  # - 273.15
    # save results:
    res[stepnum[0]] = T
# dT_cond[1, 1] = 0
@jit(
    (float64[:], int32[:], float64[:]),
    nopython=True,
    nogil=GLOB_NOGIL,
    cache=True,
)  # parallel=GLOB_PARALLEL useful
def _get_p_arr_pump(ports_all, port_link_idx, T):
    """
    Copy the temperatures of the linked ports into ``T`` in reversed order.
    """
    linked_temps = ports_all[port_link_idx]
    T[:] = linked_temps[::-1]
@njit(nogil=GLOB_NOGIL, cache=True)
def solve_mix_overload(arglist):
    """Unpack a pre-built argument tuple and call ``solve_mix``."""
    solve_mix(*arglist)
@nb.jit(
    nopython=True, nogil=GLOB_NOGIL, cache=True
)  # parallel=GLOB_PARALLEL useful
def solve_mix(port_array, _port_link_idx, dm_io, T):
    """
    Solve a 3-way mixing valve: mix the A (index 0) and B (index 1) inlet
    flows into the AB outlet (index 2) via an energy balance; with zero
    outlet flow, every port sees the mean of the other two ports so heat
    conduction can still be calculated.
    """
    # fetch connected port temperatures into the local temperature array:
    T[:] = port_array[_port_link_idx]
    if dm_io[2] != 0:  # flow through the AB (outlet) port -> real mixing
        # preliminary outlet temperature as mean of both inlets, used for
        # the outlet's heat capacity lookup:
        T[2] = (T[0] + T[1]) / 2
        # heat capacities of inlet A, inlet B and (estimated) outlet:
        cp = cp_water(T)
        # energy balance over the valve yields the mixed outlet temp.:
        t_mixed = (dm_io[0] * cp[0] * T[0] + dm_io[1] * cp[1] * T[1]) / (
            dm_io[2] * cp[2]
        )
        # both inlet ports 'see' the mean outlet estimate for conduction:
        T[0:2] = T[2]
        # store the mixed temperature at the outlet port:
        T[2] = t_mixed
    else:
        # no flow: each port sees the mean of the two other ports.
        # compute all three means before assigning to avoid overwriting:
        t_ab = (T[0] + T[1]) / 2
        t_a = (T[1] + T[2]) / 2
        t_b = (T[0] + T[2]) / 2
        T[0] = t_a
        T[1] = t_b
        T[2] = t_ab
@njit(nogil=GLOB_NOGIL, cache=True)
def solve_split_overload(arglist):
    """Unpack a pre-built argument tuple and call ``solve_split``."""
    solve_split(*arglist)
@nb.jit(
    (float64[:], int32[:], float64[:]),
    nopython=True,
    nogil=GLOB_NOGIL,
    cache=True,
)
def solve_split(port_array, _port_link_idx, T):
    """
    Solve a flow splitter: both outlet ports (indices 0 and 1) take on the
    inlet temperature (index 2), while the inlet port sees the mean of the
    two outlet temperatures for heat conduction.
    """
    # fetch connected port temperatures:
    T[:] = port_array[_port_link_idx]
    # mean of both outlet-side port temperatures:
    t_mean_out = T[0:2].sum() / 2
    # pass the inlet temperature on to both outlets:
    T[0:2] = T[2]
    # inlet port sees the mean outlet temperature:
    T[2] = t_mean_out
@njit(nogil=GLOB_NOGIL, cache=True)
def solve_pump_overload(arglist):
    """Unpack a pre-built argument tuple and call ``solve_pump``."""
    solve_pump(*arglist)
@nb.njit(nogil=GLOB_NOGIL, cache=True)  # parallel=GLOB_PARALLEL useful
def solve_pump(ports_all, port_link_idx, T, res, res_dm, dm, stepnum):
    """
    Solve method of part pump: fetch the (inverted) linked port
    temperatures into ``T`` and log the temperature and massflow results
    of the current step.
    """
    step = stepnum[0]
    # fetch and invert the temperatures of the linked ports:
    _get_p_arr_pump(ports_all=ports_all, port_link_idx=port_link_idx, T=T)
    # log results of the current step:
    res[step] = T
    res_dm[step, 0] = dm[0]
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def ctrl_deadtime(
    deadtime,
    timestep,
    dt_arr,
    pv_arr,
    len_dt_arr,
    dt_idx,
    last_dt_idx,
    delayed_pv,
    sp,
    pv,
):
    """
    Emulate a controller deadtime (transport delay) on the process value.

    Advances the elapsed-time buffer ``dt_arr``, finds the first buffered
    process value in ``pv_arr`` whose age reached ``deadtime``,
    interpolates the delayed process value and shifts both buffers by one
    element, inserting the current ``pv[0]`` at the front. Returns
    ``(error, delayed_pv, dt_idx, last_dt_idx)`` with
    ``error = sp[0] - delayed_pv``.
    """
    dt_arr += timestep
    # last deadtime index is saved for interpolation if in last
    # step a new pv was found, otherwise last deadtime index will
    # be increased by one to include roll by one element:
    if dt_idx != -1:
        last_dt_idx = dt_idx
    else:
        last_dt_idx += 1
    # reset deadtime index with a value which will be kept if no
    # new pv value reached (so the old one will be kept):
    dt_idx = -1
    # loop through deadtime array
    for i in range(len_dt_arr):
        # if time in deadtime array is equal or greater deadtime
        # return index of the position (only the first occurrence
        # will be found!)
        if dt_arr[i] >= deadtime:
            dt_idx = i
            break
    # calculate delayed pv (will not be overwritten after calc.
    # until next step, thus can be reused if no new value is found)
    # if a new value has reached deadtime delay in only one step:
    if dt_idx == 0:
        # interpolate delayed pv from previous pv, new pv and
        # expired time and time between prev. and new pv:
        delayed_pv = delayed_pv + (pv_arr[0] - delayed_pv) / (deadtime) * (
            dt_arr[0]
        )
    # if a new value has reached deadtime delay after more than
    # one step:
    elif dt_idx > 0:
        # if deadtime is hit exactly (for example with constant
        # timesteps):
        if dt_arr[dt_idx] == deadtime:
            delayed_pv = pv_arr[dt_idx]
        else:
            # interpolate value if deadtime is overshot and not hit:
            delayed_pv = pv_arr[dt_idx - 1] + (
                pv_arr[dt_idx] - pv_arr[dt_idx - 1]
            ) / (dt_arr[dt_idx] - dt_arr[dt_idx - 1]) * (
                deadtime - dt_arr[dt_idx - 1]
            )
    # if deadtime delay was not reached:
    else:
        # interpolate delayed pv from previous pv, next pv and
        # expired time and time till next pv:
        delayed_pv = delayed_pv + (pv_arr[last_dt_idx] - delayed_pv) / (
            deadtime - (dt_arr[last_dt_idx] - timestep)
        ) * (timestep)
    # calculate error from delayed pv_value (delayed pv will not
    # be overwritten until next step):
    error = sp[0] - delayed_pv
    # set all time values in deadtime array after found value to 0:
    dt_arr[dt_idx:] = 0
    # roll deadtime and pv array one step backwards:
    dt_arr[1:] = dt_arr[0:-1]
    pv_arr[1:] = pv_arr[0:-1]
    # insert current pv into first slot of pv_arr:
    pv_arr[0] = pv[0]
    # set expired time of current pv to zero:
    dt_arr[0] = 0
    return error, delayed_pv, dt_idx, last_dt_idx
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def _heun_corrector_adapt(
    res,
    T,
    df0,
    df1,
    trnc_err_cell_weight,
    _h,
    stepnum,
    rtol,
    atol,
    err,
    new_trnc_err,
):
    """
    Heun (trapezoidal) corrector step with embedded truncation error
    estimate for adaptive stepsize control.

    Writes the corrector result to ``res[stepnum]`` and back into ``T``;
    returns the accumulated squared system error ``err`` and this part's
    RMS truncation error ``new_trnc_err``.
    """
    # Heun corrector: average of both slope evaluations over the step.
    res[stepnum] = res[stepnum - 1] + (_h / 2) * (df0 + df1)
    # Truncation error estimate: difference between the 2nd order Heun
    # result and the 1st order Euler predictor (still stored in T), scaled
    # per cell by trnc_err_cell_weight and by the mixed abs/rel tolerance.
    # Tolerance weighting as in scipy's RK implementation:
    # https://github.com/scipy/scipy/blob/ ...
    # 19acfed431060aafaa963f7e530c95e70cd4b85c/scipy/integrate/_ivp/rk.py#L147
    tol_scale = np.maximum(res[stepnum - 1], res[stepnum]) * rtol + atol
    trnc_err = (
        ((res[stepnum] - T) * trnc_err_cell_weight / tol_scale) ** 2
    ).sum()
    # accumulate this part's squared norm into the system-wide error:
    err += trnc_err
    # root mean square error of this part (normalized by its cell count):
    new_trnc_err = (trnc_err / T.size) ** 0.5
    # promote the corrector result to the working temperature array so the
    # diff functions can keep using their memoryviews:
    T[:] = res[stepnum]
    return err, new_trnc_err
# @nb.njit(nogil=GLOB_NOGIL, cache=True)
def _embedded_adapt_stepsize(
    err,
    sys_trnc_err,
    num_cells_tot_nmrc,
    step_accepted,
    failed_steps,
    safety,
    order,
    solver_state,
    min_factor,
    max_factor,
    min_stepsize,
    max_stepsize,
    ports_all,
    parr_bkp,
    vN_max_step,
    step_stable,
    cnt_instable,
    timeframe,
    _h,
    timestep,
    stepnum,
):
    """
    Adaptive stepsize control for the embedded Heun/Euler solver.

    Accepts or rejects the current step based on the tolerance-weighted
    system RMS error and the von Neumann stability flag, adjusts the
    stepsize ``_h``, logs solver state codes and failed-step counts, and
    returns ``(step_accepted, timestep, timeframe, cnt_instable)``.

    NOTE(review): the adjusted ``_h`` (a local float) is NOT returned, so
    the grown/shrunk stepsize is lost to the caller unless it is recovered
    elsewhere -- confirm; possibly the reason this function is in the
    deprecated section.
    """
    # ADAPTIVE TIMESTEP CALCULATION:
    # get all part's RMS error by dividing err by the amount of all
    # cells in the system and taking the root:
    err_rms = (err / num_cells_tot_nmrc) ** 0.5
    # save to array to enable stepwise system error lookup:
    sys_trnc_err[stepnum] = err_rms
    # check for good timesteps:
    # err_rms already has the relative and absolute tolerance included,
    # thus only checking against its value:
    if err_rms < 1:
        # error is lower than tolerance, thus step is accepted.
        step_accepted = True
        # save successful timestep to simulation environment:
        timestep = _h
        # get new timestep (err_rms is inverted thus negative power):
        _h *= min(
            max_factor[0], max(1, (safety * err_rms ** (-1 / (order + 1))))
        )
        # check if step is not above max step:
        if _h > max_stepsize:
            _h = max_stepsize  # reduce to max stepsize
            # save to state that max stepsize was reached:
            solver_state[stepnum] = 5
        else:
            # else save to state that error was ok in i steps:
            solver_state[stepnum] = 4
    elif err_rms == 0.0:
        # if no RMS (most probably the step was too small so rounding
        # error below machine precision led to cut off of digits) step
        # will also be accepted:
        step_accepted = True
        # save successful timestep to simulation environment:
        timestep = _h
        # get maximum step increase for next step:
        _h *= max_factor[0]
        # save to state that machine epsilon was reached:
        solver_state[stepnum] = 7
        # check if step is not above max step:
        if _h > max_stepsize:
            _h = max_stepsize  # reduce to max stepsize
    else:
        # else error was too big.
        # check if stepsize already is at minimum stepsize. this can
        # only be true, if stepsize has already been reduced to min.
        # stepsize, thus to avoid infinite loop set step_accepted=True
        # and skip the rest of the loop:
        if _h == min_stepsize:
            step_accepted = True
            # save not successful but still accepted timestep to
            # simulation environment:
            timestep = _h
            # save this special event to solver state:
            solver_state[stepnum] = 6
        else:
            # else if stepsize not yet at min stepsize, reduce stepsize
            # further by error estimate if this is not less than the
            # minimum factor and redo the step.
            _h *= max(min_factor, (safety * err_rms ** (-1 / (order + 1))))
            # check if step is not below min step:
            if _h < min_stepsize:
                _h = min_stepsize  # increase to min stepsize
            # reset ports array for retrying step:
            ports_all[:] = parr_bkp
            # count failed steps at this step number:
            failed_steps[stepnum] += 1
    # catch von Neumann stability condition:
    if not step_stable[0]:
        # if von Neumann stability violated, do not accept step.
        # This can happen even though the RMS-error is ok, since single
        # non-stable parts can have a too small impact on the RMS. In
        # this case _step_accepted will be overwritten.
        step_accepted = False  # redo the step
        # inrease counter for failed loops
        cnt_instable += 1
        # set new step to maximum von Neumann step (calc. in parts):
        _h = vN_max_step[0]
        # count failed steps at this step number:
        failed_steps[stepnum] += 1
        # reset ports array for retrying step:
        ports_all[:] = parr_bkp
        # break loop if no solution was found after 50 tries:
        if cnt_instable == 50:
            # set timeframe to (almost) 0 to break the outer simulation loop
            timeframe = 1e-9
            # save error to solver state:
            solver_state[stepnum] = 99
            """
            TODO: Wie integriere ich das break hier?
            """
            # break
    return step_accepted, timestep, timeframe, cnt_instable
# %% CALCULATE DIMENSIONLESS NUMBERS:
@njit(nogil=GLOB_NOGIL, cache=True)
def rayleigh_number(T_s, T_inf, Pr, ny, Kelvin, flow_length):
    """
    Calculate the Rayleigh number for the given parameters [1]_.

    Parameters:
    -----------
    T_s : float, int, np.ndarray
        Surface temperature in [°C] or [K].
    T_inf : float, int, np.ndarray
        Surrounding fluid temperature in [°C] or [K].
    Pr : float, int, np.ndarray
        Prandtl number of the surrounding fluid at the mean temperature:
        $$(T_s + T_{inf}) / 2$$
        For (dry) air this can be set to a constant value of ca.
        $$Pr = 0.708$$.
    ny : float, int, np.ndarray
        Kinematic viscosity in [m^2 / s] of the surrounding fluid at the
        mean temperature: $$(T_s + T_{inf}) / 2$$
    Kelvin : float, int
        Set ``Kelvin=273.15`` if `T_s` and `T_inf` are given in [°C], or
        ``Kelvin=0`` if they are given in [K].
    flow_length : float, int
        Specific flow length in [m], depending on the part geometry. See
        function calc_flow_length() for further information.

    Notes:
    ------
    .. [1] VDI Wärmeatlas 2013, VDI-Gesellschaft Verfahrenstechnik und
       Chemieingenieurwesen, Düsseldorf, Deutschland, p. 754
    """
    # Rayleigh number according to VDI Wärmeatlas 2013 chapter F1 eq (7),
    # replacing kappa with kappa = ny/Pr (F1 eq (8)) and beta with
    # 1/T_inf (F1 eq (2)):
    numerator = np.abs(T_s - T_inf) * 9.81 * flow_length ** 3 * Pr
    denominator = (T_inf + Kelvin) * ny ** 2
    return numerator / denominator
# %% CALCULATE MATERIAL PROPERTIES:
# ---> water
# calc density from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_rho_water(T, rho):
    """Write the density of water in [kg/m^3] for temperature(s) ``T`` in
    [°C] into ``rho`` (4th-degree polynomial fit).
    NOTE(review): the commented-out clipping suggests the fit is meant for
    T <= 100 °C -- confirm valid range.
    """
    # 4th degree
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    rho[:] = (
        999.88785713136213
        + 4.9604454990529602e-02 * T
        - 7.4722666453460717e-03 * T ** 2
        + 4.1094484438154484e-05 * T ** 3
        - 1.2915789546323614e-07 * T ** 4
    )
    # 3rd degree
    # rho[:] = (1000.0614995891804 + 1.3246507417626112e-02*T
    #           - 5.8171082149854319e-03*T**2 + 1.5262905345518088e-05*T**3)
# calc density from celsius temperature AND RETURN the result:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def rho_water(T):
    """Return the density of water in [kg/m^3] for temperature(s) ``T`` in
    [°C] (4th-degree polynomial fit).
    """
    # 4th degree
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    return (
        999.88785713136213
        + 4.9604454990529602e-02 * T
        - 7.4722666453460717e-03 * T ** 2
        + 4.1094484438154484e-05 * T ** 3
        - 1.2915789546323614e-07 * T ** 4
    )
# calc heat conduction from celsius temperature AND RETURN IT:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def lambda_water(T):
    """Return the heat conductivity of water in [W/(m*K)] for
    temperature(s) ``T`` in [°C] (3rd-degree polynomial fit).
    """
    # 3rd degree (4th degree not sufficiently better)
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    return (
        5.6987912853229539e-01
        + 1.7878370402545738e-03 * T
        - 5.9998217273879795e-06 * T ** 2
        - 8.6964577115093407e-09 * T ** 3
    )
# calc heat conduction from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_lambda_water(T, lam):
    """Write the heat conductivity of water in [W/(m*K)] for
    temperature(s) ``T`` in [°C] into ``lam`` (3rd-degree polynomial fit).
    """
    # 3rd degree (4th degree not sufficiently better)
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    lam[:] = (
        5.6987912853229539e-01
        + 1.7878370402545738e-03 * T
        - 5.9998217273879795e-06 * T ** 2
        - 8.6964577115093407e-09 * T ** 3
    )
# calc specific heat capacity from celsius temperature (4th degree, about 10%
# slower but a tiny bit more accurate):
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_cp_water(T, cp):
    """Write the specific heat capacity of water in [J/(kg*K)] for
    temperature(s) ``T`` in [°C] into ``cp`` (4th-degree polynomial fit).
    """
    # 4th degree
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    cp[:] = (
        4215.4023574179992
        - 2.8853943283519348 * T
        + 7.490580684801168e-02 * T ** 2
        - 7.7807143441700321e-04 * T ** 3
        + 3.2109328970410432e-06 * T ** 4
    )
    # 3rd degree
    # cp[:] = (4211.0855150125581 - 1.9815167178349438*T
    #          + 3.375770177242976e-02*T**2 - 1.3588485500876595e-04*T**3)
# calc specific heat capacity from celsius temperature AND RETURN IT
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def cp_water(T):
    """Return the specific heat capacity of water in [J/(kg*K)] for
    temperature(s) ``T`` in [°C] (4th-degree polynomial fit).
    """
    # 4th degree
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    return (
        4215.4023574179992
        - 2.8853943283519348 * T
        + 7.490580684801168e-02 * T ** 2
        - 7.7807143441700321e-04 * T ** 3
        + 3.2109328970410432e-06 * T ** 4
    )
# calc kinematic viscosity from celsius temperature after VDI Wärmeatlas 2013
# table D2.1:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_ny_water(T, ny):
    """Write the kinematic viscosity of water in [m^2/s] for
    temperature(s) ``T`` in [°C] into ``ny`` (4th-degree polynomial fit
    after VDI Wärmeatlas 2013 table D2.1).
    """
    # 4th degree:
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    ny[:] = (
        1.7764473380494155e-06
        - 5.5640275781265404e-08 * T
        + 1.0243072887494426e-09 * T ** 2
        - 9.7954460136981165e-12 * T ** 3
        + 3.6460468745062724e-14 * T ** 4
    )
# calc kinematic viscosity from celsius temperature AND RETURN IT, VDI
# Wärmeatlas 2013 table D2.1:
@njit(nogil=GLOB_NOGIL, cache=True)
def ny_water(T):
    """Return the kinematic viscosity of water in [m^2/s] for
    temperature(s) ``T`` in [°C] (4th-degree polynomial fit after VDI
    Wärmeatlas 2013 table D2.1).
    """
    # 4th degree:
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    return (
        1.7764473380494155e-06
        - 5.5640275781265404e-08 * T
        + 1.0243072887494426e-09 * T ** 2
        - 9.7954460136981165e-12 * T ** 3
        + 3.6460468745062724e-14 * T ** 4
    )
# calc Prandtl number from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_Pr_water(T, Pr):
    """Write the Prandtl number of water for temperature(s) ``T`` in [°C]
    into ``Pr`` (3rd-degree polynomial fit).
    """
    # 4th degree:
    # Pr[:] = (12.909891117064289 - 0.4207372206483363*T
    #          + 7.4860282126284405e-03*T**2 - 6.854571430021334e-05*T**3
    #          + 2.4685760188512201e-07*T**4)
    # 3rd degree:
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    Pr[:] = (
        12.5780108199379058
        - 0.35124680571767508 * T
        + 4.3225480444706085e-03 * T ** 2
        - 1.9174193923188898e-05 * T ** 3
    )
# calc Prandtl number from celsius temperature AND RETURN IT
# (alot faster for single values):
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def Pr_water_return(T):
    """Return the Prandtl number of water for temperature(s) ``T`` in [°C]
    (3rd-degree polynomial fit; much faster than ``get_Pr_water`` for
    single values).
    """
    # 4th degree:
    # Pr[:] = (12.909891117064289 - 0.4207372206483363*T
    #          + 7.4860282126284405e-03*T**2 - 6.854571430021334e-05*T**3
    #          + 2.4685760188512201e-07*T**4)
    # 3rd degree:
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    return (
        12.5780108199379058
        - 0.35124680571767508 * T
        + 4.3225480444706085e-03 * T ** 2
        - 1.9174193923188898e-05 * T ** 3
    )
# calc isobaric expansion coefficient in [1/K] from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_beta_water(T, beta):
    """Write the isobaric expansion coefficient of water in [1/K] for
    temperature(s) ``T`` in [°C] into ``beta`` (3rd-degree polynomial fit).
    """
    # 3rd degree:
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    beta[:] = (
        -5.87985364766666e-05
        + 1.5641955219950547e-05 * T
        - 1.3587684743777981e-07 * T ** 2
        + 6.1220503308149086e-10 * T ** 3
    )
# calc isobaric expansion coefficient in [1/K] from celsius temperature
# AND RETURN IT:
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def beta_water_return(T):
    """Return the isobaric expansion coefficient of water in [1/K] for
    temperature(s) ``T`` in [°C] (3rd-degree polynomial fit).
    """
    # 3rd degree:
    # Tclp = T.copy()
    # Tclp[Tclp > 100.] = 100.
    return (
        -5.87985364766666e-05
        + 1.5641955219950547e-05 * T
        - 1.3587684743777981e-07 * T ** 2
        + 6.1220503308149086e-10 * T ** 3
    )
# calc Reynolds number
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_Re_water(v, L, ny, Re):
    """Write the Reynolds number ``Re = |v| * L / ny`` into ``Re``."""
    abs_velocity = np.abs(v)
    Re[:] = abs_velocity * L / ny
# calc Reynolds number and RETURN the result
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def Re_water_return(v, L, ny):
    """Return the Reynolds number ``Re = |v| * L / ny``."""
    abs_velocity = np.abs(v)
    return abs_velocity * L / ny
# ---> dry air:
# calc density from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_rho_dryair(T, rho):
    """Write the density of dry air in [kg/m^3] for temperature(s) ``T``
    in [°C] into ``rho`` (2nd-degree polynomial fit).
    """
    # 2nd degree
    rho[:] = (
        1.2767987012987012
        - 0.0046968614718614701 * T
        + 1.4296536796536256e-05 * T ** 2
    )
# calc heat conductivity from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_lam_dryair(T, lam):
    """Write the heat conductivity of dry air in place into `lam`.

    2nd degree polynomial fit over the celsius temperature array ``T``.
    Units presumably [W/(mK)] as for the other lambda values in this
    module — confirm against the fit source.
    """
    # 2nd degree
    lam[:] = (
        0.024358670995670989
        + 7.6533982683982561e-05 * T
        - 4.2099567099572201e-08 * T ** 2
    )
# calc heat conductivity from celsius temperature and return it:
@njit(nogil=GLOB_NOGIL, cache=True)
def lam_dryair_return(T):
    """Return the heat conductivity of dry air for celsius temperature ``T``.

    2nd degree polynomial fit (same coefficients as `get_lam_dryair`);
    ``T`` may be a scalar or an array.
    """
    # 2nd degree
    return (
        0.024358670995670989
        + 7.6533982683982561e-05 * T
        - 4.2099567099572201e-08 * T ** 2
    )
# calc kinematic viscosity from celsius temperature:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def get_ny_dryair(T, ny):
    """Write the kinematic viscosity of dry air in [m**2/s] in place to `ny`.

    2nd degree polynomial fit over the celsius temperature array ``T``.
    """
    # 2nd degree
    ny[:] = (
        1.3500069264069257e-05
        + 8.8810389610389459e-08 * T
        + 1.0974025974025443e-10 * T ** 2
    )
# calc kinematic viscosity from celsius temperature and return it:
@njit(nogil=GLOB_NOGIL, cache=True)
def ny_dryair_return(T):
    """Return the kinematic viscosity of dry air in [m**2/s].

    2nd degree polynomial fit (same coefficients as `get_ny_dryair`);
    ``T`` may be a scalar or an array.
    """
    # 2nd degree
    return (
        1.3500069264069257e-05
        + 8.8810389610389459e-08 * T
        + 1.0974025974025443e-10 * T ** 2
    )
# ---> humid air:
# saturation pressure in [Pa] of humid air for total pressures < 2MPa
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def humid_air_saturation_pressure(T):
    """Return the saturation pressure of humid air in [Pa].

    6th degree polynomial fit over the celsius temperature ``T``; per the
    comment above, intended for total pressures below 2 MPa. A higher-order
    (10th degree) fit is kept below as a comment for reference.
    """
    # 6th degree
    return (
        +1.56927617e-09 * T ** 6
        + 2.32760367e-06 * T ** 5
        + 3.19028425e-04 * T ** 4
        + 2.51824584e-02 * T ** 3
        + 1.42489091e00 * T ** 2
        + 4.55277840e01 * T ** 1
        + 5.99770272e02
    )
    # 10th degree
    # return (- 1.30339138e-16*T**10 + 7.49527386e-14*T**9 - 1.59881730e-11*T**8
    # + 1.54764869e-09*T**7 - 5.56609536e-08*T**6 + 1.46597641e-06*T**5
    # + 4.21883898e-04*T**4 + 2.43290034e-02*T**3 + 1.38204573e+00*T**2
    # + 4.58581434e+01*T + 6.02909924e+02)
# mass of water in fully saturated air in [kg H2O / kg Air] for a pressure of
# 0.1 MPa, only valid for -30 <= T <= 80 !
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def humid_air_sat_water_mass(T):
    r"""
    Calculate the mass of water in fully saturated air (at 100% relative
    humidity) in :math:`[f]= \mathtt{kg_{H_2O}}/\mathtt{kg_{Luft}}`,
    valid for a pressure of :math:`0.1\mathtt{\,MPa}` and a temperature range
    of :math:`-30\mathtt{\,°C}\leq T \leq 80\mathtt{\,°C}`.

    Parameters:
    -----------
    T : float, np.ndarray
        Celsius temperature(s). The valid-range check is disabled
        (see the commented-out assert below), so values outside
        -30..80 °C are extrapolated silently.
    """
    # assert np.all(-30 <= T) and np.all(T <= 80)
    # 6th degree
    # return (1.56927617e-09*T**6 + 2.32760367e-06*T**5 + 3.19028425e-04*T**4
    # + 2.51824584e-02*T**3 + 1.42489091e+00*T**2 + 4.55277840e+01*T
    # + 5.99770272e+02)
    # 10th degree
    return (
        +3.47491188e-19 * T ** 10
        - 6.50956001e-17 * T ** 9
        + 3.68271647e-15 * T ** 8
        + 2.06252891e-14 * T ** 7
        - 7.11474217e-12 * T ** 6
        + 1.29052920e-10 * T ** 5
        + 6.62755505e-09 * T ** 4
        + 8.79652019e-08 * T ** 3
        + 8.16034548e-06 * T ** 2
        + 2.98380899e-04 * T
        + 3.79413965e-03
    )
# %% part shape specific calculations:
def calc_flow_length(*, part_shape, vertical, **kwargs):
    """
    Calculate the shape specific flow length of a part for the calculation
    of heat-transfer specific numbers, like the Rayleigh number.

    Parameters:
    -----------
    part_shape : str
        One of 'plane', 'cylinder', 'sphere'.
    vertical : bool
        Orientation of the surface of which the flow length shall be
        calculated. Ignored for spheres.
    **kwargs
        Shape/orientation specific length parameters in [m]:
          plane, vertical=True: `height`
          plane, vertical=False (horizontal): `width` and `depth`
              (pass the diameter for both for a circular disk)
          cylinder, vertical=True: `height`
          cylinder, vertical=False (horizontal): `diameter`
          sphere: `diameter`

    Returns:
    --------
    float
        Characteristic flow length in [m] following VDI Wärmeatlas 2013,
        chapter F2.

    Raises:
    -------
    AssertionError
        If `part_shape`, `vertical` or the length parameters are invalid.
    """
    err_str = (
        '`part_shape=' + str(part_shape) + '` was passed to '
        '`calc_flow_length()`. The following shapes are supported:\n'
        '\'plane\', \'cylinder\', \'sphere\'.'
    )
    assert part_shape in ['plane', 'cylinder', 'sphere'], err_str
    err_str = (
        '`vertical=' + str(vertical) + '` was passed to '
        '`calc_flow_length()`. `vertical` must be a bool value, '
        'depicting the orientation of the surface of which the flow '
        'length shall be calculated. For a sphere this argument will '
        'be ignored.'
    )
    # isinstance instead of `type(...) == bool` (idiomatic type check):
    assert isinstance(vertical, bool), err_str
    err_str_len = (
        'The part shape specific length parameters to be passed to '
        '`calc_flow_length()` depend on the part\'s shape and '
        'orientation. The following parameters are needed to calculate '
        'the flow length for each shape:\n'
        '    plane, vertical=True: `height=X`\n'
        '    plane, vertical=False (horizontal): `width=X`, `depth=Y`. '
        'Pass the diameter as value for width and depth for a circular '
        'disk.\n'
        '    cylinder, vertical=True: `height=X`\n'
        '    cylinder, vertical=False (horizontal): `diameter=X`\n'
        '    sphere: `diameter=X`'
    )
    if part_shape in ('plane', 'cylinder') and vertical:
        assert 'height' in kwargs and isinstance(
            kwargs['height'], (int, float)
        ), err_str_len
        return kwargs['height']  # VDI Wärmeatlas 2013, F2.1
    elif part_shape == 'plane' and not vertical:
        # VDI Wärmeatlas 2013, F2.3
        assert 'width' in kwargs and isinstance(
            kwargs['width'], (int, float)
        ), err_str_len
        assert 'depth' in kwargs and isinstance(
            kwargs['depth'], (int, float)
        ), err_str_len
        return (kwargs['width'] * kwargs['depth']) / (
            2 * (kwargs['width'] + kwargs['depth'])
        )
    elif part_shape == 'cylinder' and not vertical:
        assert 'diameter' in kwargs and isinstance(
            kwargs['diameter'], (int, float)
        ), err_str_len
        return kwargs['diameter'] * np.pi / 2  # VDI Wärmeatlas 2013, F2.4.1
    else:
        assert 'diameter' in kwargs and isinstance(
            kwargs['diameter'], (int, float)
        ), err_str_len
        return kwargs['diameter']  # VDI Wärmeatlas 2013, F2.4.2
# caller to calculate Reynolds number for a round pipe/TES:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def pipe_get_Re(dm, rho, ny, A, d_i, Re):
    """Write the pipe Reynolds number in place into `Re`.

    The flow velocity is derived as `dm / (rho * A)` from the massflow `dm`
    in [kg/s], the fluid density `rho` in [kg/m**3] and the cross section
    `A` in [m**2]; `d_i` is the inner (hydraulic) diameter in [m] and `ny`
    the kinematic viscosity in [m**2/s].
    """
    get_Re_water(dm / (rho * A), d_i, ny, Re)
# manual inlining function to calculate Reynolds number for a round pipe/TES:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def pipe_get_Re2(dm, rho, ny, A, d_i, Re):
    """Write the pipe Reynolds number in place into `Re`.

    Manually inlined variant of `pipe_get_Re` (same arguments, same
    result). NOTE: omits the `np.abs` of `get_Re_water`, so a negative
    massflow yields a negative Reynolds number here.
    """
    Re[:] = dm * d_i / (rho * A * ny)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def pipe_alpha_i(dm, T, rho, ny, lam_fld, A, d_i, x, alpha):
    """
    Calculates the inner alpha value in [W/(m**2K)] between the fluid inside a
    pipe and the pipe wall for each cell of a round pipe or thermal energy
    storage of diameter `d_i` and length `Len`.
    In this case, the wall is considered in the same row of the temperature
    array as the fluid and thus can't have temperatures different from the
    fluid temperature.
    Parameters:
    -----------
    dm : np.ndarray, float, integer
        Massflow in the pipe/TES in [kg/s].
    T : np.ndarray
        Fluid celsius temperature per cell, used to get the Prandtl number.
    rho : np.ndarray
        Fluid density in [kg/m**3].
    ny : np.ndarray
        Fluid kinematic viscosity in [m**2/s].
    lam_fld : np.ndarray
        Fluid heat conductivity in [W/(mK)].
    A : float, integer
        Inner pipe cross section in [m**2] for round pipes or hydraulic cross
        section for pipes of other shapes.
    d_i : float, integer
        Inner pipe diameter in [m] for round pipes or hydraulic diameter for
        pipes of other shapes.
    x : float, integer
        Distance of cell from start of the pipe [m]. If the massflow `dm` is
        negative, the inverse (the distance from the other end of the pipe) is
        taken.
    alpha : np.ndarray
        Array to save the resulting alpha value in [W/(m**2K)] for all cells
        of the pipe/TES.
    """
    # save shape:
    shape = rho.shape
    # shape = (100,)
    # preallocate arrays:
    Re = np.zeros(shape)
    # Pr = np.zeros((100,))
    Pr_f = np.zeros(T.shape)
    Nu = np.zeros(shape)
    # get Reynolds and Prandtl number:
    get_Re_water(dm / (rho * A), d_i, ny, Re)
    get_Pr_water(T, Pr_f)
    # get Peclet number to replace calculations of Re*Pr
    Pe = Re * Pr_f
    # use reversed x if first cell of dm is negative (this applies only to
    # parts where the massflow is the same in all cells, since these are the
    # only cells with a cell-specific x-array and a single-cell-massflow. For
    # all other parts, this reversing does not change anything):
    if dm[0] < 0:
        xi = x[::-1]  # create reversed view
    else:
        xi = x[:]  # create view
    # get a mask for the turbulent flows (laminar cells are the complement):
    turb = Re > 2300
    # equations for laminar Nusselt number following VDI Wärmeatlas 2013,
    # Chapter G1 - 3.1.1 equation (3), (1) and (2)
    Nu[~turb] = (
        49.371  # 49.371 is 3.66**3 of eq (1) + 0.7**3 of eq (3)
        + (1.077 * (Pe[~turb] * d_i / xi[~turb]) ** (1 / 3) - 0.7)
        ** 3  # eq (2)
    ) ** (1 / 3)
    # equations for turbulent Nusselt number following VDI Wärmeatlas 2013,
    # Chapter G1 - 4.1 equations (27) and (28):
    f = (1.8 * np.log10(Re[turb]) - 1.5) ** (-2)
    Nu[turb] = (
        (f / 8 * Pe[turb])
        / (1 + 12.7 * (Pr_f[turb] ** (2 / 3) - 1) * (f / 8) ** (0.5))
        * (1 + (d_i / xi[turb]) ** (2 / 3) / 3)
    )
    # alpha value is Nusselt number * fluid lambda / d_i,
    # VDI Wärmeatlas 2013, Chapter G1 - 4.1:
    alpha[:] = Nu * lam_fld / d_i
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def pipe_alpha_i_wll_sep(dm, T, rho, ny, lam_fld, A, d_i, x, alpha):
    """
    Calculates the inner alpha value in [W/(m**2K)] between the fluid inside a
    pipe and the pipe wall for each cell of a round pipe or thermal energy
    storage of diameter `d_i` and length `Len`.
    In this case, the wall is considered in a separate row of the temperature
    array and can thus have temperatures different from the fluid temperature.
    Parameters:
    -----------
    dm : np.ndarray, float, integer
        Massflow in the pipe/TES in [kg/s].
    T : np.ndarray
        Celsius temperatures with two columns: column 0 holds the fluid
        temperature, column 1 the wall temperature of each cell. Used for
        the Prandtl numbers and their wall/fluid correction factor.
    rho : np.ndarray
        Fluid density in [kg/m**3].
    ny : np.ndarray
        Fluid kinematic viscosity in [m**2/s].
    lam_fld : np.ndarray
        Fluid heat conductivity in [W/(mK)].
    A : float, integer
        Inner pipe cross section in [m**2] for round pipes or hydraulic cross
        section for pipes of other shapes.
    d_i : float, integer
        Inner pipe diameter in [m] for round pipes or hydraulic diameter for
        pipes of other shapes.
    x : np.ndarray, float, integer
        Distance of cell from start of the pipe [m]. If the massflow `dm` is
        negative, the reversed view (distance from the other end of the
        pipe) is taken.
    alpha : np.ndarray
        Array to save the resulting alpha value in [W/(m**2K)] for all cells
        of the pipe/TES.
    """
    # save shape:
    shape = rho.shape
    # shape = (100,)
    # preallocate arrays:
    Re = np.zeros(shape)
    # Pr = np.zeros((100,))
    Pr = np.zeros(T.shape)
    Nu = np.zeros(shape)
    # get Reynolds and Prandtl number:
    get_Re_water(dm / (rho * A), d_i, ny, Re)
    get_Pr_water(T, Pr)
    # get correction factor for the difference in wall and fluid temperature
    # following VDI Wärmeatlas 2013, Chapter G1 - 3.1.3 equation (13):
    K = (Pr[:, 0] / Pr[:, 1]) ** 0.11
    # save Prandtl number of first row (fluid row) to array for fluid Pr number
    Pr_f = Pr[:, 0]
    # get Peclet number to replace calculations of Re*Pr
    Pe = Re * Pr_f
    # use reversed x if first cell of dm is negative (this applies only to
    # parts where the massflow is the same in all cells, since these are the
    # only cells with a cell-specific x-array and a single-cell-massflow. For
    # all other parts, this reversing does not change anything):
    if dm[0] < 0:
        xi = x[::-1]  # create reversed view
    else:
        xi = x[:]  # create view
    # get a mask for the turbulent flows:
    turb = Re > 2300
    # equations for laminar Nusselt number following VDI Wärmeatlas 2013,
    # Chapter G1 - 3.1.1 equation (3), (1) and (2)
    Nu[~turb] = (
        49.371  # 49.371 is 3.66**3 of eq (1) + 0.7**3 of eq (3)
        + (1.077 * (Pe[~turb] * d_i / xi[~turb]) ** (1 / 3) - 0.7)
        ** 3  # eq (2)
    ) ** (1 / 3)
    # equations for turbulent Nusselt number following VDI Wärmeatlas 2013,
    # Chapter G1 - 4.1 equations (27) and (28):
    f = (1.8 * np.log10(Re[turb]) - 1.5) ** (-2)
    Nu[turb] = (
        (f / 8 * Pe[turb])
        / (1 + 12.7 * (Pr_f[turb] ** (2 / 3) - 1) * (f / 8) ** (0.5))
        * (1 + (d_i / xi[turb]) ** (2 / 3) / 3)
    )
    # alpha value is Nusselt number * correction factor * fluid lambda / d_i,
    # VDI Wärmeatlas 2013, Chapter G1 - 4.1:
    alpha[:] = Nu * K * lam_fld / d_i
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def phex_alpha_i_wll_sep(
    dm, T_fld, T_wll, rho, ny, lam_fld, A, d_h, x, corr_Re, alpha
):
    """
    Calculates the inner alpha value in [W/(m**2K)] between the fluid inside a
    plate heat exchanger and the (rectangular) heat exchanger channel wall for
    each cell of a plate heat exchanger.
    In this case, the wall is considered in a separate row of the temperature
    array and can thus have temperatures different from the fluid temperature.
    Parameters:
    -----------
    dm : np.ndarray, float, integer
        Massflow in the pipe/TES in [kg/s].
    T_fld : np.ndarray, float
        Fluid celsius temperature, used for the fluid Prandtl number.
    T_wll : np.ndarray, float
        Wall celsius temperature, used for the wall Prandtl number and the
        wall/fluid correction factor K.
    rho : np.ndarray
        Fluid density in [kg/m**3].
    ny : np.ndarray
        Fluid kinematic viscosity in [m**2/s].
    lam_fld : np.ndarray
        Fluid heat conductivity in [W/(mK)].
    A : float, integer
        Inner pipe cross section in [m**2] for round pipes or hydraulic cross
        section (fluid area perpendicular to the flow direction) for pipes of
        other shapes.
    d_h : float, integer
        Hydraulic diameter of the heat exchanger channel in [m].
    x : float, integer
        Total plate heat exchanger length in [m].
    corr_Re : float, int, np.ndarray
        Additive Reynolds number correction accounting for turbulators.
    alpha : np.ndarray
        Array to save the resulting alpha value in [W/(m**2K)] for all cells
        of the pipe/TES.
    """
    # save shape:
    # shape = rho.shape
    shape = alpha.shape
    # preallocate arrays:
    Re = np.zeros(shape)  # not needed, since for a pipe/hex this is a scalar
    # Pr = np.zeros((100,))
    Nu = np.zeros(shape)
    # get Reynolds and Prandtl number:
    get_Re_water(dm / (rho * A), d_h, ny, Re)  # hydraulic diameter as length!
    # Re = Re_water_return(dm / (rho * A), d_h, ny)  # hydraulic diameter as len!
    Pr_f = Pr_water_return(T_fld)
    Pr_wll = Pr_water_return(T_wll)
    # apply correction difference for turbulators on Reynolds number:
    Re += corr_Re  # [0]
    # get correction factor for the difference in wall and fluid temperature
    # following VDI Wärmeatlas 2013, Chapter G1 - 3.1.3 equation (13):
    K = (Pr_f / Pr_wll) ** 0.11
    # get Peclet number to replace calculations of Re*Pr
    Pe = Re * Pr_f
    # get a mask for the turbulent flows:
    turb = Re > 2300
    # equations for mean laminar Nusselt number following VDI Wärmeatlas 2013,
    # Chapter G1 - 3.1.2 equation (12) with (4), (5) and (11)
    Pe_dx = Pe[~turb] * d_h / x  # precalculate this
    # NOTE(review): unlike `phex_alpha_i_wll_sep_discretized`, `Pr_f` is used
    # UNMASKED below while `Pe` is masked — presumably fine because all cells
    # of a plate hex share one flow regime (Pr_f effectively scalar); confirm.
    Nu[~turb] = (
        49.371  # 49.371 is 3.66**3 of eq (4) + 0.7**3 of eq (12)
        + (1.615 * (Pe_dx) ** (1 / 3) - 0.7) ** 3  # equation (5)
        + ((2 / (1 + 22 * Pr_f)) ** (1 / 6) * (Pe_dx) ** 0.5) ** 3  # eq(11)
    ) ** (1 / 3)
    # equations for mean turbulent Nusselt number following VDI Wärmeatlas
    # 2013 Chapter G1 - 4.1 equations (27) and (26):
    f = (1.8 * np.log10(Re[turb]) - 1.5) ** (-2)
    Nu[turb] = (
        (f / 8 * Pe[turb])
        / (1 + 12.7 * (Pr_f ** (2 / 3) - 1) * (f / 8) ** (0.5))
        * (1 + (d_h / x) ** (2 / 3))
    )
    # alpha value is Nusselt number * correction factor * fluid lambda / d_i,
    # VDI Wärmeatlas 2013, Chapter G1 - 4.1:
    alpha[:] = Nu * K * lam_fld / d_h
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def phex_alpha_i_wll_sep_discretized(
    dm, T_fld, T_wll, rho, ny, lam_fld, A, d_h, x, corr_Re, alpha
):
    """
    Calculates the inner alpha value in [W/(m**2K)] between the fluid inside a
    plate heat exchanger and the (rectangular) heat exchanger channel wall for
    each cell of a plate heat exchanger.
    In this case, the wall is considered in a separate row of the temperature
    array and can thus have temperatures different from the fluid temperature.
    Discretized variant of `phex_alpha_i_wll_sep`: `x`, `Pr_f` and the
    Nusselt correlations are evaluated per cell.
    Parameters:
    -----------
    dm : np.ndarray, float, integer
        Massflow in the pipe/TES in [kg/s].
    T_fld : np.ndarray
        Fluid celsius temperature per cell, for the fluid Prandtl number.
    T_wll : np.ndarray
        Wall celsius temperature per cell, for the wall Prandtl number and
        the wall/fluid correction factor K.
    rho : np.ndarray
        Fluid density in [kg/m**3].
    ny : np.ndarray
        Fluid kinematic viscosity in [m**2/s].
    lam_fld : np.ndarray
        Fluid heat conductivity in [W/(mK)].
    A : float, integer
        Inner pipe cross section in [m**2] for round pipes or hydraulic cross
        section (fluid area perpendicular to the flow direction) for pipes of
        other shapes.
    d_h : float, integer
        Hydraulic diameter of the heat exchanger channel in [m].
    x : np.ndarray
        Per-cell flow length coordinate in [m].
    corr_Re : float, int, np.ndarray
        Additive Reynolds number correction accounting for turbulators.
    alpha : np.ndarray
        Array to save the resulting alpha value in [W/(m**2K)] for all cells
        of the pipe/TES.
    """
    # save shape:
    # shape = rho.shape
    shape = alpha.shape
    # preallocate arrays:
    Re = np.zeros(shape)  # not needed, since for a pipe/hex this is a scalar
    # Pr = np.zeros((100,))
    Nu = np.zeros(shape)
    # get Reynolds and Prandtl number:
    get_Re_water(dm / (rho * A), d_h, ny, Re)  # hydraulic diameter as length!
    # Re = Re_water_return(dm / (rho * A), d_h, ny)  # hydraulic diameter as len!
    Pr_f = Pr_water_return(T_fld)
    Pr_wll = Pr_water_return(T_wll)
    # apply correction difference for turbulators on Reynolds number:
    Re += corr_Re  # [0]
    # get correction factor for the difference in wall and fluid temperature
    # following VDI Wärmeatlas 2013, Chapter G1 - 3.1.3 equation (13):
    K = (Pr_f / Pr_wll) ** 0.11
    # get Peclet number to replace calculations of Re*Pr
    Pe = Re * Pr_f
    # get a mask for the turbulent flows:
    turb = Re > 2300
    # equations for mean laminar Nusselt number following VDI Wärmeatlas 2013,
    # Chapter G1 - 3.1.2 equation (12) with (4), (5) and (11)
    Pe_dx = Pe[~turb] * d_h / x[~turb]  # precalculate this
    Nu[~turb] = (
        49.371  # 49.371 is 3.66**3 of eq (4) + 0.7**3 of eq (12)
        + (1.615 * (Pe_dx) ** (1 / 3) - 0.7) ** 3  # equation (5)
        + ((2 / (1 + 22 * Pr_f[~turb])) ** (1 / 6) * (Pe_dx) ** 0.5)
        ** 3  # eq(11)
    ) ** (1 / 3)
    # equations for mean turbulent Nusselt number following VDI Wärmeatlas
    # 2013 Chapter G1 - 4.1 equations (27) and (26):
    f = (1.8 * np.log10(Re[turb]) - 1.5) ** (-2)
    Nu[turb] = (
        (f / 8 * Pe[turb])
        / (1 + 12.7 * (Pr_f[turb] ** (2 / 3) - 1) * (f / 8) ** (0.5))
        * (1 + (d_h / x[turb]) ** (2 / 3))
    )
    # alpha value is Nusselt number * correction factor * fluid lambda / d_i,
    # VDI Wärmeatlas 2013, Chapter G1 - 4.1:
    alpha[:] = Nu * K * lam_fld / d_h
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def cylinder_alpha_inf(T_s, T_inf, flow_length, vertical, r_total, alpha_inf):
    """
    Calculates the outer alpha value in [W/(m**2K)], between the outer cylinder
    wall and the fluid of the environment, of a cylinder in a standard
    environment on the outer surface.
    Parameters:
    -----------
    T_s : np.ndarray
        Surface temperature(s) of the outer layer; averaged with `T_inf` and
        used for the Rayleigh number.
    r_total : float, int
        Total radius of the cylinder including wall and additional material
        layer like insulation.
    alpha_inf : np.ndarray
        Heat transfer coefficient in [W / (m**2K)] between the outer layer and
        the ambient. The shape must equal the fluid temperature array shape or
        be a single array cell. This array is used to calculate the new outer
        surface temperature and to get the new alpha_inf value for the
        calculation of the current U*A-value. Thus this array is
    T_inf : float, int, np.ndarray
        Ambient temperature in [°C] or [K]. If given as array, it must be a
        single cell!
    flow_length : float, int
        Equivalent low length of the horizontal pipe or vertical pipe/TES in
        [m].
    vertical : bool
        Giving information if this pipe/TES is vertical or horizontal. If
        vertical,
    """
    # Kelvin temperature:
    Kelvin = 273.15
    # Prandtl number of DRY air is nearly constant and thus set to:
    Pr = 0.708
    # f_Pr = 0.347
    # get mean temperature of wall and ambient air:
    T_mean = (T_inf + T_s) / 2
    # get kin. viscosity and lambda for mean temperature:
    ny = np.zeros(T_mean.shape)
    lam = np.zeros(T_mean.shape)
    get_ny_dryair(T_mean, ny)
    get_lam_dryair(T_mean, lam)
    # get Rayleigh number according to VDI Wärmeatlas 2013 chapter F1
    # eq (7), replacing kappa with kappa = ny/Pr (F1 eq (8)) and beta
    # with 1/T_inf (F1 eq (2)):
    Ra = (
        np.abs(T_s - T_inf)
        * 9.81
        * flow_length ** 3
        * Pr
        / ((T_inf + Kelvin) * ny ** 2)
    )
    # check if the cylinder is vertical or horizontal:
    if vertical:
        # get Prandtl number influence function for vertical surfaces according
        # to VDI Wärmeatlas 2013 chapter F2 equation (2):
        # f_Pr = (1 + (0.492 / Pr)**(9/16))**(-16/9) this is const for const Pr
        f_Pr = 0.3466023585520853
        # get the Nusselt number for a vertical cylinder by use of VDI
        # Wärmeatlas 2013 chapter F2.1 eq(1) and eq(3):
        Nu = (
            0.825 + 0.387 * (Ra * f_Pr) ** (1 / 6)
        ) ** 2 + 0.435 * flow_length / (2 * r_total)
    else:
        # get Prandtl number influence function for horizontal cylinders
        # according to VDI Wärmeatlas 2013 chapter F2.4 equation (13):
        # f_Pr = (1 + (0.559 / Pr)**(9/16))**(-16/9) this is const for const Pr
        f_Pr = 0.3269207911296459
        # get the Nusselt number for a horizontal cylinder by use of VDI
        # Wärmeatlas 2013 chapter F2.4 eq(11):
        Nu = (0.752 + 0.387 * (Ra * f_Pr) ** (1 / 6)) ** 2
    # get alpha (Nusselt number * dry air lambda / flow length):
    alpha_inf[:] = Nu * lam / flow_length
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def plane_alpha_inf(T_s, T_inf, flow_length, vertical, top):
    """
    Dispatch the free-convection alpha calculation for a plane surface.

    Returns the heat transfer coefficient in [W/(m**2K)] between the plane
    surface and the fluid of the standard environment by delegating to
    `vert_plane_alpha_inf` for vertical surfaces, or to
    `hor_plane_alpha_inf` (which additionally needs the `top` flag) for
    horizontal surfaces.

    Parameters:
    -----------
    T_s : np.ndarray
        Surface temperature(s).
    T_inf : float, int, np.ndarray
        Ambient temperature in [°C] or [K]. If given as array, it must be a
        single cell!
    flow_length : float, int
        Characteristic flow length of the surface in [m].
    vertical : bool
        True for a vertical surface, False for a horizontal one.
    top : bool array-like
        Only used for horizontal surfaces: True where the considered surface
        is the top side of the plate.
    """
    # guard-style dispatch on the orientation:
    if vertical:
        return vert_plane_alpha_inf(T_s, T_inf, flow_length)
    return hor_plane_alpha_inf(T_s, T_inf, flow_length, top)
@njit(nogil=GLOB_NOGIL, cache=True)
def vert_plane_alpha_inf(T_s, T_inf, flow_length):
    """
    Calculates the outer alpha value in [W/(m**2K)] between the vertical plane
    surface wall and the fluid of the standard environment.

    Parameters:
    -----------
    T_s : np.ndarray
        Surface temperature(s); averaged with `T_inf` for the dry air
        property lookup and passed to `rayleigh_number`.
    T_inf : float, int, np.ndarray
        Ambient temperature. If given as array, it must be a single cell!
    flow_length : float, int
        Characteristic flow length of the surface in [m].
    """
    # Kelvin temperature:
    Kelvin = 273.15
    # Prandtl number of DRY air is nearly constant and thus set to:
    Pr = 0.708
    # f_Pr = 0.347
    # get mean temperature of wall and ambient air:
    T_mean = (T_inf + T_s) / 2
    # get kin. viscosity and lambda for mean temperature:
    ny = np.zeros(T_mean.shape)
    lam = np.zeros(T_mean.shape)
    get_ny_dryair(T_mean, ny)
    get_lam_dryair(T_mean, lam)
    # # get Rayleigh number according to VDI Wärmeatlas 2013 chapter F1
    # # eq (7), replacing kappa with kappa = ny/Pr (F1 eq (8)) and beta
    # # with 1/T_inf (F1 eq (2)):
    # Ra = ((T_s - T_inf) * 9.81 * flow_length**3 * Pr
    # / ((T_inf + Kelvin) * ny**2))
    # get Prandtl number influence function for vertical surfaces according
    # to VDI Wärmeatlas 2013 chapter F2.1 equation (2):
    # f_Pr = (1 + (0.492 / Pr)**(9/16))**(-16/9) this is const for const Pr
    f_Pr = 0.3466023585520853
    # get the Nusselt number for a vertical cylinder by use of VDI
    # Wärmeatlas 2013 chapter F2.1 eq(1):
    Nu = (
        0.825
        + 0.387
        * (rayleigh_number(T_s, T_inf, Pr, ny, Kelvin, flow_length) * f_Pr)
        ** (1 / 6)
    ) ** 2
    # get alpha (Nusselt number * dry air lambda / flow length):
    return Nu * lam / flow_length
# @njit(float64(float64, float64, float64, nb.boolean),
@njit(nogil=GLOB_NOGIL, cache=True)
def hor_plane_alpha_inf(T_s, T_inf, flow_length, top):
    """
    Calculates the outer alpha value in [W/(m**2K)] between the plane surface
    wall and the fluid of the standard environment of a horizontal plane.

    Parameters:
    -----------
    T_s : np.ndarray
        Surface temperatures; the correlations are evaluated element-wise
        over ``T_s``.
    T_inf : float, int
        Ambient temperature. NOTE(review): presumably in [°C] like the
        other functions in this module — confirm.
    flow_length : float, int
        Characteristic flow length of the plate in [m].
    top : array of bool
        Per element: True if the considered surface is the top side of the
        plate, False for the bottom side. Together with the sign of
        ``T_s - T_inf`` this selects between the VDI Wärmeatlas F2.3.1 and
        F2.3.2 correlations.

    Returns:
    --------
    np.ndarray
        Heat transfer coefficient in [W/(m**2K)] per element of ``T_s``.
    """
    # Kelvin temperature:
    Kelvin = 273.15
    # Prandtl number of DRY air is nearly constant and thus set to:
    Pr = 0.708
    # f_Pr = 0.347
    # get mean temperature of wall and ambient air:
    T_mean = (T_inf + T_s) / 2
    # get kin. viscosity and lambda for mean temperature:
    ny = ny_dryair_return(T_mean)
    lam = lam_dryair_return(T_mean)
    Nu = np.empty(T_mean.shape)
    Ra = rayleigh_number(  # get Rayleigh-number:
        T_s, T_inf, Pr, ny, Kelvin, flow_length
    )
    # calculation following VDI Wärmeatlas 2013
    for i in range(T_s.shape[0]):
        if (top[i] and T_s[i] >= T_inf) or (not top[i] and T_s[i] < T_inf):
            # VDI F2.3.1
            # heat conduction from the top of the plate to fluid OR from the fluid
            # to the bottom of the plate
            # get Prandtl number influence function for hor. surfaces according
            # to VDI Wärmeatlas 2013 chapter F2.3.1 equation (9):
            # f_Pr = (1 + (0.322 / Pr)**(11/20))**(-20/11)this is const for const Pr
            f_Pr = 0.40306002707296223
            # Ra_f_Pr = rayleigh_number(  # get Ra*f_Pr for turbulence check
            # T_s[i], T_inf, Pr, ny[i], Kelvin, flow_length) * f_Pr
            Ra_f_Pr = Ra[i] * f_Pr
            # get the Nusselt number for a hor. plane, VDI Wärmeatlas 2013:
            if Ra_f_Pr <= 7e4:  # laminar flow
                Nu[i] = 0.766 * (Ra_f_Pr) ** (1 / 5)  # VDI F2.3.1 eq (7)
            else:  # turbulent flow
                Nu[i] = 0.15 * (Ra_f_Pr) ** (1 / 3)  # VDI F2.3.1 eq (8)
        else:  # VDI F2.3.2
            # heat conduction from the fluid to the top of the plate OR from the
            # bottom of the plate to the fluid
            # get Prandtl number influence function for vertical surfaces according
            # to VDI Wärmeatlas 2013 chapter F2.1 equation (2):
            # f_Pr = (1 + (0.492 / Pr)**(9/16))**(-16/9) this is const for const Pr
            f_Pr = 0.3466023585520853
            # Ra_f_Pr = rayleigh_number(  # get Ra*f_Pr
            # T_s[i], T_inf, Pr, ny[i], Kelvin, flow_length) * f_Pr
            # Ra_f_Pr = Ra[i] * f_Pr
            # get Nusselt number, only valid for 1e3 <= Ra*f_Pr <= 1e10, but there
            # is no known correlation for turbulent convection!
            Nu[i] = 0.6 * (Ra[i] * f_Pr) ** (1 / 5)  # VDI F2.3.2 eq (10)
    # return alpha:
    return Nu * lam / flow_length
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_ins_amb_cyl(
    A_i, r_ln_wll, r_ln_ins, r_rins, alpha_i, alpha_inf, lam_wll, lam_ins, out
):
    """
    Store the U*A-value in [W/K] for the radial heat flow between the fluid
    inside a cylinder (pipe or round TES) and the ambient in `out`.

    The serial resistance chain fluid -> wall -> insulation (or any other
    additional material layer) -> ambient is referred to the fluid-wall
    contact area `A_i` for consistency with the other U*A calculations.

    Parameters:
    -----------
    A_i : float, int
        The fluid-wall-contact area PER CELL, pre-calculated as
        A_i = np.pi * r_i * 2 * grid_spacing.
    r_ln_wll : float, int
        Radial wall thickness factor referred to the reference area,
        pre-calculated as r_ln_wll = r_i * np.log(r_o / r_i).
    r_ln_ins : float, int
        Radial insulation thickness factor referred to the reference area,
        pre-calculated as r_ln_ins = r_i * np.log((r_o + s_ins) / r_o).
    r_rins : float, int
        Radial insulation-to-ambient factor referred to the reference area,
        pre-calculated as r_rins = r_i / (r_o + s_ins).
    alpha_i : int, float, np.ndarray
        Heat transfer coefficient fluid -> wall in [W / (m**2K)]. Shape must
        match the fluid temperature array if given as array.
    alpha_inf : np.ndarray
        Heat transfer coefficient outer layer -> ambient in [W / (m**2K)].
    lam_wll : int, float, np.ndarray
        Wall heat conductivity in [W / (mK)].
    lam_ins : int, float, np.ndarray
        Insulation heat conductivity in [W / (mK)].
    out : np.ndarray
        Result array for U*A in [W/K], written in place.
    """
    # sum of the serial thermal resistances, referred to the area A_i:
    resistance = (
        1 / alpha_i  # convection fluid -> wall
        + r_ln_wll / lam_wll  # conduction through the wall
        + r_ln_ins / lam_ins  # conduction through the insulation
        + r_rins / alpha_inf  # convection insulation -> ambient
    )
    out[:] = A_i / resistance
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_amb_cyl(A_i, r_ln_wll, r_ro, alpha_i, alpha_inf, lam_wll, UA):
    """
    Store the U*A-value in [W/K] for the radial heat flow between the fluid
    inside a cylinder (pipe or round TES) and the ambient in `UA`.

    The serial resistance chain fluid -> wall -> ambient (no insulation
    layer) is referred to the fluid-wall contact area `A_i` for consistency
    with the other U*A calculations.

    Parameters:
    -----------
    A_i : float, int
        The fluid-wall-contact area PER CELL, pre-calculated as
        A_i = np.pi * r_i * 2 * grid_spacing.
    r_ln_wll : float, int
        Radial wall thickness factor referred to the reference area,
        pre-calculated as r_ln_wll = r_i * np.log(r_o / r_i).
    r_ro : float, int
        Radial wall-to-ambient factor referred to the reference area,
        pre-calculated as r_ro = r_i / r_o.
    alpha_i : int, float, np.ndarray
        Heat transfer coefficient fluid -> wall in [W / (m**2K)].
    alpha_inf : int, float, np.ndarray
        Heat transfer coefficient outer layer -> ambient in [W / (m**2K)].
    lam_wll : int, float, np.ndarray
        Wall heat conductivity in [W / (mK)].
    UA : np.ndarray
        Result array for U*A in [W/K], written in place.
    """
    # sum of the serial thermal resistances, referred to the area A_i:
    resistance = 1 / alpha_i + r_ln_wll / lam_wll + r_ro / alpha_inf
    UA[:] = A_i / resistance
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_cyl(A_i, r_i, r_o, alpha_i, lam_wll, UA):
    """
    Calculates the U*A-value for the heat flow to or from the fluid inside a
    cylinder like a pipe or a round TES to or from the wall in radial
    direction. The wall is considered as a single finite volume element per
    cell, thus the heat flow is calculated to the mid-point (radius wise, not
    mass wise, thus r_mid = (r_o + r_i) / 2) of the wall.
    Layers which are considered: fluid, wall material.
    The reference area must always be the fluid-wall-contact-area for
    consistency with other calculations.
    Parameters:
    -----------
    A_i : float, int
        The fluid-wall-contact area PER CELL. Calculated with:
            A_i = np.pi * r_i * 2 * grid_spacing
    r_i : float, int
        Radius in [m] of the fluid-wall-contact-area.
    r_o : float, int
        Radius in [m] of the outer wall surface.
    alpha_i : int, float, np.ndarray
        Heat transfer coefficient in [W / (m**2K)] between the fluid inside the
        pipe and the wall. The shape must equal the fluid temperature array
        shape, if given as array.
    lam_wll : int, float, np.ndarray
        Wall heat conductivity in [W / (mK)]. The shape must equal the fluid or
        wall temperature array shape, if given as array.
    UA : np.ndarray
        Result array where U*A in [W/K] is stored in place.
    """
    # the log mean outer diameter is taken for length of lam_wll:
    # np.log((r_o / r_i + 1) / 2) = np.log((r_o + r_i)/ 2 / r_i)
    # with r_wll = (r_o + r_i) / 2
    # TODO: for constant r_i/r_o the np.log term is loop-invariant and could
    # be precomputed by the caller. (This reminder was previously an
    # unconditional debug `print` executed on every call; removed.)
    UA[:] = A_i / (1 / alpha_i + r_i * np.log((r_o / r_i + 1) / 2) / lam_wll)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_wll_ins_amb_cyl(
    A_i, r_i, r_o, r_ln_ins, r_rins, alpha_inf, lam_wll, lam_ins, UA
):
    """
    Store the U*A-value in [W/K] for the radial heat flow between the wall of
    a cylinder (pipe or round TES) and the ambient in `UA`.

    The wall is a single finite volume element per cell, so the heat flow
    starts at the wall mid-point (radius wise, r_wll = (r_o + r_i) / 2).
    The serial resistance chain wall -> insulation (or any other additional
    material layer) -> ambient is referred to the fluid-wall contact area
    `A_i` for consistency with the other U*A calculations.

    Parameters:
    -----------
    A_i : float, int
        The fluid-wall-contact area PER CELL, pre-calculated as
        A_i = np.pi * r_i * 2 * grid_spacing.
    r_i : float, int
        Radius in [m] of the fluid-wall-contact-area.
    r_o : float, int
        Radius in [m] of the outer wall surface.
    r_ln_ins : float, int
        Radial insulation thickness factor referred to the reference area,
        pre-calculated as r_ln_ins = r_i * np.log((r_o + s_ins) / r_o).
    r_rins : float, int
        Radial insulation-to-ambient factor referred to the reference area,
        pre-calculated as r_rins = r_i / (r_o + s_ins).
    alpha_inf : int, float, np.ndarray
        Heat transfer coefficient outer layer -> ambient in [W / (m**2K)].
    lam_wll : int, float, np.ndarray
        Wall heat conductivity in [W / (mK)].
    lam_ins : int, float, np.ndarray
        Insulation heat conductivity in [W / (mK)].
    UA : np.ndarray
        Result array for U*A in [W/K], written in place.
    """
    # half-wall conduction from the wall mid-point to the outer wall surface,
    # using the log mean: np.log(2 / (r_i / r_o + 1)) = np.log(r_o * 2 / (r_o + r_i))
    res_wll = r_i * np.log(2 / (r_i / r_o + 1)) / lam_wll
    # conduction through the insulation layer:
    res_ins = r_ln_ins / lam_ins
    # convection from the insulation surface to the ambient:
    res_amb = r_rins / alpha_inf
    UA[:] = A_i / (res_wll + res_ins + res_amb)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_wll_amb_cyl(A_i, r_i, r_o, alpha_inf, lam_wll, UA):
    """
    Store the U*A-value in [W/K] for the radial heat flow between the wall of
    a cylinder (pipe or round TES) and the ambient in `UA`.

    The wall is a single finite volume element per cell, so the heat flow
    starts at the wall mid-point. The serial resistance chain
    wall -> ambient (no insulation layer) is referred to the fluid-wall
    contact area `A_i` for consistency with the other U*A calculations.

    Parameters:
    -----------
    A_i : float, int
        The fluid-wall-contact area PER CELL, pre-calculated as
        A_i = np.pi * r_i * 2 * grid_spacing.
    r_i : float, int
        Radius in [m] of the fluid-wall-contact-area.
    r_o : float, int
        Radius in [m] of the outer wall surface.
    alpha_inf : int, float, np.ndarray
        Heat transfer coefficient outer layer -> ambient in [W / (m**2K)].
    lam_wll : int, float, np.ndarray
        Wall heat conductivity in [W / (mK)].
    UA : np.ndarray
        Result array for U*A in [W/K], written in place.
    """
    # half-wall conduction from the wall mid-point to the outer wall surface,
    # using the log mean: np.log(2 / (r_i / r_o + 1)) = np.log(r_o * 2 / (r_o + r_i))
    res_wll = r_i * np.log(2 / (r_i / r_o + 1)) / lam_wll
    # convection from the outer wall surface to the ambient:
    res_amb = r_i / (r_o * alpha_inf)
    UA[:] = A_i / (res_wll + res_amb)
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_plate(A, s_wll, alpha_fld, lam_wll):
    """
    Calculate and return the U*A-value in [W/K] for the heat flow between a
    fluid and the wall of a plate.

    Layers which are considered: fluid, wall material.
    The reference area must always be the cross section area.

    Parameters:
    -----------
    A : float, int
        The fluid-wall-contact area in [m^2].
    s_wll : float, int
        Wall thickness in [m].
    alpha_fld : int, float
        Heat transfer coefficient in [W/(m^2K)] between the fluid and the
        wall.
    lam_wll : int, float
        Wall heat conductivity in [W/(mK)].

    Returns:
    --------
    float
        U*A-value in [W/K].
    """
    # serial resistances: fluid-to-wall convection, then conduction in the
    # wall material
    resist_fld = 1 / alpha_fld
    resist_wll = s_wll / lam_wll
    return A / (resist_fld + resist_wll)
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_fld_wll_ins_amb_plate(
    A, s_wll, s_ins, alpha_fld, alpha_inf, lam_wll, lam_ins
):
    """
    Calculates the U*A-value for the heat flow to or from a fluid at a plate
    with or without insulation to or from the ambient.
    Layers which are considered: fluid, wall material, insulation, ambient.
    The reference area must always be the cross section area.

    Parameters:
    -----------
    A : float, int
        The fluid-wall-contact area in [m^2].
    s_wll : float, int
        Wall thickness in [m].
    s_ins : float, int
        Insulation thickness in [m]. Can be zero.
    alpha_fld : int, float
        Heat transfer coefficient in [W/(m^2K)] between the fluid and the wall.
    alpha_inf : int, float
        Heat transfer coefficient in [W/(m^2K)] between the outer layer and
        the ambient.
    lam_wll : int, float
        Wall heat conductivity in [W/(mK)].
    lam_ins : int, float
        Insulation heat conductivity in [W/(mK)].

    Returns:
    --------
    float
        U*A-value in [W/K] of the full layer series.
    """
    # sum of the serial resistances: fluid convection, wall conduction,
    # insulation conduction and convection to the ambient
    return A / (
        1 / alpha_fld + s_wll / lam_wll + s_ins / lam_ins + 1 / alpha_inf
    )
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def UA_wll_ins_amb_plate(A, s_wll, s_ins, lam_wll, lam_ins, alpha_inf):
    """
    Calculates the U*A-value for the heat flow to or from a plate with or
    without insulation to or from the ambient.
    Layers which are considered: wall material, insulation, ambient.
    The reference area must always be the cross section area.

    Parameters:
    -----------
    A : float, int
        The fluid-wall-contact area in [m^2].
    s_wll : float, int
        Wall thickness in [m].
    s_ins : float, int
        Insulation thickness in [m].
    lam_wll : int, float
        Wall heat conductivity in [W/(mK)].
    lam_ins : int, float
        Insulation heat conductivity in [W/(mK)].
    alpha_inf : int, float
        Heat transfer coefficient in [W/(m^2K)] between the insulation and the
        ambient.

    Returns:
    --------
    float
        U*A-value in [W/K] of the layer series.
    """
    # serial resistances: wall conduction, insulation conduction and
    # convection to the ambient
    return A / (s_wll / lam_wll + s_ins / lam_ins + 1 / alpha_inf)
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def surface_temp_steady_state_inplace(T, T_inf, A_s, alpha_inf, UA, T_s):
    """
    Store the steady state outer surface temperature inplace in `T_s`.

    Follows WTP Formelsammlung chapter 3.3 with
    sigma = (T_s - T_inf) / (T - T_inf) instead of the constant heat
    production formula. The heat flow from the fluid through the wall (and
    insulation) to the ambient is set equal to the heat flow from the
    outer surface to the ambient:
        (T_s - T_inf) * alpha_inf * A_s = UA * (T - T_inf)
    This is only exact in steady state, thus an error is incorporated in
    transient simulations.

    Parameters:
    -----------
    T : np.ndarray
        Fluid temperature array.
    T_inf : float, int, np.ndarray
        Ambient temperature in [°C] or [K]. If given as array, it must be a
        single cell!
    A_s : float, int
        The outer surface area (air-contact-area) PER CELL. Calculated with:
        A_s = np.pi * r_s * 2 * grid_spacing
    alpha_inf : np.ndarray
        Heat transfer coefficient in [W / (m**2K)] between the outer layer
        and the ambient. The shape must equal the fluid temperature array
        shape or be a single array cell.
    UA : float, int, np.ndarray
        Total heat transfer coefficient in [W/K]. Since UA already
        incorporates the inner fluid-wall-contact-surface as reference
        area, alpha_inf is rescaled by its area A_s here.
    T_s : np.ndarray
        Result array for the surface temperature.
    """
    # temperature rise of the surface over ambient in steady state
    excess_temp = (T - T_inf) * UA / (alpha_inf * A_s)
    T_s[:] = T_inf + excess_temp
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def surface_temp_steady_state(T, T_inf, A_s, alpha_inf, UA):
    """
    Return the steady state outer surface temperature.

    Follows WTP Formelsammlung chapter 3.3 with
    sigma = (T_s - T_inf) / (T - T_inf) instead of the constant heat
    production formula. The heat flow from the fluid through the wall (and
    insulation) to the ambient is set equal to the heat flow from the
    outer surface to the ambient:
        (T_s - T_inf) * alpha_inf * A_s = UA * (T - T_inf)
    This is only exact in steady state, thus an error is incorporated in
    transient simulations.

    Parameters:
    -----------
    T : np.ndarray
        Fluid temperature array.
    T_inf : float, int, np.ndarray
        Ambient temperature in [°C] or [K]. If given as array, it must be a
        single cell!
    A_s : float, int
        The outer surface area (air-contact-area) PER CELL. Calculated with:
        A_s = np.pi * r_s * 2 * grid_spacing
    alpha_inf : np.ndarray
        Heat transfer coefficient in [W / (m**2K)] between the outer layer
        and the ambient. The shape must equal the fluid temperature array
        shape or be a single array cell.
    UA : float, int, np.ndarray
        Total heat transfer coefficient in [W/K]. Since UA already
        incorporates the inner fluid-wall-contact-surface as reference
        area, alpha_inf is rescaled by its area A_s here.
    """
    # temperature rise of the surface over ambient in steady state
    excess_temp = (T - T_inf) * UA / (alpha_inf * A_s)
    return T_inf + excess_temp
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def series_circuit_UA(*args):
    """
    Calculates the total U*A-value for a series circuit of two or more U*A
    values.

    Parameters:
    -----------
    UA : float, int, np.ndarray
        U*A value (heat conductivity) in [W/K] for each part of the series
        circuit. If given as np.ndarray, all arrays have to be of the same
        shape.

    Returns:
    --------
    UA_series : float, np.ndarray
        Total U*A value (heat conductivity) in [W/K] of the series.
    """
    # resistances (inverse conductivities) add up in a series circuit
    inv_UA_total = 1 / args[0]  # start with inverse of the first value
    for idx, UA_val in enumerate(args):
        if idx > 0:  # first value already taken above
            inv_UA_total += 1 / UA_val  # sum up inverse values
    return 1 / inv_UA_total  # return inverse of the sum
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def parallel_circuit_UA(*args):
    """
    Calculates the total U*A-value for a parallel circuit of two or more U*A
    values.

    Parameters:
    -----------
    UA : float, int, np.ndarray
        U*A value (heat conductivity) in [W/K] for each part of the parallel
        circuit. If given as np.ndarray, all arrays have to be of the same
        shape.

    Returns:
    --------
    UA_series : float, np.ndarray
        Total U*A value (heat conductivity) in [W/K] of the parallel circuit.
    """
    # conductivities simply add up in a parallel circuit
    UA_total = args[0]  # start with the first value
    for idx, UA_val in enumerate(args):
        if idx > 0:  # first value already taken above
            UA_total += UA_val  # sum up values
    return UA_total  # return sum
# ---> GENERAL FUNCTIONS:
# logarithmic mean temperature difference:
@nb.njit(nogil=GLOB_NOGIL, cache=True)
def log_mean_temp_diff(T_A_one, T_A_two, T_B_one, T_B_two):
    """
    Calculate the logarithmic mean temperature difference (LMTD) of two fluid
    streams `one` and `two` of a heat exchanger with two ends `A` and `B`.

    Parameters:
    -----------
    T_A_one : float, int, np.array
        Fluid temperature of stream one at end A.
    T_A_two : float, int, np.array
        Fluid temperature of stream two at end A.
    T_B_one : float, int, np.array
        Fluid temperature of stream one at end B.
    T_B_two : float, int, np.array
        Fluid temperature of stream two at end B.
    """
    # temperature difference between the streams at each end
    dT_end_A = T_A_one - T_A_two
    dT_end_B = T_B_one - T_B_two
    return (dT_end_A - dT_end_B) / np.log(dT_end_A / dT_end_B)
# get simple moving average of the array x and N cells:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def moving_avg(x, N):
    """Simple moving average of `x` over windows of `N` cells."""
    # prepend a single zero so the cumulative sum can be differenced
    padded = np.zeros(x.shape[0] + 1)
    padded[1:] = x
    csum = np.cumsum(padded)
    # the difference of the cumulative sum N cells apart is the window sum
    return (csum[N:] - csum[:-N]) / float(N)
# fill the edges of x to new_length, so that input x is placed in the middle
# of the output array. if the number of new cells is not even, the array is
# shifted one cell to the end:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def fill_edges(x, new_length):
    """
    Pad `x` to `new_length` so that the input sits in the middle of the
    output, repeating the first/last value at the edges. If the number of
    new cells is not even, the data is shifted one cell towards the end.
    """
    n_old = x.shape[0]  # old length
    n_add = new_length - n_old  # total number of cells to add
    padded = np.zeros(new_length)  # new array
    offset = n_add // 2 + n_add % 2  # where the original data starts
    padded[offset : offset + n_old] = x  # original data in the middle
    padded[:offset] = x[0]  # repeat first value in front
    padded[n_old + offset :] = x[-1]  # repeat last value at the end
    return padded
# this function calls simple moving average on array x and N cells AND fills
# the edges with the last and first value:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def moving_avg_fill_edges(x, N):
    """Moving average over `N` cells, edge-padded to the shape of `x`."""
    smoothed = moving_avg(x, N)
    return fill_edges(smoothed, x.shape[0])
# get window weighted moving average over array x with window weight wght and
# the possibility to fill the edges with the last correct value to get an array
# of the same shape as x:
@nb.jit(nopython=True, nogil=GLOB_NOGIL, cache=True)
def weighted_moving_avg(x, wght, fill_edges=False):
    """
    Window-weighted moving average over `x` with window weights `wght`.

    Parameters:
    -----------
    x : np.ndarray
        Array to average over.
    wght : np.ndarray
        Window weights; its size defines the window length.
    fill_edges : bool, optional
        If True, the cells before/after the valid averaging range are
        filled with the first/last valid average, so the result has the
        same shape as `x`. NOTE: this parameter shadows the module level
        function `fill_edges`, which is thus not callable from here.

    Returns:
    --------
    np.ndarray
        Weighted moving average of shape `x.shape[0] - wght.size + 1`, or
        of the shape of `x` if `fill_edges` is True.
    """
    # get number of cells to calculate average in each step and get total
    # average array length:
    N = wght.size
    length = x.shape[0] - N + 1
    # if edges shall be filled, create an array like the input array and calc.
    # new starting point where the "real" moving average is starting:
    if fill_edges:
        wa_len = x.shape[0]  # get length
        residual = wa_len - length  # calc. remaining edge points to be filled
        start = residual // 2 + residual % 2  # calc. starting point
        wa = np.zeros(wa_len)  # create result array
    else:
        start = 0  # start at 0
        wa = np.zeros(length)  # create result array
    # loop over array:
    for i in range(length):
        wa[i + start] = (x[i : i + N] * wght).sum()  # calc weighted mean
    # fill edges before start with first value and after end with last value.
    # bugfix: use the explicit index of the last valid value instead of the
    # loop variable `i` leaked out of the loop, which is undefined/stale for
    # empty ranges (length == 0).
    if fill_edges:
        wa[:start] = wa[start]
        wa[length + start :] = wa[length - 1 + start]
    return wa
@nb.njit(parallel=GLOB_PARALLEL)
def root_finder(poly_coeff, roots):
    """
    Finds the roots of a polynome for an array of root values `roots`.
    This means that a polynome, given by its polynome coefficient array
    `poly_coeff`, is reversed at each value of `roots`. A polynome defining
    the saturated water mass in air for a given temperature, this returns the
    Taupunkt temperature for a given water mass.
    Since the results have a shape of n-1 for a polynome of degree n, the
    results have to be filtered. This may be done in the following way:

    >>> # set all imaginary dominated values to zero:
    >>> rts_arr[np.abs(rts_arr.imag) > 1e-12] = 0.
    >>> # set values above an upper and lower boundary to zero:
    >>> rts_arr[rts_arr > 85] = 0.
    >>> rts_arr[rts_arr < 10] = 0.
    >>> # extract all non-zero values:
    >>> rts_arr.real[rts_arr.real != 0]
    >>> # check if the shape is correct, else use other imaginary and real
    >>> # bounds for setting to zero:
    >>> assert rts_arr.shape == roots.shape

    Parameters:
    poly_coeff : np.ndarray
        Polynomial coefficients to be reversed. Should be given as
        **dtype=np.complex128** to avoid typing errors.
    roots : np.ndarray
        Roots to solve the polynomial for.
    """
    lin_coeff = poly_coeff[-1]  # constant coefficient of the polynome
    rts_arr = np.zeros(
        (roots.shape[0], poly_coeff.shape[0] - 1), dtype=np.complex128
    )
    for i in nb.prange(roots.shape[0]):
        # bugfix: each (potentially parallel) iteration gets its own
        # private copy of the coefficients. Mutating one shared array
        # inside a prange loop is a data race when run in parallel.
        polcoeffs = poly_coeff.copy()
        polcoeffs[-1] = lin_coeff - roots[i]
        rts_arr[i, :] = np.roots(polcoeffs)
    return rts_arr
# %% Empiric relations, polynomes etc. for startup times, regression...
@nb.njit
def lim_growth(x, s, b0, k):
    """
    Limited growth function `s - (s - b0) * k**x`. Used in several fits,
    thus implemented here as a raw function, usable in closures, inlining
    etc.

    Parameters
    ----------
    x : float, int, np.ndarray
        x values of the growth function.
    s : float, optional
        Limit of the growth function.
    b0 : float, optional
        Starting value. Values of 0 are **NOT RECOMMENDED**.
    k : float, optional
        Curvature parameter.

    Returns
    -------
    float, np.ndarray
        Value at point `x`.
    """
    # remaining distance to the limit decays exponentially with x
    remaining = (s - b0) * k ** x
    return s - remaining
@nb.njit(cache=True)
def chp_startup_th(
    time, s=1.0, b0=4.3715647889609857e-4, k=8.61423130773867e-3
):
    """
    Thermal power output and/or efficiency factor during EC Power XRGi20
    CHP startup, modelled as logistic growth.
    See auswertung_bhkw.chp_fits for generation of the fit.

    Parameters
    ----------
    time : float, int, np.ndarray
        Time or timepoints in **seconds** [s] at which the startup progress
        shall be evaluated. 0 is the CHP start time.
    s : float, optional
        Maximum power to reach, maximum modulation or efficiency.
        If set to 1, will return the result as a fraction of 1, else the
        absolute value will be returned. The default is 1..
    b0 : float, optional
        Starting value. Cannot be set to zero.
        The default is 4.3715647889609857e-4.
    k : float, optional
        Curvature parameter. The default is 8.61423130773867e-3.

    Returns
    -------
    float, np.ndarray
        Value fraction of `s` at the time `time`. If `time` is an np.ndarray,
        the same type will be returned.
    """
    # logistic growth: s / (1 + (s/b0 - 1) * exp(-k * s * t))
    growth_ratio = s / b0 - 1
    return s / (1 + growth_ratio * np.e ** (-k * s * time))
@nb.njit(cache=True)
def chp_startup_el(
    time,
    s=4.6611096613889975,
    b0=-3.6832212021813398e-09,
    k=0.9997484824090741,
):
    """
    Electrical power output and/or efficiency factor during EC Power XRGi20
    CHP startup. Limited growth, close to linear growth; startup to full
    power (99% of modulation) within 950 seconds was found to be matching
    measurement data.
    See auswertung_bhkw.chp_fits for generation of the fit.

    Parameters
    ----------
    time : float, int, np.ndarray
        Time or timepoints in **seconds** [s] at which the startup progress
        shall be evaluated. 0 is the CHP start time.
    s : float, optional
        Maximum power to reach, maximum modulation or efficiency.
        If set to 1, will return the result as a fraction of 1, else the
        absolute value will be returned. The default is 4.6611.
    b0 : float, optional
        Starting value. Cannot be set to zero.
        The default is -3.68322e-9.
    k : float, optional
        Curvature parameter. The default is 0.9997484824090741.

    Returns
    -------
    float, np.ndarray
        Value fraction of `s` at the time `time`. If `time` is an np.ndarray,
        the same type will be returned.
    """
    el_startup_factor = lim_growth(time, s, b0, k)
    return el_startup_factor
@nb.njit(cache=True)
def chp_startup_gas(time):
    """
    Gas power input and/or efficiency factor during EC Power XRGi20
    CHP startup. Compound of thermal and electrical startup factors, scaled
    by the efficiencies given in the datasheet. With this, full gas power
    input is reached 287s after startup. The resulting efficiency of a
    startup from 0 to 100% is 60%; including the extraction of remaining
    heat during shutdown, the 0-1-0 efficiency is 69.1%.

    Parameters
    ----------
    time : float, int, np.ndarray
        Time or timepoints in **seconds** [s] at which the startup progress
        shall be evaluated. 0 is the CHP start time.

    Returns
    -------
    float, np.ndarray
        Value fraction of `s` at the time `time`. If `time` is an np.ndarray,
        the same type will be returned.
    """
    # electrical and thermal startup factors, each scaled by the
    # respective datasheet efficiency
    el_part = chp_startup_el(time) / 0.32733
    th_part = chp_startup_th(time) / 0.6334
    return el_part + th_part
@nb.njit(cache=True)
def chp_thermal_power(
    modulation, s=0.60275, b0=0.972917, k=3.5990506789130166
):
    """
    Thermal power output and/or efficiency factor in dependence of the
    electrical power modulation of an EC Power XRGi20 CHP plant.
    Limited growth fit was found to be matching measurement data.
    See auswertung_bhkw.chp_fits for generation of the fit.

    Parameters
    ----------
    modulation : float, int, np.ndarray
        Electrical power modulation at which the thermal output factor
        shall be evaluated.
    s : float, optional
        Limit of the growth function. The default is 0.60275.
    b0 : float, optional
        Starting value. Cannot be set to zero.
        The default is 0.972917.
    k : float, optional
        Curvature parameter. The default is 3.5990506789130166.

    Returns
    -------
    float, np.ndarray
        Thermal output factor at `modulation`. If `modulation` is an
        np.ndarray, the same type will be returned.
    """
    return lim_growth(modulation, s, b0, k)
# return s - (s - b0) * k**modulation
@nb.njit(cache=True)
def chp_gas_power(
    modulation, s=-1.17, b0=0.995828402366862, k=1.9507547298681704
):
    """
    Gas power input (**lower heating value**) in dependency of the
    electrical power modulation of an EC Power XRGi20 CHP plant.
    Limited growth fit was found to be matching measurement data.
    See auswertung_bhkw.chp_fits for generation of the fit.

    Parameters
    ----------
    modulation : float, int, np.ndarray
        Electrical power modulation at which the gas power input factor
        shall be evaluated.
    s : float, optional
        Limit of the growth function. The default is -1.17.
    b0 : float, optional
        Starting value. Cannot be set to zero.
        The default is 0.995828402366862.
    k : float, optional
        Curvature parameter. The default is 1.9507547298681704.

    Returns
    -------
    float, np.ndarray
        Gas power input factor at `modulation`. If `modulation` is an
        np.ndarray, the same type will be returned.
    """
    return lim_growth(modulation, s, b0, k)
@nb.njit
def chp_shutdown_th(time, a=-1.2532286835042036e-09, b=927.5198588530006):
    """
    Thermal power output/efficiency of a CHP plant.
    Thermal power output and/or efficiency factor during EC Power XRGi20
    CHP switchoff. Cubic fit `a * (time - b)**3` chosen for the measurement
    data.
    See auswertung_bhkw.chp_fits for generation of the fit.

    Parameters
    ----------
    time : float, int, np.ndarray
        Time or timepoints in **seconds** [s] at which the switchoff progress
        shall be evaluated. 0 ist the CHP shutdown start time.
    a : float, optional
        Cubic coefficient of the fit.
        The default is -1.2532286835042036e-09.
    b : float, optional
        Time offset in [s] of the cubic's root, i.e. where the factor
        reaches zero. The default is 927.5198588530006.

    Returns
    -------
    float, np.ndarray
        Remaining thermal output factor at the time `time`. If `time` is an
        np.ndarray, the same type will be returned.
    """
    return a * (time - b) ** 3
@nb.njit
def quad_get_c(x, dy, b, c_base):
    """Get c parameter for quad polynome for condensing hex.

    NOTE: the parameter `x` is currently unused here.
    """
    curvature_term = dy / (2 * b)
    return c_base - curvature_term
@nb.njit
def quad_get_b(y0, a, c):
    """Get b parameter for quad polynome for condensing hex."""
    # broadcast y0 against a by appending an axis
    numerator = np.expand_dims(y0, -1) - a
    return numerator / c ** 2
@nb.njit
def condensing_hex_quad_poly(
    X_pred,
    int_comb_idx,
    nvars_per_ftr,  # polynomial transf.
    pca_mean,
    pca_components,  # PCA transformation
    lm_intercept,
    lm_coef,  # linear model transformation
    dm_water_thresh=0.1,
    dx=0.01,
):
    """
    Calculate condensing HEX temperatures below the valid massflow threshold.
    **ONLY VALID below the valid massflow range**, typically from 0-10% of the
    maximum massflow.

    Extrapolates with a quadratic polynomial anchored at the regression
    prediction (value and slope) at the massflow boundary `dm_water_thresh`.

    Parameters
    ----------
    X_pred : np.ndarray
        Prediction input, one sample per row. Column 1 is the flue gas
        entry temperature and column 2 the water massflow (see code below);
        the meaning of columns 0 and 3 is not visible here — confirm
        against the caller.
    int_comb_idx, nvars_per_ftr
        Polynomial transformation parameters, see
        `make_poly_transf_combs_for_nb`.
    pca_mean, pca_components
        PCA transformation vectors/matrices, see `extract_pca_results`.
    lm_intercept, lm_coef
        Linear model transformation (intercept and coefficients).
    dm_water_thresh : float, optional
        Massflow boundary of the valid regression range. The default is .1.
    dx : float, optional
        Delta x to determin slope from. The default is .01.

    Returns
    -------
    np.ndarray
        Extrapolated temperatures below the massflow threshold.
    """
    # extract n samples:
    n_samples = X_pred.shape[0]
    # prediction arrays at the boundary and +dx for the slope
    # extract and save massflow values:
    dm_bkp = X_pred[:, 2:3].copy()  # :3 extracts it as 2D arr and avoids resh.
    X_pred_bc = np.vstack(  # prediction x array at the boundary
        (
            X_pred[:, 0],
            X_pred[:, 1],
            np.full((X_pred.shape[0],), dm_water_thresh),
            X_pred[:, 3],
        )
    ).T
    X_pred_dx = X_pred_bc.copy()  # prediction x arr with dx for slope
    X_pred_dx[:, 2] += dx
    y0 = X_pred[:, 1]  # extract fg entry temperature
    # make predictions at dm_water_thresh, the boundary of the valid
    # region
    X_pf_bc = transform_to_poly_nb(
        X_pred_bc, int_comb_idx, nvars_per_ftr, n_samples
    )
    X_PC_bc = transform_pca_nb(X_pf_bc, pca_mean, pca_components)
    # predict
    y_hat_bc = poly_tranfs_pred(X_PC_bc, lm_intercept, lm_coef)
    # make predictions at dm_water_thresh+dx for generating the slope
    X_pf_dx = transform_to_poly_nb(
        X_pred_dx, int_comb_idx, nvars_per_ftr, n_samples
    )
    X_PC_dx = transform_pca_nb(X_pf_dx, pca_mean, pca_components)
    # predict
    y_hat_dx = poly_tranfs_pred(X_PC_dx, lm_intercept, lm_coef)
    dy = (y_hat_bc - y_hat_dx) / dx  # get the slopes
    # set c to dm_water_thresh for the first iteration of both temperatures
    c = np.array([[dm_water_thresh, dm_water_thresh]], dtype=np.float64)  #
    # NOTE(review): this loop runs exactly once, and the `c` recomputed in
    # its body is not used afterwards — presumably a fixed-point iteration
    # left at a single pass. Confirm before changing.
    for i in range(1):
        b = quad_get_b(y0=y0, a=y_hat_bc, c=c)
        c = quad_get_c(x=dm_bkp, dy=dy, b=b, c_base=dm_water_thresh)
    T_pred_below_thresh = y_hat_bc + b * (dm_bkp - dm_water_thresh) ** 2
    return T_pred_below_thresh
# @njit(nogil=GLOB_NOGIL, cache=True) # parallel=GLOB_PARALLEL useful
def _process_chp_core_modulation(
    process_flows,
    power_modulation,
    T,
    T_chp_in,
    T_chp_in_max,
    T_chp_in_max_emrgncy,
    mod_lower,
    min_on_time,
    min_off_time,
    max_ramp_el,
    startup_in_progress,
    shutdown_in_progress,
    chp_state,
    startup_at_time,
    shutdown_at_time,
    startup_duration,
    shutdown_duration,
    chp_on_perc,
    remaining_heat,
    bin_pow_fac,
    startup_factor_th,
    startup_factor_el,
    shutdown_factor_th,
    startuptsteps,
    chp_off_perc,
    dt_time_temp_exc,
    max_temp_exc_time,
    stepnum,
    time_vec,
    timestep,
):
    """
    Process the on/off state, modulation and startup/shutdown ramps of a
    CHP plant for the current timestep.

    Only executed once per timestep (guarded by `process_flows[0]`). The
    routine enforces the lower modulation bound `mod_lower`, the minimum
    on/off times and the maximum electrical ramp `max_ramp_el`, triggers a
    shutdown when the CHP inlet temperature exceeds `T_chp_in_max` for
    longer than `max_temp_exc_time` or any cell temperature exceeds
    `T_chp_in_max_emrgncy`, and advances the thermal/electrical startup and
    shutdown factors. `power_modulation` and `chp_state` are updated
    inplace; the remaining state values are returned as a tuple to be
    written back by the caller.
    """
    # process flows is only executed ONCE per timestep, afterwards the bool
    # process_flows is set to False.
    if process_flows[0]:  # only if flows not already processed
        # get current elapsed time
        curr_time = time_vec[stepnum[0] - 1] + timestep
        # get state of the last step
        state_last_step = chp_state[stepnum[0] - 1]
        # check for modulation range and set on-off-integer:
        if power_modulation[0] < mod_lower:
            # binary power multiplication factor to enable off-state
            # for modulations < mod_lower, f.i. to avoid modulations below
            # 50%.
            bin_pow_fac = 0.0
            chp_on = False  # chp is off
        else:
            bin_pow_fac = 1.0
            chp_on = True  # chp is on
        # detect changes in the state to save start/stop times
        if (state_last_step != 0.0) != chp_on:
            if not chp_on:  # if last step chp was on and now off
                # assert that minimum run time is fullfilled. if not,
                # avoid switching off by keeping min. modulation
                if min_on_time > (curr_time - startup_at_time):
                    # if minimum run time not reached, set chp to on
                    bin_pow_fac = 1.0
                    chp_on = True
                    power_modulation[0] = mod_lower
                else:  # else allow shutdown
                    shutdown_at_time = curr_time  # chp was shutdown
                    shutdown_in_progress = True
                    # print('shutdown at {0:.3f} s'.format(curr_time))
            else:  # if last step chp was off and now it is on
                # assert that minimum off time is fulfilled AND
                # (both ok -> OR statetment) inlet temp. is not exceeding
                # max temp.. If any is True, avoid CHP startup
                if (
                    (min_off_time > (curr_time - shutdown_at_time))
                    or (T_chp_in[0] > T_chp_in_max)
                    or np.any(T > T_chp_in_max_emrgncy)
                ):
                    # if minimum off time not reached or temperature too
                    # high, set chp to off
                    bin_pow_fac = 0.0
                    chp_on = False
                    power_modulation[0] = 0.0
                else:  # else allow switching on
                    startup_at_time = curr_time  # chp was started
                    startup_in_progress = True
                    # print('start at {0:.3f} s'.format(curr_time))
        elif chp_on:
            # if CHP was on last step AND is on now, check for ramps
            # get difference of modulation and absolute ramp per second
            mod_diff = state_last_step - power_modulation[0]
            mod_ramp_abs = np.abs(mod_diff) / timestep
            # if absolute ramp is higher than max ramp, limit change to
            # ramp
            if mod_ramp_abs > max_ramp_el:
                if mod_diff <= 0.0:  # ramp up too fast
                    power_modulation[0] = (  # set ramp to max ramp
                        state_last_step + max_ramp_el * timestep
                    )
                else:  # ramp down too fast
                    power_modulation[0] = (  # set ramp to max ramp
                        state_last_step - max_ramp_el * timestep
                    )
        # if chp is on, check if inlet temperature was exceeded or any
        # temperature is above emergency shutdown temp., then shutdown
        if chp_on and (
            (T_chp_in[0] > T_chp_in_max) or np.any(T > T_chp_in_max_emrgncy)
        ):
            # if max inlet temp. is exceeded, check max. allowed time for
            # exceeding and if too large, shutdown CHP due to overtemp.,
            # independend of min. run times and other parameters.
            # also if inlet temp. is above an emergency threshold.
            if (dt_time_temp_exc > max_temp_exc_time) or np.any(
                T > T_chp_in_max_emrgncy
            ):
                power_modulation[0] = 0.0
                bin_pow_fac = 0.0
                chp_on = False
                shutdown_at_time = curr_time
                shutdown_in_progress = True
                # emergeny_shutdown = True
                # print('emergency shutdown at {0:.3f} s'.format(curr_time))
            else:  # if timer not exceeded
                # delta t how long the temp. has been exceeded. after the
                # if-else check, since +timestep is at the end of the
                # step, thus relevant for the next step.
                dt_time_temp_exc += timestep
        else:  # else if temp. not exceeded, reset timer
            dt_time_temp_exc = 0.0
        # save chp state:
        chp_state[stepnum[0]] = bin_pow_fac * power_modulation[0]
        # process startup and shutdown procedure
        # is the CHP switched on? If yes, startup time is larger than
        # shutdown time.
        if startup_at_time > shutdown_at_time:
            # if chp shutdown was quite recent, thus heat is remaining
            # -> shorten startup procedure
            if shutdown_factor_th > chp_off_perc:
                # if shutdown was recent, take the shutdown factor and
                # look where in startup can be found. then add this
                # timestep where it was found to the startup time
                # (=increase startup duration) to account for remaining
                # heat in the system
                remaining_heat = np.argmin(
                    np.abs(startuptsteps - shutdown_factor_th)
                )
                # and reset shutdown factor to zero and set shutdown in
                # progress False to avoid doing this twice:
                shutdown_factor_th = 0.0
                shutdown_in_progress = False
            # get startup duration:
            startup_duration = (  # on since
                curr_time - startup_at_time + remaining_heat
            )
            # do factor calculations only, if startup not yet finished,
            # else do nothing, since factors are already set to 1
            if startup_in_progress:
                # power multiplication factors:
                startup_factor_th = chp_startup_th(startup_duration)
                startup_factor_el = chp_startup_el(startup_duration)
                # limit values to 0<=x<=1
                startup_factor_th = (
                    0.0
                    if startup_factor_th < 0.0
                    else 1.0
                    if startup_factor_th > 1.0
                    else startup_factor_th
                )
                startup_factor_el = (
                    0.0
                    if startup_factor_el < 0.0
                    else 1.0
                    if startup_factor_el > 1.0
                    else startup_factor_el
                )
                # check if thermal startup is completed, else go on
                if startup_factor_th > chp_on_perc:
                    # if thermal startup is completed, set all startups as
                    # completed
                    startup_in_progress = False
                    startup_factor_th = 1.0
                    startup_factor_el = 1.0
                    remaining_heat = 0.0
        else:  # if shutdown was more recent
            shutdown_duration = curr_time - shutdown_at_time  # off since
            if shutdown_in_progress:
                shutdown_factor_th = chp_shutdown_th(shutdown_duration)
                if shutdown_factor_th < chp_off_perc:
                    # shutdown finished. reset values
                    shutdown_in_progress = False
                    shutdown_factor_th = 0.0
    # return process flows bool to disable processing flows until next step
    return (
        bin_pow_fac,
        startup_at_time,
        shutdown_at_time,
        startup_in_progress,
        shutdown_in_progress,
        startup_factor_th,
        startup_factor_el,
        shutdown_factor_th,
        dt_time_temp_exc,
    )
# %% Simulation environment functions:
@nb.njit
def predictor_step(diff_fun, args, h, y_prev):
    """Explicit (Euler) predictor step of size `h` starting at `y_prev`."""
    derivative = diff_fun(*args, h)
    return y_prev + h * derivative
@nb.njit
def solve_pred_loop(h, diff_funs, all_args, all_y_prev, interm_res):
    """Run one predictor step per differential and store the flat results."""
    for idx in range(len(diff_funs)):
        stepped = predictor_step(
            diff_funs[idx], all_args[idx], h, all_y_prev[idx]
        )
        interm_res[idx][:] = stepped.ravel()
    return interm_res
@nb.jit(parallel=GLOB_PARALLEL)
def heun_predictor(_h, solve_num, parts, stepnum, i):
    """
    Predictor stage of Heun's method over all numerically solved parts.

    For each part: accumulate the truncation error (first try only), load
    the previous step's result into the current temperature array, evaluate
    the differential and apply an explicit predictor step of size `_h`.
    `i` is the repetition counter of the current step.
    """
    for part in solve_num:
        # if first try, add last step's part truncation error to part:
        if i == 0:
            # NOTE(review): this attribute access happens outside a class
            # body, so `__new_trnc_err` is NOT name-mangled here. Confirm
            # the attribute is really set under exactly this name.
            parts[part]._trnc_err += parts[part].__new_trnc_err
        # get results from last timestep and pass them to
        # current-timestep-temperature-array:
        parts[part].T[:] = parts[part].res[stepnum - 1]
        # calculate differential at result:
        parts[part]._df0 = solve_num[part](_h)
        # calculate and save predictor step:
        parts[part].T[:] = parts[part].T + _h * parts[part]._df0
# %% regression based functions
def make_poly_transf_combs_for_nb(
    mdl, n_features=None, pipeline=True, poly_step='polynomialfeatures'
):
    """Construct regressor combinations for polynomial."""
    # get the polynomial features, either from a pipeline step or directly
    poly_feats = mdl.named_steps[poly_step] if pipeline else mdl
    if n_features is None:
        n_features = getattr(poly_feats, 'n_input_features_', None)
        if n_features is None:
            raise ValueError
    # extract combinations as persisten tuple
    cmbntns = tuple(
        poly_feats._combinations(
            n_features,
            poly_feats.degree,
            poly_feats.interaction_only,
            poly_feats.include_bias,
        )
    )
    # create tuple with number of variables per combination
    nvars_per_ftr = tuple(len(combo) for combo in cmbntns)
    # integer indexing array, blank cells filled with a dummy value which
    # is ought to raise an error if ever used for indexing
    int_cmb_idx = np.full(
        (len(cmbntns), len(cmbntns[-1])), -99, dtype=np.int64
    )
    # write each combination into its row, leaving the rest at the dummy
    for row, combo in enumerate(cmbntns):
        int_cmb_idx[row, : nvars_per_ftr[row]] = combo
    return int_cmb_idx, nvars_per_ftr
def extract_pca_results(mdl, pipeline=True, pca_step='pca'):
    """Extract PCA result vectors/matrices for transformation."""
    # get the PCA step, either from a pipeline or directly from the model
    pca_mdl = mdl.named_steps[pca_step] if pipeline else mdl
    return pca_mdl.mean_, pca_mdl.components_
# the following 3 functions are numba compatible:
@nb.njit
def transform_to_poly_nb(X, int_comb_idx, nvars_per_ftr, n_samples):
    """Transform X vector to polynomial for predictions."""
    XP = np.ones((n_samples, int_comb_idx.shape[0]), dtype=np.float64)
    for row in range(XP.shape[0]):
        for col in range(XP.shape[1]):
            # product of the input features taking part in this polynomial
            # term, selected via the integer combination index
            XP[row, col] *= X[row][
                int_comb_idx[col, : nvars_per_ftr[col]]
            ].prod()
    return XP
@nb.njit
def transform_pca_nb(XP, pca_mean, pca_components):
    """Generate PCA transformation matrix."""
    centered = XP - pca_mean
    return np.dot(centered, pca_components.T)
@nb.njit
def poly_tranfs_pred(XP_pca_transf, intercept, coef):
    """Predict PCA transformed polynomial."""
    contributions = np.dot(XP_pca_transf, coef.T)
    return intercept + contributions
# %% root solvers to use in/with numba functions
@nb.njit
def root_array_secant(func, x0, args, tol, maxiter):
    """
    A vectorized version of the secant method for arrays.

    Do not use this method directly. This method is called from `newton`
    when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
    Taken and adapted from scipy.optimize.newton
    This solver may be slower than the explicit secant solver, but it is
    stable and has a higher precision. In contrast
    **This is the preferred solver for solving implicit differential
    equations.**

    Parameters
    ----------
    func : callable
        Vectorized residual function ``func(p, *args)`` whose roots shall
        be found.
    x0 : np.ndarray
        Initial guesses, one per element. Not altered (copied internally).
    args : tuple
        Extra arguments passed through to `func`.
    tol : float
        Absolute tolerance on the secant step for convergence.
    maxiter : int
        Maximum number of secant iterations.

    Returns
    -------
    np.ndarray
        Element-wise approximated roots.
    """
    # Explicitly copy `x0` as `p` will be modified inplace, but the
    # user's array should not be altered.
    p = x0.copy()
    # failures = np.ones_like(p).astype(bool)
    # nz_der = np.ones_like(failures)
    failures = p != -1234.4321  # bool array creation for numba
    nz_der = failures.copy()
    # print('using secant method')
    # Secant method
    dx = np.finfo(np.float64).eps ** 0.33
    p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
    q0 = func(p, *args)
    q1 = func(p1, *args)
    # active = np.ones_like(p, dtype=bool)
    active = failures.copy()
    for _ in range(maxiter):
        nz_der = q1 != q0
        # stop iterating if all derivatives are zero
        if not nz_der.any():
            p = (p1 + p) / 2.0
            break
        # Secant Step
        dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
        # only update nonzero derivatives
        p[nz_der] = p1[nz_der] - dp
        active_zero_der = ~nz_der & active
        p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
        active &= nz_der  # don't assign zero derivatives again
        failures[nz_der] = np.abs(dp) >= tol  # not yet converged
        # stop iterating if there aren't any failures, not incl zero der
        if not failures[nz_der].any():
            break
        p1, p = p, p1
        q0 = q1
        q1 = func(p1, *args)
    return p
@nb.njit
def root_array_newton(func, x0, fprime, args, tol, maxiter):
    """
    A vectorized version of the Newton and secant methods for arrays.

    Do not use this method directly. This method is called from `newton`
    when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
    Taken from scipy.optimize.newton.
    Also accepts a derivative function in `fprime`.

    Parameters
    ----------
    func : callable
        Residual function, called as ``func(p, *args)``; its elementwise
        roots are sought.
    x0 : np.ndarray
        Initial guess. Copied internally; the caller's array is not
        modified.
    fprime : callable or None
        Derivative of ``func``, called as ``fprime(p, *args)``. If not
        None, Newton-Raphson iteration is used; otherwise the secant
        method.
    args : tuple
        Extra positional arguments passed through to ``func``/``fprime``.
    tol : float
        Absolute per-element tolerance on the last step ``dp``.
    maxiter : int
        Maximum number of iterations.

    Returns
    -------
    np.ndarray
        Elementwise root approximations.
    """
    # Explicitly copy `x0` as `p` will be modified inplace, but the
    # user's array should not be altered.
    p = x0.copy()
    # failures = np.ones_like(p).astype(bool)
    # nz_der = np.ones_like(failures)
    # all-True bool arrays built via an always-false sentinel comparison
    # (workaround: numba-compatible bool array creation)
    failures = p != -1234.4321  # bool array creation for numba
    nz_der = failures.copy()
    if fprime is not None:
        # print('using newton raphson method')
        # Newton-Raphson method
        for iteration in range(maxiter):
            # first evaluate fval
            fval = func(p, *args)
            # If all fval are 0, all roots have been found, then terminate
            if not fval.any():
                failures = fval.astype(bool)
                break
            fder = fprime(p, *args)
            nz_der = fder != 0
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                break
            # Newton step
            dp = fval[nz_der] / fder[nz_der]
            # only update nonzero derivatives
            p[nz_der] -= dp
            failures[nz_der] = np.abs(dp) >= tol  # items not yet converged
            # stop iterating if there aren't any failures, not incl zero der
            if not failures[nz_der].any():
                break
    else:
        # print('using secant method')
        # Secant method
        # perturbation sized relative to float64 machine epsilon
        dx = np.finfo(np.float64).eps ** 0.33
        # second starting iterate, offset away from zero in the sign of p
        p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
        q0 = func(p, *args)
        q1 = func(p1, *args)
        # active = np.ones_like(p, dtype=bool)
        active = failures.copy()
        for iteration in range(maxiter):
            nz_der = q1 != q0
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                p = (p1 + p) / 2.0
                break
            # Secant Step
            dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
            # only update nonzero derivatives
            p[nz_der] = p1[nz_der] - dp
            # elements whose secant slope vanished (q1 == q0) this round
            # get a bisection-style midpoint update instead
            active_zero_der = ~nz_der & active
            p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
            active &= nz_der  # don't assign zero derivatives again
            failures[nz_der] = np.abs(dp) >= tol  # not yet converged
            # stop iterating if there aren't any failures, not incl zero der
            if not failures[nz_der].any():
                break
            # swap iterates and shift residuals for the next secant step
            p1, p = p, p1
            q0 = q1
            q1 = func(p1, *args)
    return p
@nb.njit
def root_array_newton_fast(func, x0, fprime, args, tol, maxiter):
    """
    A vectorized version of the Newton and secant methods for arrays.

    Do not use this method directly. This method is called from `newton`
    when ``np.size(x0) > 1`` is ``True``. For docstring, see `newton`.
    **ONLY USE THIS WHEN ACCURACY IS NOT IMPORTANT!!**

    Unlike `root_array_newton`, convergence is checked on an aggregate
    (mean squared step size) instead of elementwise, which is cheaper
    but less accurate.

    Parameters
    ----------
    func : callable
        Residual function, called as ``func(p, *args)``; its elementwise
        roots are sought.
    x0 : np.ndarray
        Initial guess. Copied internally; the caller's array is not
        modified.
    fprime : callable or None
        Derivative of ``func``. If not None, Newton-Raphson iteration is
        used; otherwise the secant method.
    args : tuple
        Extra positional arguments passed through to ``func``/``fprime``.
    tol : float
        Aggregate convergence tolerance on the mean squared step size.
    maxiter : int
        Maximum number of iterations.

    Returns
    -------
    tuple
        ``(p, iteration, q1)``: elementwise root approximations, the
        index of the last iteration performed, and the most recently
        evaluated residuals.
    """
    # Explicitly copy `x0` as `p` will be modified inplace, but the
    # user's array should not be altered.
    p = x0.copy()
    # all-True bool array built via an always-false sentinel comparison
    # (workaround: numba-compatible bool array creation)
    nz_der = p != -1234.4321
    # Pre-define the returned loop variables on every code path.
    # BUGFIX: previously `q1` was never assigned in the Newton branch and
    # `iteration`/`q1` were undefined for maxiter == 0, so the final
    # `return p, iteration, q1` raised (numba typing error / NameError).
    iteration = 0
    q1 = np.zeros_like(p)
    if fprime is not None:
        # Newton-Raphson method
        for iteration in range(maxiter):
            # first evaluate fval
            fval = func(p, *args)
            q1 = fval  # keep the latest residuals for the return value
            # If all fval are 0, all roots have been found, then terminate
            if not fval.any():
                break
            fder = fprime(p, *args)
            nz_der = fder != 0
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                break
            # Newton step
            dp = fval[nz_der] / fder[nz_der]
            # only update nonzero derivatives
            p[nz_der] -= dp
            # cheap aggregate convergence check (mean squared step size)
            failure = ((dp - tol) ** 2).mean() > tol
            if not failure:
                break
    else:
        # Secant method
        # perturbation sized relative to float64 machine epsilon
        dx = np.finfo(np.float64).eps ** 0.33
        # second starting iterate, offset away from zero in the sign of p
        p1 = p * (1 + dx) + np.where(p >= 0, dx, -dx)
        q0 = func(p, *args)
        q1 = func(p1, *args)
        active = nz_der.copy()
        for iteration in range(maxiter):
            nz_der = q1 != q0
            # stop iterating if all derivatives are zero
            if not nz_der.any():
                p = (p1 + p) / 2.0
                break
            # Secant Step
            dp = (q1 * (p1 - p))[nz_der] / (q1 - q0)[nz_der]
            # only update nonzero derivatives
            p[nz_der] = p1[nz_der] - dp
            # elements whose secant slope vanished (q1 == q0) this round
            # get a bisection-style midpoint update instead
            active_zero_der = ~nz_der & active
            p[active_zero_der] = (p1 + p)[active_zero_der] / 2.0
            active &= nz_der  # don't assign zero derivatives again
            # cheap aggregate convergence check (mean squared step size)
            failure = ((dp - tol) ** 2).mean() > tol
            if not failure:
                break
            # swap iterates and shift residuals for the next secant step
            p1, p = p, p1
            q0 = q1
            q1 = func(p1, *args)
    return p, iteration, q1
@nb.njit(cache=True)
def root_secant(f, x0, h, yprev, input_args, tol=1e-3, max_iter=100):
    """
    Solve ``f(p, yprev, h, input_args) = 0`` elementwise with the secant
    method.

    This is a pure basic secant method without approximation of the
    Jacobian. The two starting iterates are seeded at +-10% around
    ``x0``.

    Parameters
    ----------
    f : callable
        Residual function, called as ``f(p, yprev, h, input_args)``.
        Presumably the implicit-step residual of an ODE solver, judging
        by the argument names — TODO confirm against the callers.
    x0 : np.ndarray
        Starting point; iterates are seeded at ``x0 * 1.1`` and
        ``x0 * 0.9``.
    h : object
        Passed through to ``f`` unchanged (step size by name — confirm).
    yprev : object
        Passed through to ``f`` unchanged (previous solution values by
        name — confirm).
    input_args : object
        Passed through to ``f`` unchanged.
    tol : float, optional
        Convergence tolerance on the mean squared residual. The default
        is 1e-3.
    max_iter : int, optional
        Maximum number of iterations. The default is 100.

    Returns
    -------
    np.ndarray
        The last iterate ``p1``; also returned (unconverged) when
        ``max_iter`` is exhausted.
    """
    # set bracket to +-10% of starting point
    p0 = x0 * (1 + 1e-1)
    p1 = x0 * (1 - 1e-1)
    # store y values instead of recomputing them
    fp0 = f(p0, yprev, h, input_args)
    fp1 = f(p1, yprev, h, input_args)
    # NOTE(review): where fp0*fp1 >= 0 the pair does not bracket a root;
    # swapping p0/p1 there only reverses their roles — presumably meant
    # to orient the iterates, confirm the intent with the author.
    false_mask = fp0 * fp1 >= 0
    p0[false_mask], p1[false_mask] = p1[false_mask], p0[false_mask]
    # gt_eps = np.ones_like(false_mask, dtype=np.bool)
    eps = 1e-8  # np.finfo(np.float64).eps * 1e4
    # successful vars:
    # tol_ok = np.abs(fp1) <= tol
    # iterate up to maximum number of times
    for _ in range(max_iter):
        # see whether the answer has converged (MSE)
        if ((fp1 - tol) ** 2).mean() < tol:
            return p1
        # check if epsilon is reached or no diff
        gt_eps = (np.abs(fp1) > eps) | (np.abs(fp0) > eps) | (fp0 != fp1)
        # do calculation
        p2 = (p0 * fp1 - p1 * fp0) / (fp1 - fp0)
        # shift variables (prepare for next loop) and except lower eps values
        p0[gt_eps], p1[gt_eps] = p1[gt_eps], p2[gt_eps]
        # shift for next step
        fp0, fp1 = fp1, f(p1, yprev, h, input_args)
    return p1  # return if not converged
| [
"numpy.log10",
"numpy.log",
"numpy.roots",
"numpy.equal",
"numpy.array",
"numba.prange",
"numpy.greater",
"numpy.less",
"numpy.where",
"numpy.max",
"numpy.exp",
"numpy.dot",
"numpy.empty",
"numpy.maximum",
"numpy.abs",
"numpy.ones",
"numpy.full",
"numba.njit",
"numpy.any",
"num... | [((363, 411), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (366, 411), False, 'from numba import jit, njit, float64, int32\n'), ((931, 965), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (935, 965), False, 'from numba import jit, njit, float64, int32\n'), ((1591, 1625), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (1595, 1625), False, 'from numba import jit, njit, float64, int32\n'), ((2046, 2080), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (2050, 2080), False, 'from numba import jit, njit, float64, int32\n'), ((2431, 2468), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (2438, 2468), True, 'import numba as nb\n'), ((2779, 2813), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (2783, 2813), False, 'from numba import jit, njit, float64, int32\n'), ((3896, 3930), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (3900, 3930), False, 'from numba import jit, njit, float64, int32\n'), ((5132, 5166), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (5136, 5166), False, 'from numba import jit, njit, float64, int32\n'), ((6735, 6769), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (6739, 6769), False, 'from numba import jit, njit, float64, int32\n'), ((8742, 8779), 'numba.njit', 'nb.njit', ([], {'cache': '(True)', 'nogil': 'GLOB_NOGIL'}), '(cache=True, nogil=GLOB_NOGIL)\n', (8749, 8779), True, 'import numba as nb\n'), ((9938, 9972), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 
'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (9942, 9972), False, 'from numba import jit, njit, float64, int32\n'), ((11162, 11196), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (11166, 11196), False, 'from numba import jit, njit, float64, int32\n'), ((11832, 11866), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (11836, 11866), False, 'from numba import jit, njit, float64, int32\n'), ((12475, 12509), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (12479, 12509), False, 'from numba import jit, njit, float64, int32\n'), ((12998, 13032), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (13002, 13032), False, 'from numba import jit, njit, float64, int32\n'), ((13704, 13738), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (13708, 13738), False, 'from numba import jit, njit, float64, int32\n'), ((14430, 14464), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (14434, 14464), False, 'from numba import jit, njit, float64, int32\n'), ((14882, 14916), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (14886, 14916), False, 'from numba import jit, njit, float64, int32\n'), ((15508, 15542), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (15512, 15542), False, 'from numba import jit, njit, float64, int32\n'), ((16248, 16282), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (16252, 16282), False, 'from numba import jit, njit, float64, int32\n'), ((17005, 17039), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), 
'(nogil=GLOB_NOGIL, cache=True)\n', (17009, 17039), False, 'from numba import jit, njit, float64, int32\n'), ((17437, 17471), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (17441, 17471), False, 'from numba import jit, njit, float64, int32\n'), ((17850, 17884), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (17854, 17884), False, 'from numba import jit, njit, float64, int32\n'), ((18219, 18256), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (18226, 18256), True, 'import numba as nb\n'), ((20341, 20378), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (20348, 20378), True, 'import numba as nb\n'), ((21902, 21936), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (21906, 21936), False, 'from numba import jit, njit, float64, int32\n'), ((26647, 26681), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (26651, 26681), False, 'from numba import jit, njit, float64, int32\n'), ((31900, 31934), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (31904, 31934), False, 'from numba import jit, njit, float64, int32\n'), ((37368, 37402), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (37372, 37402), False, 'from numba import jit, njit, float64, int32\n'), ((42457, 42491), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (42461, 42491), False, 'from numba import jit, njit, float64, int32\n'), ((48860, 48894), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (48864, 48894), False, 'from 
numba import jit, njit, float64, int32\n'), ((54770, 54804), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (54774, 54804), False, 'from numba import jit, njit, float64, int32\n'), ((59921, 59955), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (59925, 59955), False, 'from numba import jit, njit, float64, int32\n'), ((65580, 65614), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (65584, 65614), False, 'from numba import jit, njit, float64, int32\n'), ((73076, 73110), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (73080, 73110), False, 'from numba import jit, njit, float64, int32\n'), ((78527, 78561), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (78531, 78561), False, 'from numba import jit, njit, float64, int32\n'), ((84621, 84640), 'numba.njit', 'nb.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (84628, 84640), True, 'import numba as nb\n'), ((90610, 90644), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (90614, 90644), False, 'from numba import jit, njit, float64, int32\n'), ((95842, 95879), 'numba.njit', 'nb.njit', ([], {'cache': '(True)', 'nogil': 'GLOB_NOGIL'}), '(cache=True, nogil=GLOB_NOGIL)\n', (95849, 95879), True, 'import numba as nb\n'), ((95983, 96020), 'numba.njit', 'nb.njit', ([], {'cache': '(True)', 'nogil': 'GLOB_NOGIL'}), '(cache=True, nogil=GLOB_NOGIL)\n', (95990, 96020), True, 'import numba as nb\n'), ((96739, 96776), 'numba.njit', 'nb.njit', ([], {'cache': '(True)', 'nogil': 'GLOB_NOGIL'}), '(cache=True, nogil=GLOB_NOGIL)\n', (96746, 96776), True, 'import numba as nb\n'), ((98629, 98663), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, 
cache=True)\n', (98633, 98663), False, 'from numba import jit, njit, float64, int32\n'), ((98742, 98779), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (98749, 98779), True, 'import numba as nb\n'), ((102391, 102428), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (102398, 102428), True, 'import numba as nb\n'), ((109641, 109729), 'numba.jit', 'jit', (['(float64[:], int32[:], float64[:])'], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '((float64[:], int32[:], float64[:]), nopython=True, nogil=GLOB_NOGIL,\n cache=True)\n', (109644, 109729), False, 'from numba import jit, njit, float64, int32\n'), ((109952, 109986), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (109956, 109986), False, 'from numba import jit, njit, float64, int32\n'), ((110047, 110098), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (110053, 110098), True, 'import numba as nb\n'), ((111202, 111236), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (111206, 111236), False, 'from numba import jit, njit, float64, int32\n'), ((111301, 111392), 'numba.jit', 'nb.jit', (['(float64[:], int32[:], float64[:])'], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '((float64[:], int32[:], float64[:]), nopython=True, nogil=GLOB_NOGIL,\n cache=True)\n', (111307, 111392), True, 'import numba as nb\n'), ((111559, 111593), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (111563, 111593), False, 'from numba import jit, njit, float64, int32\n'), ((111656, 111693), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', 
(111663, 111693), True, 'import numba as nb\n'), ((112111, 112162), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (112117, 112162), True, 'import numba as nb\n'), ((114903, 114940), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (114910, 114940), True, 'import numba as nb\n'), ((121245, 121279), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (121249, 121279), False, 'from numba import jit, njit, float64, int32\n'), ((122961, 123012), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (122967, 123012), True, 'import numba as nb\n'), ((123543, 123594), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (123549, 123594), True, 'import numba as nb\n'), ((123958, 124009), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (123964, 124009), True, 'import numba as nb\n'), ((124361, 124412), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (124367, 124412), True, 'import numba as nb\n'), ((124844, 124895), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (124850, 124895), True, 'import numba as nb\n'), ((125417, 125468), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (125423, 125468), True, 'import numba as nb\n'), ((125855, 125906), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 
'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (125861, 125906), True, 'import numba as nb\n'), ((126321, 126355), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (126325, 126355), False, 'from numba import jit, njit, float64, int32\n'), ((126708, 126759), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (126714, 126759), True, 'import numba as nb\n'), ((127316, 127367), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (127322, 127367), True, 'import numba as nb\n'), ((127899, 127950), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (127905, 127950), True, 'import numba as nb\n'), ((128316, 128353), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (128323, 128353), True, 'import numba as nb\n'), ((128647, 128698), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (128653, 128698), True, 'import numba as nb\n'), ((128810, 128847), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (128817, 128847), True, 'import numba as nb\n'), ((128969, 129020), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (128975, 129020), True, 'import numba as nb\n'), ((129246, 129297), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (129252, 129297), True, 'import numba as nb\n'), ((129540, 
129574), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (129544, 129574), False, 'from numba import jit, njit, float64, int32\n'), ((129801, 129852), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (129807, 129852), True, 'import numba as nb\n'), ((130096, 130130), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (130100, 130130), False, 'from numba import jit, njit, float64, int32\n'), ((130393, 130444), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (130399, 130444), True, 'import numba as nb\n'), ((131176, 131227), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (131182, 131227), True, 'import numba as nb\n'), ((134914, 134965), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (134920, 134965), True, 'import numba as nb\n'), ((135135, 135186), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (135141, 135186), True, 'import numba as nb\n'), ((135271, 135322), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (135277, 135322), True, 'import numba as nb\n'), ((138334, 138385), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (138340, 138385), True, 'import numba as nb\n'), ((141560, 141611), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': 
'(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (141566, 141611), True, 'import numba as nb\n'), ((144774, 144811), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (144781, 144811), True, 'import numba as nb\n'), ((148020, 148071), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (148026, 148071), True, 'import numba as nb\n'), ((151109, 151160), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (151115, 151160), True, 'import numba as nb\n'), ((152557, 152591), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (152561, 152591), False, 'from numba import jit, njit, float64, int32\n'), ((154119, 154153), 'numba.njit', 'njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (154123, 154153), False, 'from numba import jit, njit, float64, int32\n'), ((157202, 157253), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (157208, 157253), True, 'import numba as nb\n'), ((159801, 159852), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (159807, 159852), True, 'import numba as nb\n'), ((161553, 161604), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (161559, 161604), True, 'import numba as nb\n'), ((163213, 163264), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (163219, 163264), True, 'import numba as nb\n'), ((165532, 165583), 
'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (165538, 165583), True, 'import numba as nb\n'), ((167099, 167136), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (167106, 167136), True, 'import numba as nb\n'), ((167886, 167937), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (167892, 167937), True, 'import numba as nb\n'), ((169166, 169217), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (169172, 169217), True, 'import numba as nb\n'), ((170186, 170223), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (170193, 170223), True, 'import numba as nb\n'), ((171831, 171868), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (171838, 171868), True, 'import numba as nb\n'), ((173461, 173512), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (173467, 173512), True, 'import numba as nb\n'), ((174350, 174401), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (174356, 174401), True, 'import numba as nb\n'), ((175288, 175325), 'numba.njit', 'nb.njit', ([], {'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nogil=GLOB_NOGIL, cache=True)\n', (175295, 175325), True, 'import numba as nb\n'), ((176141, 176192), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (176147, 176192), True, 'import numba as nb\n'), ((176530, 176581), 
'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (176536, 176581), True, 'import numba as nb\n'), ((177204, 177255), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (177210, 177255), True, 'import numba as nb\n'), ((177528, 177579), 'numba.jit', 'nb.jit', ([], {'nopython': '(True)', 'nogil': 'GLOB_NOGIL', 'cache': '(True)'}), '(nopython=True, nogil=GLOB_NOGIL, cache=True)\n', (177534, 177579), True, 'import numba as nb\n'), ((178604, 178635), 'numba.njit', 'nb.njit', ([], {'parallel': 'GLOB_PARALLEL'}), '(parallel=GLOB_PARALLEL)\n', (178611, 178635), True, 'import numba as nb\n'), ((180957, 180976), 'numba.njit', 'nb.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (180964, 180976), True, 'import numba as nb\n'), ((182085, 182104), 'numba.njit', 'nb.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (182092, 182104), True, 'import numba as nb\n'), ((183404, 183423), 'numba.njit', 'nb.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (183411, 183423), True, 'import numba as nb\n'), ((184324, 184343), 'numba.njit', 'nb.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (184331, 184343), True, 'import numba as nb\n'), ((185580, 185599), 'numba.njit', 'nb.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (185587, 185599), True, 'import numba as nb\n'), ((199944, 199974), 'numba.jit', 'nb.jit', ([], {'parallel': 'GLOB_PARALLEL'}), '(parallel=GLOB_PARALLEL)\n', (199950, 199974), True, 'import numba as nb\n'), ((210572, 210591), 'numba.njit', 'nb.njit', ([], {'cache': '(True)'}), '(cache=True)\n', (210579, 210591), True, 'import numba as nb\n'), ((9167, 9195), 'numpy.zeros', 'np.zeros', (['port_own_idx.shape'], {}), '(port_own_idx.shape)\n', (9175, 9195), True, 'import numpy as np\n'), ((10325, 10353), 'numpy.zeros', 'np.zeros', (['port_own_idx.shape'], {}), 
'(port_own_idx.shape)\n', (10333, 10353), True, 'import numpy as np\n'), ((18891, 18910), 'numpy.all', 'np.all', (['(T_diff <= 0)'], {}), '(T_diff <= 0)\n', (18897, 18910), True, 'import numpy as np\n'), ((19212, 19227), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (19220, 19227), True, 'import numpy as np\n'), ((21136, 21155), 'numpy.all', 'np.all', (['(T_diff <= 0)'], {}), '(T_diff <= 0)\n', (21142, 21155), True, 'import numpy as np\n'), ((23620, 23631), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (23628, 23631), True, 'import numpy as np\n'), ((28299, 28310), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (28307, 28310), True, 'import numpy as np\n'), ((33652, 33663), 'numpy.empty', 'np.empty', (['(3)'], {}), '(3)\n', (33660, 33663), True, 'import numpy as np\n'), ((61209, 61244), 'numpy.any', 'np.any', (['(T_ext >= emergency_shutdown)'], {}), '(T_ext >= emergency_shutdown)\n', (61215, 61244), True, 'import numpy as np\n'), ((80085, 80116), 'numpy.abs', 'np.abs', (['(dm_io / channel_divisor)'], {}), '(dm_io / channel_divisor)\n', (80091, 80116), True, 'import numpy as np\n'), ((87980, 88006), 'numpy.all', 'np.all', (['(X_pred[0, 2:] != 0)'], {}), '(X_pred[0, 2:] != 0)\n', (87986, 88006), True, 'import numpy as np\n'), ((97471, 97487), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (97484, 97487), True, 'import numpy as np\n'), ((97499, 97517), 'numpy.any', 'np.any', (['(err > rtol)'], {}), '(err > rtol)\n', (97505, 97517), True, 'import numpy as np\n'), ((99582, 99599), 'numpy.greater', 'np.greater', (['dm', '(0)'], {}), '(dm, 0)\n', (99592, 99599), True, 'import numpy as np\n'), ((99613, 99627), 'numpy.less', 'np.less', (['dm', '(0)'], {}), '(dm, 0)\n', (99620, 99627), True, 'import numpy as np\n'), ((136780, 136795), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (136788, 136795), True, 'import numpy as np\n'), ((136838, 136855), 'numpy.zeros', 'np.zeros', (['T.shape'], {}), '(T.shape)\n', (136846, 136855), True, 
'import numpy as np\n'), ((136865, 136880), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (136873, 136880), True, 'import numpy as np\n'), ((139699, 139714), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (139707, 139714), True, 'import numpy as np\n'), ((139755, 139772), 'numpy.zeros', 'np.zeros', (['T.shape'], {}), '(T.shape)\n', (139763, 139772), True, 'import numpy as np\n'), ((139782, 139797), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (139790, 139797), True, 'import numpy as np\n'), ((143024, 143039), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (143032, 143039), True, 'import numpy as np\n'), ((143134, 143149), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (143142, 143149), True, 'import numpy as np\n'), ((146236, 146251), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (146244, 146251), True, 'import numpy as np\n'), ((146346, 146361), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (146354, 146361), True, 'import numpy as np\n'), ((149585, 149607), 'numpy.zeros', 'np.zeros', (['T_mean.shape'], {}), '(T_mean.shape)\n', (149593, 149607), True, 'import numpy as np\n'), ((149618, 149640), 'numpy.zeros', 'np.zeros', (['T_mean.shape'], {}), '(T_mean.shape)\n', (149626, 149640), True, 'import numpy as np\n'), ((153101, 153123), 'numpy.zeros', 'np.zeros', (['T_mean.shape'], {}), '(T_mean.shape)\n', (153109, 153123), True, 'import numpy as np\n'), ((153134, 153156), 'numpy.zeros', 'np.zeros', (['T_mean.shape'], {}), '(T_mean.shape)\n', (153142, 153156), True, 'import numpy as np\n'), ((155058, 155080), 'numpy.empty', 'np.empty', (['T_mean.shape'], {}), '(T_mean.shape)\n', (155066, 155080), True, 'import numpy as np\n'), ((176225, 176249), 'numpy.zeros', 'np.zeros', (['(x.shape[0] + 1)'], {}), '(x.shape[0] + 1)\n', (176233, 176249), True, 'import numpy as np\n'), ((176279, 176293), 'numpy.cumsum', 'np.cumsum', (['arr'], {}), '(arr)\n', (176288, 176293), True, 'import numpy as np\n'), 
((176739, 176759), 'numpy.zeros', 'np.zeros', (['new_length'], {}), '(new_length)\n', (176747, 176759), True, 'import numpy as np\n'), ((180027, 180099), 'numpy.zeros', 'np.zeros', (['(roots.shape[0], poly_coeff.shape[0] - 1)'], {'dtype': 'np.complex128'}), '((roots.shape[0], poly_coeff.shape[0] - 1), dtype=np.complex128)\n', (180035, 180099), True, 'import numpy as np\n'), ((180127, 180152), 'numba.prange', 'nb.prange', (['roots.shape[0]'], {}), '(roots.shape[0])\n', (180136, 180152), True, 'import numba as nb\n'), ((190151, 190215), 'numpy.array', 'np.array', (['[[dm_water_thresh, dm_water_thresh]]'], {'dtype': 'np.float64'}), '([[dm_water_thresh, dm_water_thresh]], dtype=np.float64)\n', (190159, 190215), True, 'import numpy as np\n'), ((202400, 202461), 'numpy.ones', 'np.ones', (['(n_samples, int_comb_idx.shape[0])'], {'dtype': 'np.float64'}), '((n_samples, int_comb_idx.shape[0]), dtype=np.float64)\n', (202407, 202461), True, 'import numpy as np\n'), ((202781, 202820), 'numpy.dot', 'np.dot', (['(XP - pca_mean)', 'pca_components.T'], {}), '(XP - pca_mean, pca_components.T)\n', (202787, 202820), True, 'import numpy as np\n'), ((4313, 4324), 'numpy.empty', 'np.empty', (['(2)'], {}), '(2)\n', (4321, 4324), True, 'import numpy as np\n'), ((5886, 5902), 'numpy.cumsum', 'np.cumsum', (['dm_io'], {}), '(dm_io)\n', (5895, 5902), True, 'import numpy as np\n'), ((99690, 99703), 'numpy.sum', 'np.sum', (['dm_in'], {}), '(dm_in)\n', (99696, 99703), True, 'import numpy as np\n'), ((176035, 176064), 'numpy.log', 'np.log', (['(Delta_T_A / Delta_T_B)'], {}), '(Delta_T_A / Delta_T_B)\n', (176041, 176064), True, 'import numpy as np\n'), ((178155, 178171), 'numpy.zeros', 'np.zeros', (['wa_len'], {}), '(wa_len)\n', (178163, 178171), True, 'import numpy as np\n'), ((178250, 178266), 'numpy.zeros', 'np.zeros', (['length'], {}), '(length)\n', (178258, 178266), True, 'import numpy as np\n'), ((180223, 180242), 'numpy.roots', 'np.roots', (['polcoeffs'], {}), '(polcoeffs)\n', (180231, 
180242), True, 'import numpy as np\n'), ((202955, 202984), 'numpy.dot', 'np.dot', (['XP_pca_transf', 'coef.T'], {}), '(XP_pca_transf, coef.T)\n', (202961, 202984), True, 'import numpy as np\n'), ((204025, 204050), 'numpy.where', 'np.where', (['(p >= 0)', 'dx', '(-dx)'], {}), '(p >= 0, dx, -dx)\n', (204033, 204050), True, 'import numpy as np\n'), ((21729, 21770), 'numpy.abs', 'np.abs', (['(-g * beta * T_diff / grid_spacing)'], {}), '(-g * beta * T_diff / grid_spacing)\n', (21735, 21770), True, 'import numpy as np\n'), ((23856, 23887), 'numpy.max', 'np.max', (['(UA_tb[1:-1] / rhocp[1:])'], {}), '(UA_tb[1:-1] / rhocp[1:])\n', (23862, 23887), True, 'import numpy as np\n'), ((24105, 24147), 'numpy.max', 'np.max', (['(UA_port / (A_port * port_subs_gsp))'], {}), '(UA_port / (A_port * port_subs_gsp))\n', (24111, 24147), True, 'import numpy as np\n'), ((24266, 24294), 'numpy.max', 'np.max', (['(UA_amb_shell / rhocp)'], {}), '(UA_amb_shell / rhocp)\n', (24272, 24294), True, 'import numpy as np\n'), ((33888, 33919), 'numpy.max', 'np.max', (['(UA_tb[1:-1] / rhocp[1:])'], {}), '(UA_tb[1:-1] / rhocp[1:])\n', (33894, 33919), True, 'import numpy as np\n'), ((34137, 34179), 'numpy.max', 'np.max', (['(UA_port / (A_port * port_subs_gsp))'], {}), '(UA_port / (A_port * port_subs_gsp))\n', (34143, 34179), True, 'import numpy as np\n'), ((34298, 34326), 'numpy.max', 'np.max', (['(UA_amb_shell / rhocp)'], {}), '(UA_amb_shell / rhocp)\n', (34304, 34326), True, 'import numpy as np\n'), ((100040, 100082), 'numpy.sum', 'np.sum', (['(dm[dm_in] * cp_T[dm_in] * T[dm_in])'], {}), '(dm[dm_in] * cp_T[dm_in] * T[dm_in])\n', (100046, 100082), True, 'import numpy as np\n'), ((100396, 100410), 'numpy.sum', 'np.sum', (['dm_out'], {}), '(dm_out)\n', (100402, 100410), True, 'import numpy as np\n'), ((128743, 128752), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (128749, 128752), True, 'import numpy as np\n'), ((128890, 128899), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (128896, 128899), True, 'import 
numpy as np\n'), ((188099, 188121), 'numpy.expand_dims', 'np.expand_dims', (['y0', '(-1)'], {}), '(y0, -1)\n', (188113, 188121), True, 'import numpy as np\n'), ((203968, 203988), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (203976, 203988), True, 'import numpy as np\n'), ((204710, 204720), 'numpy.abs', 'np.abs', (['dp'], {}), '(dp)\n', (204716, 204720), True, 'import numpy as np\n'), ((206742, 206767), 'numpy.where', 'np.where', (['(p >= 0)', 'dx', '(-dx)'], {}), '(p >= 0, dx, -dx)\n', (206750, 206767), True, 'import numpy as np\n'), ((209477, 209502), 'numpy.where', 'np.where', (['(p >= 0)', 'dx', '(-dx)'], {}), '(p >= 0, dx, -dx)\n', (209485, 209502), True, 'import numpy as np\n'), ((99884, 99900), 'numpy.sum', 'np.sum', (['T[dm_in]'], {}), '(T[dm_in])\n', (99890, 99900), True, 'import numpy as np\n'), ((101000, 101013), 'numpy.sum', 'np.sum', (['dm_in'], {}), '(dm_in)\n', (101006, 101013), True, 'import numpy as np\n'), ((101070, 101085), 'numpy.equal', 'np.equal', (['dm', '(0)'], {}), '(dm, 0)\n', (101078, 101085), True, 'import numpy as np\n'), ((137991, 138009), 'numpy.log10', 'np.log10', (['Re[turb]'], {}), '(Re[turb])\n', (137999, 138009), True, 'import numpy as np\n'), ((141193, 141211), 'numpy.log10', 'np.log10', (['Re[turb]'], {}), '(Re[turb])\n', (141201, 141211), True, 'import numpy as np\n'), ((144424, 144442), 'numpy.log10', 'np.log10', (['Re[turb]'], {}), '(Re[turb])\n', (144432, 144442), True, 'import numpy as np\n'), ((147658, 147676), 'numpy.log10', 'np.log10', (['Re[turb]'], {}), '(Re[turb])\n', (147666, 147676), True, 'import numpy as np\n'), ((189132, 189176), 'numpy.full', 'np.full', (['(X_pred.shape[0],)', 'dm_water_thresh'], {}), '((X_pred.shape[0],), dm_water_thresh)\n', (189139, 189176), True, 'import numpy as np\n'), ((195029, 195061), 'numpy.any', 'np.any', (['(T > T_chp_in_max_emrgncy)'], {}), '(T > T_chp_in_max_emrgncy)\n', (195035, 195061), True, 'import numpy as np\n'), ((195410, 195442), 'numpy.any', 
'np.any', (['(T > T_chp_in_max_emrgncy)'], {}), '(T > T_chp_in_max_emrgncy)\n', (195416, 195442), True, 'import numpy as np\n'), ((206408, 206418), 'numpy.abs', 'np.abs', (['dp'], {}), '(dp)\n', (206414, 206418), True, 'import numpy as np\n'), ((206681, 206701), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (206689, 206701), True, 'import numpy as np\n'), ((207507, 207517), 'numpy.abs', 'np.abs', (['dp'], {}), '(dp)\n', (207513, 207517), True, 'import numpy as np\n'), ((209416, 209436), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (209424, 209436), True, 'import numpy as np\n'), ((24646, 24659), 'numpy.abs', 'np.abs', (['dm_io'], {}), '(dm_io)\n', (24652, 24659), True, 'import numpy as np\n'), ((29899, 29912), 'numpy.abs', 'np.abs', (['dm_io'], {}), '(dm_io)\n', (29905, 29912), True, 'import numpy as np\n'), ((107122, 107137), 'numpy.exp', 'np.exp', (['rs_ntus'], {}), '(rs_ntus)\n', (107128, 107137), True, 'import numpy as np\n'), ((122747, 122766), 'numpy.abs', 'np.abs', (['(T_s - T_inf)'], {}), '(T_s - T_inf)\n', (122753, 122766), True, 'import numpy as np\n'), ((149894, 149913), 'numpy.abs', 'np.abs', (['(T_s - T_inf)'], {}), '(T_s - T_inf)\n', (149900, 149913), True, 'import numpy as np\n'), ((163171, 163198), 'numpy.log', 'np.log', (['((r_o / r_i + 1) / 2)'], {}), '((r_o / r_i + 1) / 2)\n', (163177, 163198), True, 'import numpy as np\n'), ((167026, 167053), 'numpy.log', 'np.log', (['(2 / (r_i / r_o + 1))'], {}), '(2 / (r_i / r_o + 1))\n', (167032, 167053), True, 'import numpy as np\n'), ((193468, 193500), 'numpy.any', 'np.any', (['(T > T_chp_in_max_emrgncy)'], {}), '(T > T_chp_in_max_emrgncy)\n', (193474, 193500), True, 'import numpy as np\n'), ((194247, 194263), 'numpy.abs', 'np.abs', (['mod_diff'], {}), '(mod_diff)\n', (194253, 194263), True, 'import numpy as np\n'), ((197028, 197070), 'numpy.abs', 'np.abs', (['(startuptsteps - shutdown_factor_th)'], {}), '(startuptsteps - shutdown_factor_th)\n', (197034, 197070), 
True, 'import numpy as np\n'), ((211954, 211965), 'numpy.abs', 'np.abs', (['fp1'], {}), '(fp1)\n', (211960, 211965), True, 'import numpy as np\n'), ((211976, 211987), 'numpy.abs', 'np.abs', (['fp0'], {}), '(fp0)\n', (211982, 211987), True, 'import numpy as np\n'), ((35068, 35083), 'numpy.abs', 'np.abs', (['dm_port'], {}), '(dm_port)\n', (35074, 35083), True, 'import numpy as np\n'), ((107154, 107169), 'numpy.exp', 'np.exp', (['rs_ntus'], {}), '(rs_ntus)\n', (107160, 107169), True, 'import numpy as np\n'), ((165427, 165454), 'numpy.log', 'np.log', (['(2 / (r_i / r_o + 1))'], {}), '(2 / (r_i / r_o + 1))\n', (165433, 165454), True, 'import numpy as np\n'), ((116196, 116238), 'numpy.maximum', 'np.maximum', (['res[stepnum - 1]', 'res[stepnum]'], {}), '(res[stepnum - 1], res[stepnum])\n', (116206, 116238), True, 'import numpy as np\n')] |
from activfuncs import plot, x
import numpy as np
def softplus(x):
    """Numerically stable softplus, log(1 + exp(x)).

    Implemented as np.logaddexp(0, x) so that large positive inputs do not
    overflow: np.exp(1000) is inf (with a RuntimeWarning), whereas
    logaddexp(0, 1000) returns ~1000 exactly. Accepts scalars or arrays and
    broadcasts elementwise, matching the original behavior for normal inputs.
    """
    return np.logaddexp(0, x)
plot(softplus, yaxis=(-0.4, 1.4))
| [
"numpy.exp",
"activfuncs.plot"
] | [((100, 133), 'activfuncs.plot', 'plot', (['softplus'], {'yaxis': '(-0.4, 1.4)'}), '(softplus, yaxis=(-0.4, 1.4))\n', (104, 133), False, 'from activfuncs import plot, x\n'), ((88, 97), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (94, 97), True, 'import numpy as np\n')] |
import gym
import numpy as np
import os
import random
import time
import pandas
from tqdm import tqdm
import matplotlib.pyplot as plt
from sklearn.model_selection import ParameterGrid
np.random.seed(42)
random.seed(42)
def print_state_values_tabular(Q, size=8):
    """Render the greedy state values max_a Q[s][a] as a size-by-size text grid."""
    divider = "------------------------------------------------"
    state_values = [max(action_values) for action_values in Q]
    print("\n\t\t State Value")
    state = 0
    for _ in range(size):
        print(divider)
        cells = []
        for _ in range(size):
            value = state_values[state]
            state += 1
            # Non-negative values get a leading space so columns line up.
            cells.append((" %.2f|" if value >= 0 else "%.2f|") % value)
        print("".join(cells))
    print(divider)
def print_policy_tabular(Q, size=8):
    """Render the greedy action per state ('l', 's', 'r', 'n') as a size-by-size grid."""
    actions_names = ['l', 's', 'r', 'n']
    divider = "------------------------------------------------"
    print("\n\t\t Policy/Actions")
    state = 0
    for _ in range(size):
        print(divider)
        for _ in range(size):
            # Ties between equally-valued actions are broken randomly by _argmax.
            print(" %s |" % actions_names[_argmax(Q[state])], end="")
            state += 1
        print("")
    print(divider)
def generate_stats(env, Q):
    """Estimate the policy's win rate over 100 evaluation episodes.

    An episode counts as a win when the reward of its final step equals 1.
    Returns the fraction of winning episodes (0.0 .. 1.0).
    """
    trials = 100
    wins = sum(1 for _ in range(trials) if _run(env, Q)[-1][-1] == 1)
    return wins / trials
def play(env, Q):
    """Render one episode in the terminal, acting greedily with respect to Q."""
    _run(env, Q, display=True)
def plot_episode_return(data):
    """Line-plot the per-episode cumulative reward and display the figure."""
    plt.plot(data)
    plt.xlabel("Episode")
    plt.ylabel("Cumulative Reward")
    plt.show()
def _argmax(Q):
# Find the action with maximum value
actions = [a for a, v in enumerate(Q) if v == max(Q)]
return random.choice(actions)
def _run(env, Q, eps_params=None, display=False):
    """Roll out one episode in ``env`` following an (epsilon-)greedy policy w.r.t. Q.

    Parameters
    ----------
    env : gym.Env
        Discrete-state environment; ``env.env.s`` is read for the current state.
    Q : list of list of float
        Tabular action values indexed as Q[state][action].
    eps_params : tuple or None
        Optional ``(n0, n)`` pair where ``n`` maps state -> visit count (shared
        with and mutated by the caller); epsilon then decays as
        n0 / (n0 + n[state]). When None, epsilon stays 0 (purely greedy).
    display : bool
        If True, clear the terminal and render each step with a 1-second pause.

    Returns
    -------
    list
        One ``[state, action, reward]`` triple per step taken.
    """
    env.reset()
    episode = []
    eps = 0
    if display:
        env.render()
    while True:
        # NOTE(review): reads the unwrapped environment's state attribute
        # directly; assumes a discrete env exposing ``s`` -- confirm for other envs.
        state = env.env.s
        # If epsilon greedy params is defined
        if eps_params is not None:
            n0, n = eps_params
            # Define the epsilon
            eps = n0/(n0 + n[state])
        # Select the action prob
        p = np.random.random()
        # epsilon-greedy for exploration vs exploitation
        if p < (1 - eps):
            action = _argmax(Q[state])
        else:
            action = np.random.choice(env.action_space.n)
        # Run the action
        _, reward, done, _ = env.step(action)
        # Add step to the episode
        episode.append([state, action, reward])
        if display:
            os.system('clear')
            env.render()
            time.sleep(1)
        if done:
            break
    return episode
def _learn_mc_tabular(env, episodes, gamma, n0, disable_tqdm=False):
    """First-visit Monte Carlo control with an epsilon-greedy behaviour policy.

    Parameters
    ----------
    env : gym.Env
        Discrete environment with ``observation_space.n`` / ``action_space.n``.
    episodes : int
        Number of episodes to sample.
    gamma : float
        Discount factor applied when accumulating the return backwards.
    n0 : float
        Epsilon-schedule constant; eps = n0 / (n0 + visits(state)).
    disable_tqdm : bool
        Suppress the progress bar when True.

    Returns
    -------
    (Q, stats)
        Q is the learned action-value table; ``stats['return']`` records, per
        episode, the value of G at the final step of the backward pass
        (i.e. the reward of the last transition), as in the original code.
    """
    # Initialize state-action values
    Q = [[0 for _ in range(env.action_space.n)] for _ in range(env.observation_space.n)]
    # Number of visits for each state
    n = {s: 0 for s in range(env.observation_space.n)}
    # Number of selections for each (state, action) pair
    na = {(s, a): 0 for s in range(env.observation_space.n) for a in range(env.action_space.n)}
    stats = {'return': []}
    for _ in tqdm(range(episodes), disable=disable_tqdm):
        G = 0
        # Run an episode
        episode = _run(env, Q, eps_params=(n0, n))
        # Pre-compute the first index at which each (state, action) pair occurs,
        # so the first-visit check below is O(1) instead of rescanning the
        # episode prefix on every backward step (was O(len(episode)^2)).
        first_visit = {}
        for idx, (s, a, _r) in enumerate(episode):
            first_visit.setdefault((s, a), idx)
        for i in reversed(range(len(episode))):
            s_t, a_t, r_t = episode[i]
            state_action = (s_t, a_t)
            # Cumulative discounted return from step i onward
            G = gamma * G + r_t
            if first_visit[state_action] == i:
                # Increment the state visits
                n[s_t] += 1
                # Increment the action selection count
                na[state_action] += 1
                # Running-average step size 1/N(s, a)
                alpha = 1 / na[state_action]
                # Update the action-value toward the sampled return
                Q[s_t][a_t] += alpha * (G - Q[s_t][a_t])
            if i == len(episode) - 1:
                stats['return'].append(G)
    return Q, stats
def train_tabular(stochastic, episodes=10000, gamma=0.9, n0=10):
    """Build a FrozenLake8x8 environment, learn a tabular policy with Monte
    Carlo control, plot the per-episode return curve, and return (Q, env)."""
    environment = gym.make('FrozenLake8x8-v1', is_slippery=stochastic)
    # Re-seed every RNG involved so repeated runs are reproducible
    np.random.seed(42)
    random.seed(42)
    environment.seed(42)
    # Learn a policy with first-visit MC control
    Q, training_stats = _learn_mc_tabular(
        environment, episodes=episodes, gamma=gamma, n0=n0, disable_tqdm=False)
    plot_episode_return(training_stats['return'])
    return Q, environment
def grid_search_tabular(stochastic):
    """Exhaustively evaluate MC-control hyper-parameters on FrozenLake8x8.

    For every combination in the grid, the learner is retrained from a fixed
    seed, evaluated over 100 episodes, and the win rate plus wall-clock
    training time are printed as a pandas DataFrame.

    Parameters
    ----------
    stochastic : bool
        Whether the environment is slippery; the stochastic case uses a
        coarser grid with larger episode budgets.
    """
    if stochastic:
        param_grid = {'n0': [1, 100, 1000, 10000], 'gamma': [1, 0.9, 0.1], 'episodes': [10000, 100000]}
    else:
        param_grid = {'n0': [0.1, 1, 10], 'gamma': [1, 0.9, 0.5, 0.1], 'episodes': [100, 1000]}
    env = gym.make('FrozenLake8x8-v1', is_slippery=stochastic)
    # Collect one result dict per configuration and build the DataFrame once:
    # DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0.
    rows = []
    for c in ParameterGrid(param_grid):
        # Reset the seeds so every configuration starts from identical randomness
        np.random.seed(42)
        random.seed(42)
        env.seed(42)
        tic = time.time()
        # Learn policy
        Q, stats = _learn_mc_tabular(env, **c, disable_tqdm=True)
        elapsed_time = time.time() - tic
        # Evaluate the learned policy: percentage of episodes ending in a win
        win = generate_stats(env, Q) * 100
        rows.append({'n0': c['n0'],
                     'gamma': c['gamma'],
                     'episodes': c['episodes'],
                     'win/loss (%)': win,
                     'elapsed time (s)': elapsed_time})
    results = pandas.DataFrame(rows, columns=['n0', 'gamma', 'episodes', 'win/loss (%)', 'elapsed time (s)'])
    print(results)
if __name__ == '__main__':
    # Train on the deterministic (non-slippery) 8x8 lake with a small budget,
    # then render one episode in the terminal.
    Q, env = train_tabular(stochastic=False, episodes=100, gamma=0.9, n0=1)
    play(env, Q)
    #print_state_values_tabular(Q)
    #print(generate_stats(env, Q)*100)
| [
"sklearn.model_selection.ParameterGrid",
"random.choice",
"matplotlib.pyplot.ylabel",
"numpy.random.random",
"numpy.random.choice",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"random.seed",
"time.sleep",
"numpy.random.seed",
"pandas.DataFrame",
"os.system",
"time.time",
"gym.mak... | [((184, 202), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (198, 202), True, 'import numpy as np\n'), ((203, 218), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (214, 218), False, 'import random\n'), ((1651, 1672), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (1661, 1672), True, 'import matplotlib.pyplot as plt\n'), ((1677, 1708), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Reward"""'], {}), "('Cumulative Reward')\n", (1687, 1708), True, 'import matplotlib.pyplot as plt\n'), ((1713, 1727), 'matplotlib.pyplot.plot', 'plt.plot', (['data'], {}), '(data)\n', (1721, 1727), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1742), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1740, 1742), True, 'import matplotlib.pyplot as plt\n'), ((1901, 1923), 'random.choice', 'random.choice', (['actions'], {}), '(actions)\n', (1914, 1923), False, 'import random\n'), ((4606, 4658), 'gym.make', 'gym.make', (['"""FrozenLake8x8-v1"""'], {'is_slippery': 'stochastic'}), "('FrozenLake8x8-v1', is_slippery=stochastic)\n", (4614, 4658), False, 'import gym\n'), ((4689, 4707), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4703, 4707), True, 'import numpy as np\n'), ((4712, 4727), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (4723, 4727), False, 'import random\n'), ((5266, 5318), 'gym.make', 'gym.make', (['"""FrozenLake8x8-v1"""'], {'is_slippery': 'stochastic'}), "('FrozenLake8x8-v1', is_slippery=stochastic)\n", (5274, 5318), False, 'import gym\n'), ((5334, 5427), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'columns': "['n0', 'gamma', 'episodes', 'win/loss (%)', 'elapsed time (s)']"}), "(columns=['n0', 'gamma', 'episodes', 'win/loss (%)',\n 'elapsed time (s)'])\n", (5350, 5427), False, 'import pandas\n'), ((5446, 5471), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['param_grid'], {}), '(param_grid)\n', (5459, 5471), False, 'from 
sklearn.model_selection import ParameterGrid\n'), ((2415, 2433), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2431, 2433), True, 'import numpy as np\n'), ((5515, 5533), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5529, 5533), True, 'import numpy as np\n'), ((5542, 5557), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (5553, 5557), False, 'import random\n'), ((5594, 5605), 'time.time', 'time.time', ([], {}), '()\n', (5603, 5605), False, 'import time\n'), ((5727, 5738), 'time.time', 'time.time', ([], {}), '()\n', (5736, 5738), False, 'import time\n'), ((2600, 2636), 'numpy.random.choice', 'np.random.choice', (['env.action_space.n'], {}), '(env.action_space.n)\n', (2616, 2636), True, 'import numpy as np\n'), ((2857, 2875), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (2866, 2875), False, 'import os\n'), ((2913, 2926), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2923, 2926), False, 'import time\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 22 15:58:57 2021
@author: javier
"""
import numpy as np
def abs_v(x, y):
return np.abs(x - y)
def quadratic(x, y):
return (x-y) * (x-y)
def square(x, y):
return np.sqrt(abs_v(x, y))
def squarewise(x, y):
return np.abs(np.sqrt(x) - np.sqrt(y))
def abs_square(x, y):
return np.abs(x*x - y*y)
def root_square(x, y):
return (np.sqrt(x) - np.sqrt(y))**2 | [
"numpy.abs",
"numpy.sqrt"
] | [((156, 169), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (162, 169), True, 'import numpy as np\n'), ((368, 389), 'numpy.abs', 'np.abs', (['(x * x - y * y)'], {}), '(x * x - y * y)\n', (374, 389), True, 'import numpy as np\n'), ((309, 319), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (316, 319), True, 'import numpy as np\n'), ((322, 332), 'numpy.sqrt', 'np.sqrt', (['y'], {}), '(y)\n', (329, 332), True, 'import numpy as np\n'), ((422, 432), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (429, 432), True, 'import numpy as np\n'), ((435, 445), 'numpy.sqrt', 'np.sqrt', (['y'], {}), '(y)\n', (442, 445), True, 'import numpy as np\n')] |
import numpy as np
from scipy.spatial import Voronoi
from scipy.spatial import Delaunay
from ..graph import Graph
from ...core.utils import as_id_array
class VoronoiGraph(Graph):
"""Graph of a voronoi grid.
Examples
--------
>>> from landlab.graph import VoronoiGraph
"""
def __init__(self, nodes, **kwds):
"""Create a voronoi grid.
Parameters
----------
nodes : tuple of array_like
Coordinates of every node. First *y*, then *x*.
Examples
--------
>>> from landlab.graph import VoronoiGraph
>>> node_x = [0, 1, 2,
... 1, 2, 3]
>>> node_y = [0, 0, 0,
... 2, 2, 2]
>>> graph = VoronoiGraph((node_y, node_x))
>>> graph.x_of_node
array([ 0., 1., 2., 1., 2., 3.])
>>> graph.y_of_node
array([ 0., 0., 0., 2., 2., 2.])
>>> graph.nodes_at_link # doctest: +NORMALIZE_WHITESPACE
array([[0, 1], [1, 2],
[0, 3], [1, 3], [1, 4], [2, 4], [2, 5],
[3, 4], [4, 5]])
>>> graph.links_at_node # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 2, -1, -1], [ 1, 4, 3, 0], [ 6, 5, 1, -1],
[ 7, 2, 3, -1], [ 8, 7, 4, 5], [ 8, 6, -1, -1]])
>>> graph.links_at_patch # doctest: +NORMALIZE_WHITESPACE
array([[3, 2, 0], [5, 4, 1], [4, 7, 3], [6, 8, 5]])
>>> graph.nodes_at_patch # doctest: +NORMALIZE_WHITESPACE
array([[3, 0, 1], [4, 1, 2], [4, 3, 1], [5, 4, 2]])
"""
# xy_sort = kwds.pop('xy_sort', True)
# rot_sort = kwds.pop('rot_sort', True)
max_node_spacing = kwds.pop('max_node_spacing', None)
from .ext.delaunay import _setup_links_at_patch, remove_tris
node_y, node_x = (np.asarray(nodes[0], dtype=float),
np.asarray(nodes[1], dtype=float))
delaunay = Delaunay(list(zip(node_x, node_y)))
# nodes_at_patch = delaunay.simplices
nodes_at_patch = np.array(delaunay.simplices, dtype=int)
neighbors_at_patch = np.array(delaunay.neighbors, dtype=int)
if max_node_spacing is not None:
max_node_dist = np.ptp(delaunay.simplices, axis=1)
bad_patches = as_id_array(np.where(max_node_dist >
max_node_spacing)[0])
if len(bad_patches) > 0:
remove_tris(nodes_at_patch, neighbors_at_patch, bad_patches)
nodes_at_patch = nodes_at_patch[:-len(bad_patches), :]
neighbors_at_patch = neighbors_at_patch[:-len(bad_patches), :]
n_patches = len(nodes_at_patch)
n_shared_links = np.count_nonzero(neighbors_at_patch > -1)
n_links = 3 * n_patches - n_shared_links // 2
links_at_patch = np.empty((n_patches, 3), dtype=int)
nodes_at_link = np.empty((n_links, 2), dtype=int)
_setup_links_at_patch(nodes_at_patch,
neighbors_at_patch,
nodes_at_link, links_at_patch)
super(VoronoiGraph, self).__init__((node_y.flat, node_x.flat),
links=nodes_at_link,
patches=links_at_patch)
| [
"numpy.ptp",
"numpy.where",
"numpy.asarray",
"numpy.count_nonzero",
"numpy.array",
"numpy.empty"
] | [((2043, 2082), 'numpy.array', 'np.array', (['delaunay.simplices'], {'dtype': 'int'}), '(delaunay.simplices, dtype=int)\n', (2051, 2082), True, 'import numpy as np\n'), ((2112, 2151), 'numpy.array', 'np.array', (['delaunay.neighbors'], {'dtype': 'int'}), '(delaunay.neighbors, dtype=int)\n', (2120, 2151), True, 'import numpy as np\n'), ((2719, 2760), 'numpy.count_nonzero', 'np.count_nonzero', (['(neighbors_at_patch > -1)'], {}), '(neighbors_at_patch > -1)\n', (2735, 2760), True, 'import numpy as np\n'), ((2841, 2876), 'numpy.empty', 'np.empty', (['(n_patches, 3)'], {'dtype': 'int'}), '((n_patches, 3), dtype=int)\n', (2849, 2876), True, 'import numpy as np\n'), ((2901, 2934), 'numpy.empty', 'np.empty', (['(n_links, 2)'], {'dtype': 'int'}), '((n_links, 2), dtype=int)\n', (2909, 2934), True, 'import numpy as np\n'), ((1819, 1852), 'numpy.asarray', 'np.asarray', (['nodes[0]'], {'dtype': 'float'}), '(nodes[0], dtype=float)\n', (1829, 1852), True, 'import numpy as np\n'), ((1880, 1913), 'numpy.asarray', 'np.asarray', (['nodes[1]'], {'dtype': 'float'}), '(nodes[1], dtype=float)\n', (1890, 1913), True, 'import numpy as np\n'), ((2222, 2256), 'numpy.ptp', 'np.ptp', (['delaunay.simplices'], {'axis': '(1)'}), '(delaunay.simplices, axis=1)\n', (2228, 2256), True, 'import numpy as np\n'), ((2295, 2337), 'numpy.where', 'np.where', (['(max_node_dist > max_node_spacing)'], {}), '(max_node_dist > max_node_spacing)\n', (2303, 2337), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 25 13:53:40 2017
@author: ratnadeepb
@License: MIT
"""
# System Imports
import numpy as np
import sys
# Local imports
from InnerProductSpaces.dot import dot
from InnerProductSpaces.norm import norm
def angle(u, v, op="radians"):
    """Return the angle between vectors u and v.

    Parameters
    ----------
    u, v : sequence of numbers
        The two vectors.
    op : str
        Either "radians" (the default) or "degrees"; any other value
        terminates the program via sys.exit, as before.

    Returns
    -------
    float
        The angle in the requested unit, or np.nan when either vector has
        zero norm (the angle is undefined for a zero vector).
    """
    if op not in ("radians", "degrees"):
        sys.exit("At this time we only handle radians and degrees")
    n_u = norm(u)
    n_v = norm(v)
    # The angle does not exist if one of them is a zero vector.
    # np.NaN was removed in numpy 2.0; np.nan is the canonical spelling.
    if n_u == 0 or n_v == 0:
        return np.nan
    # Reuse the norms computed above instead of recomputing norm(u)/norm(v),
    # and clamp the cosine into arccos's [-1, 1] domain to guard against
    # floating-point round-off (e.g. 1.0000000000000002 -> domain warning/nan).
    cos_theta = dot(u, v) / (n_u * n_v)
    a = np.arccos(min(1.0, max(-1.0, cos_theta)))
    if op == "radians":
        return a
    else:
        return a * (180 / np.pi)
if __name__ == "__main__":
# u = [6, 2]
u = (-3, 3)
# v = [1, 4]
v = (5, 5)
a_r = angle(u, v)
if np.isnan(a_r):
print("The angle does not exist")
else:
a_d = angle(u, v, "degrees")
print("The angle between u and v is {} radians".format(np.round(a_r,
decimals=4)))
print("This is the same as {} degrees".format(np.round(a_d,
decimals=4))) | [
"InnerProductSpaces.dot.dot",
"InnerProductSpaces.norm.norm",
"numpy.isnan",
"sys.exit",
"numpy.round"
] | [((313, 320), 'InnerProductSpaces.norm.norm', 'norm', (['u'], {}), '(u)\n', (317, 320), False, 'from InnerProductSpaces.norm import norm\n'), ((331, 338), 'InnerProductSpaces.norm.norm', 'norm', (['v'], {}), '(v)\n', (335, 338), False, 'from InnerProductSpaces.norm import norm\n'), ((839, 852), 'numpy.isnan', 'np.isnan', (['a_r'], {}), '(a_r)\n', (847, 852), True, 'import numpy as np\n'), ((393, 452), 'sys.exit', 'sys.exit', (['"""At this time we only handle radians and degrees"""'], {}), "('At this time we only handle radians and degrees')\n", (401, 452), False, 'import sys\n'), ((595, 604), 'InnerProductSpaces.dot.dot', 'dot', (['u', 'v'], {}), '(u, v)\n', (598, 604), False, 'from InnerProductSpaces.dot import dot\n'), ((608, 615), 'InnerProductSpaces.norm.norm', 'norm', (['u'], {}), '(u)\n', (612, 615), False, 'from InnerProductSpaces.norm import norm\n'), ((618, 625), 'InnerProductSpaces.norm.norm', 'norm', (['v'], {}), '(v)\n', (622, 625), False, 'from InnerProductSpaces.norm import norm\n'), ((1006, 1031), 'numpy.round', 'np.round', (['a_r'], {'decimals': '(4)'}), '(a_r, decimals=4)\n', (1014, 1031), True, 'import numpy as np\n'), ((1103, 1128), 'numpy.round', 'np.round', (['a_d'], {'decimals': '(4)'}), '(a_d, decimals=4)\n', (1111, 1128), True, 'import numpy as np\n')] |
from functools import lru_cache
import numpy as np
from .geometry import Circle, Point, Rectangle
def bbox_center(region):
    """Return the center of the bounding box of an scikit-image region.

    Parameters
    ----------
    region
        A scikit-image region as calculated by skimage.measure.regionprops().

    Returns
    -------
    point : :class:`~pylinac.core.geometry.Point`
    """
    # skimage's regionprops bbox is (min_row, min_col, max_row, max_col)
    min_row, min_col, max_row, max_col = region.bbox
    center_y = abs(min_row - max_row) / 2 + min(min_row, max_row)
    center_x = abs(min_col - max_col) / 2 + min(min_col, max_col)
    return Point(center_x, center_y)
class DiskROI(Circle):
    """An class representing a disk-shaped Region of Interest."""

    def __init__(self, array, angle, roi_radius, dist_from_center, phantom_center):
        """
        Parameters
        ----------
        array : ndarray
            The 2D array representing the image the disk is on.
        angle : int, float
            The angle of the ROI in degrees from the phantom center.
        roi_radius : int, float
            The radius of the ROI from the center of the phantom.
        dist_from_center : int, float
            The distance of the ROI from the phantom center.
        phantom_center : tuple
            The location of the phantom center.
        """
        center = self._get_shifted_center(angle, dist_from_center, phantom_center)
        super().__init__(center_point=center, radius=roi_radius)
        self._array = array

    @staticmethod
    def _get_shifted_center(angle, dist_from_center, phantom_center):
        """The center of the ROI; corrects for phantom dislocation and roll."""
        y_shift = np.sin(np.deg2rad(angle)) * dist_from_center
        x_shift = np.cos(np.deg2rad(angle)) * dist_from_center
        return Point(phantom_center.x + x_shift, phantom_center.y + y_shift)

    @property
    def pixel_value(self):
        """The median pixel value of the ROI."""
        masked_img = self.circle_mask()
        return np.nanmedian(masked_img)

    @property
    def std(self):
        """The standard deviation of the pixel values."""
        masked_img = self.circle_mask()
        return np.nanstd(masked_img)

    # NOTE(review): lru_cache on an instance method keeps the cached instance
    # alive via the cache (ruff B019); tolerable with maxsize=1, but a
    # per-instance cache would be cleaner.
    @lru_cache(maxsize=1)
    def circle_mask(self):
        """Return a mask of the image, only showing the circular ROI."""
        # http://scikit-image.org/docs/dev/auto_examples/plot_camera_numpy.html
        # np.float was removed in numpy 1.24 and np.NaN in numpy 2.0; use the
        # builtin float and np.nan (both were aliases, so values are identical).
        masked_array = np.copy(self._array).astype(float)
        l_x, l_y = self._array.shape[0], self._array.shape[1]
        X, Y = np.ogrid[:l_x, :l_y]
        outer_disk_mask = (X - self.center.y) ** 2 + (Y - self.center.x) ** 2 > self.radius ** 2
        masked_array[outer_disk_mask] = np.nan
        return masked_array
class LowContrastDiskROI(DiskROI):
    """A class for analyzing the low-contrast disks."""

    def __init__(self, array, angle, roi_radius, dist_from_center, phantom_center, contrast_threshold=None, background=None,
                 cnr_threshold=None):
        """
        Parameters
        ----------
        contrast_threshold : float, int
            The threshold for considering a bubble to be "seen".
        """
        super().__init__(array, angle, roi_radius, dist_from_center, phantom_center)
        self.contrast_threshold = contrast_threshold
        self.cnr_threshold = cnr_threshold
        self.background = background

    @property
    def contrast_to_noise(self):
        """The contrast to noise ratio of the bubble: (Signal - Background)/Stdev."""
        signal_delta = self.pixel_value - self.background
        return abs(signal_delta) / self.std

    @property
    def contrast(self):
        """The contrast of the bubble compared to background: (ROI - backg) / (ROI + backg)."""
        roi_value = self.pixel_value
        backg = self.background
        return abs((roi_value - backg) / (roi_value + backg))

    @property
    def cnr_constant(self):
        """The contrast-to-noise value times the bubble diameter."""
        return self.diameter * self.contrast_to_noise

    @property
    def contrast_constant(self):
        """The contrast value times the bubble diameter."""
        return self.diameter * self.contrast

    @property
    def passed(self):
        """Whether the disk ROI contrast passed."""
        return self.contrast > self.contrast_threshold

    @property
    def passed_contrast_constant(self):
        """Boolean specifying if ROI pixel value was within tolerance of the nominal value."""
        return self.contrast_constant > self.contrast_threshold

    @property
    def passed_cnr_constant(self):
        """Boolean specifying if ROI pixel value was within tolerance of the nominal value."""
        return self.cnr_constant > self.cnr_threshold

    @property
    def plot_color(self):
        """Return one of two colors depending on if ROI passed."""
        if self.passed:
            return 'blue'
        return 'red'

    @property
    def plot_color_constant(self):
        """Return one of two colors depending on if ROI passed."""
        if self.passed_contrast_constant:
            return 'blue'
        return 'red'

    @property
    def plot_color_cnr(self):
        """Return one of two colors depending on if ROI passed."""
        if self.passed_cnr_constant:
            return 'blue'
        return 'red'
class HighContrastDiskROI(DiskROI):
    """A class for analyzing the high-contrast disks."""

    def __init__(self, array, angle, roi_radius, dist_from_center, phantom_center, contrast_threshold, mtf_norm=None):
        """
        Parameters
        ----------
        contrast_threshold : float, int
            The threshold for considering a bubble to be "seen".
        """
        super().__init__(array, angle, roi_radius, dist_from_center, phantom_center)
        self.contrast_threshold = contrast_threshold
        self.mtf_norm = mtf_norm

    @property
    def mtf(self):
        """Michelson-style contrast of the disk: (max - min) / (max + min),
        divided by ``mtf_norm`` when one was supplied."""
        hi, lo = self.max, self.min
        value = (hi - lo) / (hi + lo)
        if self.mtf_norm is not None:
            value = value / self.mtf_norm
        return value

    @property
    def passed(self):
        """Boolean specifying if ROI pixel value was within tolerance of the nominal value."""
        return self.mtf > self.contrast_threshold

    @property
    def plot_color(self):
        """Return one of two colors depending on if ROI passed."""
        if self.passed:
            return 'blue'
        return 'red'

    @property
    def max(self):
        """The max pixel value of the ROI."""
        return np.nanmax(self.circle_mask())

    @property
    def min(self):
        """The min pixel value of the ROI."""
        return np.nanmin(self.circle_mask())
class RectangleROI(Rectangle):
    """Class that represents a rectangular ROI."""
    def __init__(self, array, width, height, angle, dist_from_center, phantom_center):
        """Construct a rectangle ROI centered at a polar offset from the phantom center.

        Parameters
        ----------
        array : ndarray
            The 2D image array the ROI samples from.
        width, height : int, float
            Rectangle dimensions (passed to the Rectangle base with as_int=True).
        angle : int, float
            Angle in degrees from the phantom center to the ROI center.
        dist_from_center : int, float
            Distance from the phantom center to the ROI center.
        phantom_center
            Location of the phantom center; its ``.x``/``.y`` attributes are read.
        """
        # Polar -> Cartesian offset of the ROI center relative to the phantom center
        y_shift = np.sin(np.deg2rad(angle)) * dist_from_center
        x_shift = np.cos(np.deg2rad(angle)) * dist_from_center
        center = Point(phantom_center.x + x_shift, phantom_center.y + y_shift)
        super().__init__(width, height, center, as_int=True)
        self._array = array
    @property
    def pixel_array(self):
        """The pixel array within the ROI."""
        # NOTE(review): first axis sliced by corner .x, second by .y -- confirm
        # this matches the image's (row, column) convention.
        return self._array[self.bl_corner.x:self.tr_corner.x, self.bl_corner.y:self.tr_corner.y] | [
"numpy.copy",
"numpy.nanstd",
"numpy.nanmedian",
"numpy.deg2rad",
"numpy.nanmax",
"numpy.nanmin",
"functools.lru_cache"
] | [((2158, 2178), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (2167, 2178), False, 'from functools import lru_cache\n'), ((1958, 1982), 'numpy.nanmedian', 'np.nanmedian', (['masked_img'], {}), '(masked_img)\n', (1970, 1982), True, 'import numpy as np\n'), ((2130, 2151), 'numpy.nanstd', 'np.nanstd', (['masked_img'], {}), '(masked_img)\n', (2139, 2151), True, 'import numpy as np\n'), ((6457, 6478), 'numpy.nanmax', 'np.nanmax', (['masked_img'], {}), '(masked_img)\n', (6466, 6478), True, 'import numpy as np\n'), ((6614, 6635), 'numpy.nanmin', 'np.nanmin', (['masked_img'], {}), '(masked_img)\n', (6623, 6635), True, 'import numpy as np\n'), ((1634, 1651), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (1644, 1651), True, 'import numpy as np\n'), ((1697, 1714), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (1707, 1714), True, 'import numpy as np\n'), ((2382, 2402), 'numpy.copy', 'np.copy', (['self._array'], {}), '(self._array)\n', (2389, 2402), True, 'import numpy as np\n'), ((6833, 6850), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (6843, 6850), True, 'import numpy as np\n'), ((6896, 6913), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (6906, 6913), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
###AWG
sys.path.append('/home/pulseepr/Sources/AWG/Examples/python')
###sys.path.append('/home/anatoly/AWG/spcm_examples/python')
##sys.path.append('/home/anatoly/awg_files/python')
#sys.path.append('C:/Users/User/Desktop/Examples/python')
import numpy as np
import atomize.device_modules.config.config_utils as cutil
import atomize.general_modules.general_functions as general
from pyspcm import *
from spcm_tools import *
class Spectrum_M4I_4450_X8:
    def __init__(self):
        """
        Collect configuration paths, hardware limits, conversion tables and
        default acquisition settings. When the process is started with 'test'
        as the first command line argument the driver runs in test mode and
        the query functions return stub values instead of touching hardware.
        """
        #### Inizialization
        # setting path to *.ini file
        self.path_current_directory = os.path.dirname(__file__)
        self.path_config_file = os.path.join(self.path_current_directory, 'config','Spectrum_M4I_4450_X8_config.ini')
        # configuration data
        #config = cutil.read_conf_util(self.path_config_file)
        self.specific_parameters = cutil.read_specific_parameters(self.path_config_file)
        # Channel assignments
        #ch0 = self.specific_parameters['ch0'] # TRIGGER
        # conversion tables between user-facing strings and register values
        self.timebase_dict = {'ms': 1000000, 'us': 1000, 'ns': 1, }
        self.channel_dict = {'CH0': 0, 'CH1': 1, }
        self.coupling_dict = {'DC': 0, 'AC': 1, }
        self.impedance_dict = {'1 M': 0, '50': 1, }
        # sample rates supported by the card, in Hz
        self.sample_rate_list = [1907, 3814, 7629, 15258, 30517, 61035, 122070, 244140, 488281, 976562, \
                                1953125, 3906250, 7812500, 15625000, 31250000, 62500000, 125000000, \
                                250000000, 500000000]
        # available input ranges (mV) for the two analog input paths
        self.hf_mode_range_list = [500, 1000, 2500, 5000]
        self.buffered_mode_range_list = [200, 500, 1000, 2000, 5000, 10000]
        # Limits and Ranges (depends on the exact model):
        #clock = float(self.specific_parameters['clock'])
        # Delays and restrictions
        # MaxDACValue corresponds to the amplitude of the output signal; MaxDACValue - Amplitude and so on
        # lMaxDACValue = int32 (0)
        # spcm_dwGetParam_i32 (hCard, SPC_MIINST_MAXADCVALUE, byref(lMaxDACValue))
        # lMaxDACValue.value = lMaxDACValue.value - 1
        #maxCAD = 8191 # MaxCADValue of the AWG card - 1
        #minCAD = -8192
        self.amplitude_max = 2500 # mV
        self.amplitude_min = 80 # mV
        self.sample_rate_max = 500 # MHz
        self.sample_rate_min = 0.001907 # MHz
        self.sample_ref_clock_max = 100 # MHz
        self.sample_ref_clock_min = 10 # MHz
        self.averages_max = 100000
        self.delay_max = 8589934576
        self.delay_min = 0
        # Test run parameters
        # These values are returned by the modules in the test run
        if len(sys.argv) > 1:
            self.test_flag = sys.argv[1]
        else:
            self.test_flag = 'None'
        if self.test_flag != 'test':
            # Collect all parameters for digitizer settings
            self.sample_rate = 500 # MHz
            self.clock_mode = 1 # 1 is Internal; 32 is External
            self.reference_clock = 100 # MHz
            self.card_mode = 1 # 1 is Single; 2 is Average (Multi);
            self.trigger_ch = 2 # 1 is Software; 2 is External
            self.trigger_mode = 1 # 1 is Positive; 2 is Negative; 8 is High; 10 is Low
            self.aver = 2 # 0 is infinity
            self.delay = 0 # in sample rate; step is 32; rounded
            self.channel = 3 # 1 is CH0; 2 is CH1; 3 is CH0 + CH1
            self.points = 128 # number of points
            self.posttrig_points = 64 # number of posttrigger points
            self.input_mode = 1 # 1 is HF mode; 0 is Buffered
            self.amplitude_0 = 500 # amplitude for CH0 in mV
            self.amplitude_1 = 500 # amplitude for CH1 in mV
            self.offset_0 = 0 # offset for CH0 in percentage
            self.offset_1 = 0 # offset for CH1 in percentage
            self.coupling_0 = 0 # coupling for CH0; AC is 1; DC is 0
            self.coupling_1 = 0 # coupling for CH1
            self.impedance_0 = 1 # impedance for CH0; 1 M is 0; 50 is 0
            self.impedance_1 = 1 # impedance for CH1;
            # change of settings
            self.setting_change_count = 0
            # state counter; 0 means card closed, 1 means opened by digitizer_setup()
            self.state = 0
            self.read = 0
            # integration window (sample indices used by digitizer_get_curve(integral = True))
            self.win_left = 0
            self.win_right = 1
        elif self.test_flag == 'test':
            # stub answers returned by the query functions in the test run
            self.test_sample_rate = '500 MHz'
            self.test_clock_mode = 'Internal'
            self.test_ref_clock = 100
            self.test_card_mode = 'Single'
            self.test_trigger_ch = 'External'
            self.test_trigger_mode = 'Positive'
            self.test_averages = 10
            self.test_delay = 0
            self.test_channel = 'CH0'
            self.test_amplitude = 'CH0: 500 mV; CH1: 500 mV'
            self.test_num_segments = 1
            self.test_points = 128
            self.test_posttrig_points = 64
            self.test_input_mode = 'HF'
            self.test_offset = 'CH0: 10'
            self.test_coupling = 'CH0: DC'
            self.test_impedance = 'CH0: 50'
            self.test_integral = 10**-9 # in V*s
            # Collect all parameters for digitizer settings
            self.sample_rate = 500
            self.clock_mode = 1
            self.reference_clock = 100
            self.card_mode = 1
            self.trigger_ch = 2
            self.trigger_mode = 1
            self.aver = 2
            self.delay = 0
            self.channel = 3
            self.points = 128
            self.posttrig_points = 64
            self.input_mode = 1
            self.amplitude_0 = 500
            self.amplitude_1 = 500
            self.offset_0 = 0
            self.offset_1 = 0
            self.coupling_0 = 0
            self.coupling_1 = 0
            self.impedance_0 = 1
            self.impedance_1 = 1
            # change of settings
            self.setting_change_count = 0
            # state counter
            self.state = 0
            self.read = 0
            # integration window
            self.win_left = 0
            self.win_right = 1
# Module functions
def digitizer_name(self):
answer = 'Spectrum M4I.4450-X8'
return answer
    def digitizer_setup(self):
        """
        Write settings to the digitizer. No argument; No output
        Everything except the buffer information will be write to the digitizer
        This function should be called after all functions that change settings are called
        """
        if self.test_flag != 'test':
            if self.state == 0:
                # open card (only once; self.state flags an opened handle)
                self.hCard = spcm_hOpen ( create_string_buffer (b'/dev/spcm1') )
                self.state = 1
                if self.hCard == None:
                    general.message("No card found...")
                    sys.exit()
            else:
                pass
            # driver timeout for WAITREADY / DMA waits, in ms
            spcm_dwSetParam_i32 (self.hCard, SPC_TIMEOUT, 10000)
            # general parameters of the card; internal/external clock
            if self.clock_mode == 1:
                spcm_dwSetParam_i64 (self.hCard, SPC_SAMPLERATE, int( 1000000 * self.sample_rate ))
            elif self.clock_mode == 32:
                spcm_dwSetParam_i32 (self.hCard, SPC_CLOCKMODE, self.clock_mode)
                spcm_dwSetParam_i64 (self.hCard, SPC_REFERENCECLOCK, MEGA(self.reference_clock))
                spcm_dwSetParam_i64 (self.hCard, SPC_SAMPLERATE, int( 1000000 * self.sample_rate ) )
            # change card mode and memory
            if self.card_mode == 1:
                # Single mode: one record of self.points samples
                spcm_dwSetParam_i32(self.hCard, SPC_CARDMODE, self.card_mode)
                spcm_dwSetParam_i32(self.hCard, SPC_MEMSIZE, self.points)
                spcm_dwSetParam_i32(self.hCard, SPC_POSTTRIGGER, self.posttrig_points)
            elif self.card_mode == 2:
                # Average (Multi) mode: self.aver segments of self.points samples each
                spcm_dwSetParam_i32(self.hCard, SPC_CARDMODE, self.card_mode)
                spcm_dwSetParam_i32(self.hCard, SPC_MEMSIZE, int( self.points * self.aver ) )
                # segment size should be multiple of memory size
                spcm_dwSetParam_i32(self.hCard, SPC_SEGMENTSIZE, self.points )
                spcm_dwSetParam_i32(self.hCard, SPC_POSTTRIGGER, self.posttrig_points)
            # trigger
            spcm_dwSetParam_i32(self.hCard, SPC_TRIG_TERM, 1) # 50 Ohm trigger load
            spcm_dwSetParam_i32(self.hCard, SPC_TRIG_ORMASK, self.trigger_ch) # software / external
            if self.trigger_ch == 2:
                spcm_dwSetParam_i32(self.hCard, SPC_TRIG_EXT0_MODE, self.trigger_mode)
            # loop
            #spcm_dwSetParam_i32(self.hCard, SPC_LOOPS, self.loop)
            # trigger delay (already expressed in samples, see digitizer_trigger_delay)
            spcm_dwSetParam_i32( self.hCard, SPC_TRIG_DELAY, int(self.delay) )
            # set the output channels
            spcm_dwSetParam_i32 (self.hCard, SPC_PATH0, self.input_mode)
            spcm_dwSetParam_i32 (self.hCard, SPC_PATH1, self.input_mode)
            spcm_dwSetParam_i32 (self.hCard, SPC_CHENABLE, self.channel)
            spcm_dwSetParam_i32 (self.hCard, SPC_AMP0, self.amplitude_0)
            spcm_dwSetParam_i32 (self.hCard, SPC_AMP1, self.amplitude_1)
            # NOTE(review): '!= 1000 or != 10000' is always True, so in Buffered
            # mode the offsets are written unconditionally; 'and' was probably
            # intended — confirm against the card manual before changing.
            if ( self.amplitude_0 != 1000 or self.amplitude_0 != 10000 ) and self.input_mode == 0:
                spcm_dwSetParam_i32 (self.hCard, SPC_OFFS0, -self.offset_0 )
                spcm_dwSetParam_i32 (self.hCard, SPC_OFFS1, -self.offset_1 )
            elif self.input_mode == 1:
                spcm_dwSetParam_i32 (self.hCard, SPC_OFFS0, -self.offset_0 )
                spcm_dwSetParam_i32 (self.hCard, SPC_OFFS1, -self.offset_1 )
            spcm_dwSetParam_i32 (self.hCard, SPC_ACDC0, self.coupling_0)
            spcm_dwSetParam_i32 (self.hCard, SPC_ACDC1, self.coupling_1)
            # in HF mode impedance is fixed
            if self.input_mode == 0:
                spcm_dwSetParam_i32 (self.hCard, SPC_50OHM0, self.impedance_0 )
                spcm_dwSetParam_i32 (self.hCard, SPC_50OHM1, self.impedance_1 )
            # define the memory size / max amplitude
            #llMemSamples = int64 (self.memsize)
            #lBytesPerSample = int32(0)
            #spcm_dwGetParam_i32 (hCard, SPC_MIINST_BYTESPERSAMPLE, byref(lBytesPerSample))
            #lSetChannels = int32 (0)
            #spcm_dwGetParam_i32 (hCard, SPC_CHCOUNT, byref (lSetChannels))
            # The Spectrum driver also contains a register that holds the value of the decimal value of the full scale representation of the installed ADC. This
            # value should be used when converting ADC values (in LSB) into real-world voltage values, because this register also automatically takes any
            # specialities into account, such as slightly reduced ADC resolution with reserved codes for gain/offset compensation.
            self.lMaxDACValue = int32 (0)
            spcm_dwGetParam_i32 (self.hCard, SPC_MIINST_MAXADCVALUE, byref(self.lMaxDACValue))
            #if lMaxDACValue.value == maxCAD:
            #    pass
            #else:
            #    general.message('maxCAD value does not equal to lMaxDACValue.value')
            #    sys.exit()
            # DMA buffer size: 2 bytes per sample, times enabled channels,
            # times (points * averages) in Average mode
            if self.channel == 1 or self.channel == 2:
                if self.card_mode == 1:
                    self.qwBufferSize = uint64 (self.points * 2 * 1) # in bytes. samples with 2 bytes each, one channel active
                elif self.card_mode == 2:
                    self.qwBufferSize = uint64 (int( self.points * self.aver ) * 2 * 1)
            elif self.channel == 3:
                if self.card_mode == 1:
                    self.qwBufferSize = uint64 (self.points * 2 * 2) # in bytes. samples with 2 bytes each
                elif self.card_mode == 2:
                    self.qwBufferSize = uint64 (int( self.points * self.aver ) * 2 * 2)
            self.lNotifySize = int32 (0) # driver should notify program after all data has been transfered
            spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_WRITESETUP)
        elif self.test_flag == 'test':
            # to run several important checks
            #if self.setting_change_count == 1:
            #    if self.card_mode == 32768 and self.sequence_mode == 0:
            #        self.buf = self.define_buffer_single()[0]
            #    elif self.card_mode == 32768 and self.sequence_mode == 0:
            #        self.buf = self.define_buffer_single_joined()[0]
            #    elif self.card_mode == 512 and self.sequence_mode == 0:
            #        self.buf = self.define_buffer_multi()[0]
            #else:
            pass
    def digitizer_get_curve(self, integral = False):
        """
        Start digitizer. No argument; No output
        Default settings:
        Sample clock is 500 MHz; Clock mode is 'Internal'; Reference clock is 100 MHz; Card mode is 'Single';
        Trigger channel is 'External'; Trigger mode is 'Positive'; Number of averages is 2; Trigger delay is 0;
        Enabled channels is CH0 and CH1; Range of CH0 is '500 mV'; Range of CH1 is '500 mV';
        Number of segments is 1; Number of points is 128 samples; Posttriger points is 64;
        Input mode if 'HF'; Coupling of CH0 and CH1 are 'DC'; Impedance of CH0 and CH1 are '50';
        Horizontal offset of CH0 and CH1 are 0%;
        Returns (xs, data) or (xs, data1, data2) depending on enabled channels;
        with integral = True returns the integral(s) over the [win_left:win_right]
        window in V*s instead.
        """
        if self.test_flag != 'test':
            #spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_WRITESETUP)
            # define the buffer (page-aligned memory for DMA)
            pvBuffer = c_void_p ()
            pvBuffer = pvAllocMemPageAligned ( self.qwBufferSize.value )
            # transfer
            spcm_dwDefTransfer_i64 (self.hCard, SPCM_BUF_DATA, SPCM_DIR_CARDTOPC, self.lNotifySize, pvBuffer, uint64 (0), self.qwBufferSize)
            # start card and DMA
            spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_START | M2CMD_CARD_ENABLETRIGGER | M2CMD_DATA_STARTDMA)
            # wait for acquisition
            # dwError = 
            dwError = spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_WAITREADY | M2CMD_DATA_WAITDMA)
            # timeout Error
            if dwError == 263:
                general.message('A timeout occurred while waiting. Probably the digitizer is not triggered')
                self.digitizer_stop()
            # this is the point to do anything with the data
            lBitsPerSample = int32 (0)
            spcm_dwGetParam_i32 (self.hCard, SPC_MIINST_BITSPERSAMPLE, byref (lBitsPerSample)) # lBitsPerSample.value = 14
            pnData = cast (pvBuffer, ptr16) # cast to pointer to 16bit integer
            # raw ADC counts are converted to volts as (range_mV / 1000) * counts / lMaxDACValue
            if self.channel == 1 or self.channel == 2:
                if self.card_mode == 1:
                    if self.channel == 1:
                        data = ( self.amplitude_0 / 1000) * np.ctypeslib.as_array(pnData, shape = (int( self.qwBufferSize.value / 2 ), )) / self.lMaxDACValue.value
                    elif self.channel == 2:
                        data = ( self.amplitude_1 / 1000) * np.ctypeslib.as_array(pnData, shape = (int( self.qwBufferSize.value / 2 ), )) / self.lMaxDACValue.value
                    # time axis in seconds (sample_rate is in MHz)
                    xs = np.arange( len(data) ) / (self.sample_rate * 1000000)
                    return xs, data
                elif self.card_mode == 2:
                    if integral == False:
                        if self.channel == 1:
                            data = ( self.amplitude_0 / 1000) * np.ctypeslib.as_array(pnData, \
                                shape = (int( self.qwBufferSize.value / 4 ), )).reshape((self.aver, self.points)) / self.lMaxDACValue.value
                            #data_ave = np.sum( data, axis = 0 ) / self.aver
                            data_ave = np.average( data, axis = 0 )
                        elif self.channel == 2:
                            data = ( self.amplitude_1 / 1000) * np.ctypeslib.as_array(pnData, \
                                shape = (int( self.qwBufferSize.value / 4 ), )).reshape((self.aver, self.points)) / self.lMaxDACValue.value
                            #data_ave = np.sum( data, axis = 0 ) / self.aver
                            data_ave = np.average( data, axis = 0 )
                        xs = np.arange( len(data_ave) ) / (self.sample_rate * 1000000)
                        return xs, data_ave
                    elif integral == True:
                        if self.read == 1:
                            # full readout path: average segments, then integrate the window
                            if self.channel == 1:
                                data = ( self.amplitude_0 / 1000) * np.ctypeslib.as_array(pnData, \
                                    shape = (int( self.qwBufferSize.value / 4 ), )).reshape((self.aver, self.points)) / self.lMaxDACValue.value
                                #data_ave = np.sum( data, axis = 0 ) / self.aver
                                data_ave = np.average( data, axis = 0 )
                                integ = np.sum( data_ave[self.win_left:self.win_right] ) * ( 10**(-6) / self.sample_rate )
                                # integral in V*s
                            elif self.channel == 2:
                                data = ( self.amplitude_1 / 1000) * np.ctypeslib.as_array(pnData, \
                                    shape = (int( self.qwBufferSize.value / 4 ), )).reshape((self.aver, self.points)) / self.lMaxDACValue.value
                                #data_ave = np.sum( data, axis = 0 ) / self.aver
                                data_ave = np.average( data, axis = 0 )
                                integ = np.sum( data_ave[self.win_left:self.win_right] ) * ( 10**(-6) / self.sample_rate )
                                # integral in V*s
                        else:
                            # fast path: integrate over the whole raw buffer without reshaping
                            if self.channel == 1:
                                integ = ( self.amplitude_0 / 1000) * np.sum( np.ctypeslib.as_array(pnData, \
                                    shape = (int( self.qwBufferSize.value / 4 ), )) ) * ( 10**(-6) / self.sample_rate ) / ( self.lMaxDACValue.value * self.aver )
                                # integral in V*s
                            elif self.channel == 2:
                                integ = ( self.amplitude_1 / 1000) * np.sum( np.ctypeslib.as_array(pnData, \
                                    shape = (int( self.qwBufferSize.value / 4 ), )) ) * ( 10**(-6) / self.sample_rate ) / ( self.lMaxDACValue.value * self.aver )
                                # integral in V*s
                        return integ
            elif self.channel == 3:
                # both channels enabled: samples are interleaved CH0, CH1, CH0, CH1, ...
                if self.card_mode == 1:
                    data = np.ctypeslib.as_array(pnData, shape = (int( self.qwBufferSize.value / 2 ), ))
                    # / 1000 convertion in V
                    # CH0
                    data1 = ( data[0::2] * ( self.amplitude_0 / 1000) ) / self.lMaxDACValue.value
                    # CH1
                    data2 = ( data[1::2] * ( self.amplitude_1 / 1000) ) / self.lMaxDACValue.value
                    xs = np.arange( len(data1) ) / (self.sample_rate * 1000000)
                    return xs, data1, data2
                elif self.card_mode == 2:
                    if integral == False:
                        data = np.ctypeslib.as_array(pnData, \
                            shape = (int( self.qwBufferSize.value / 2 ), )).reshape((self.aver, 2 * self.points))
                        #data_ave = np.sum( data, axis = 0 ) / self.aver
                        data_ave = np.average( data, axis = 0 )
                        # CH0
                        data1 = ( data_ave[0::2] * ( self.amplitude_0 / 1000) ) / self.lMaxDACValue.value
                        # CH1
                        data2 = ( data_ave[1::2] * ( self.amplitude_1 / 1000) ) / self.lMaxDACValue.value
                        xs = np.arange( len(data1) ) / (self.sample_rate * 1000000)
                        return xs, data1, data2
                    elif integral == True:
                        if self.read == 1:
                            data = np.ctypeslib.as_array(pnData, \
                                shape = (int( self.qwBufferSize.value / 2 ), )).reshape((self.aver, 2 * self.points))
                            #data_ave = np.sum( data, axis = 0 ) / self.aver
                            data_ave = np.average( data, axis = 0 )
                            # CH0
                            data1 = ( np.sum( data_ave[0::2][self.win_left:self.win_right] ) * ( 10**(-6) / self.sample_rate ) * ( self.amplitude_0 / 1000) ) / ( self.lMaxDACValue.value )
                            # CH1
                            data2 = ( np.sum( data_ave[1::2][self.win_left:self.win_right] ) * ( 10**(-6) / self.sample_rate ) * ( self.amplitude_1 / 1000) ) / ( self.lMaxDACValue.value )
                            return data1, data2
                        else:
                            data = np.ctypeslib.as_array(pnData, shape = (int( self.qwBufferSize.value / 2 ), ))
                            # CH0
                            data1 = ( np.sum( data[0::2] ) * ( 10**(-6) / self.sample_rate ) * ( self.amplitude_0 / 1000) ) / ( self.lMaxDACValue.value * self.aver )
                            # CH1
                            data2 = ( np.sum( data[1::2] ) * ( 10**(-6) / self.sample_rate ) * ( self.amplitude_1 / 1000) ) / ( self.lMaxDACValue.value * self.aver )
                            return data1, data2
            #print( len(data) )
            #xs = 2*np.arange( int(qwBufferSize.value / 4) )
            # test or error message
            #general.message(dwError)
            # clean up
            #spcm_vClose (hCard)
        elif self.test_flag == 'test':
            # CHECK FOR AVERAGE MODE
            if self.card_mode == 1:
                dummy = np.zeros( self.points )
            elif self.card_mode == 2:
                dummy = np.zeros( int( self.digitizer_number_of_points() ) )
                #dummy = np.zeros( int( self.digitizer_window() ) )
            if self.channel == 1 or self.channel == 2:
                if integral == False:
                    return dummy, dummy
                elif integral == True:
                    if self.card_mode == 1:
                        return dummy, dummy
                    elif self.card_mode == 2:
                        return self.test_integral
            elif self.channel == 3:
                if integral == False:
                    return dummy, dummy, dummy
                elif integral == True:
                    if self.card_mode == 1:
                        return dummy, dummy, dummy
                    elif self.card_mode == 2:
                        return self.test_integral, self.test_integral
def digitizer_close(self):
"""
Close the digitizer. No argument; No output
"""
if self.test_flag != 'test':
# clean up
spcm_vClose ( self.hCard )
self.state == 0
elif self.test_flag == 'test':
pass
def digitizer_stop(self):
"""
Stop the digitizer. No argument; No output
"""
if self.test_flag != 'test':
# open card
#hCard = spcm_hOpen( create_string_buffer (b'/dev/spcm0') )
#if hCard == None:
# general.message("No card found...")
# sys.exit()
spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_STOP)
#general.message('Digitizer stopped')
elif self.test_flag == 'test':
pass
def digitizer_number_of_points(self, *points):
"""
Set or query number of points;
Input: digitizer_number_of_points(128); Number of points should be divisible by 16; 32 is the minimum
Default: 128;
Output: '128'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(points) == 1:
pnts = int(points[0])
if pnts < 32:
pnts = 32
general.message('Number of points must be more than 32')
if pnts % 16 != 0:
general.message('Number of points should be divisible by 16; The closest avalaibale number is used')
#self.points = int( 16*(pnts // 16) )
self.points = self.round_to_closest(pnts, 16)
else:
self.points = pnts
elif len(points) == 0:
return self.points
# to update on-the-fly
if self.state == 0:
pass
elif self.state == 1:
# change card mode and memory
if self.card_mode == 1:
spcm_dwSetParam_i32(self.hCard, SPC_MEMSIZE, self.points)
elif self.card_mode == 2:
spcm_dwSetParam_i32(self.hCard, SPC_MEMSIZE, int( self.points * self.aver ) )
spcm_dwSetParam_i32(self.hCard, SPC_SEGMENTSIZE, self.points )
# correct buffer size
if self.channel == 1 or self.channel == 2:
if self.card_mode == 1:
self.qwBufferSize = uint64 (self.points * 2 * 1) # in bytes. samples with 2 bytes each, one channel active
elif self.card_mode == 2:
self.qwBufferSize = uint64 (int( self.points * self.aver ) * 2 * 1)
elif self.channel == 3:
if self.card_mode == 1:
self.qwBufferSize = uint64 (self.points * 2 * 2) # in bytes. samples with 2 bytes each
elif self.card_mode == 2:
self.qwBufferSize = uint64 (int( self.points * self.aver ) * 2 * 2)
spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_WRITESETUP)
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(points) == 1:
pnts = int(points[0])
assert( pnts >= 32 ), "Number of points must be more than 32"
if pnts % 16 != 0:
#general.message('Number of points should be divisible by 16; The closest avalaibale number is used')
#self.points = int( 16*(pnts // 16) )
self.points = self.round_to_closest(pnts, 16)
else:
self.points = pnts
elif len(points) == 0:
return self.test_points
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_posttrigger(self, *post_points):
"""
Set or query number of posttrigger points;
Input: digitizer_posttrigger(64); Number of points should be divisible by 16; 16 is the minimum
Default: 64;
Output: '64'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(post_points) == 1:
pnts = int(post_points[0])
if pnts < 16:
pnts = 16
general.message('Number of posttrigger points must be more than 16')
if pnts % 16 != 0:
general.message('Number of posttrigger points should be divisible by 16; The closest avalaibale number is used')
#self.posttrig_points = int( 16*(pnts // 16) )
self.posttrig_points = self.round_to_closest(pnts, 16)
else:
self.posttrig_points = pnts
if self.posttrig_points > self.points:
general.message('Number of posttrigger points should be less than number of points; The closest avalaibale number is used')
self.posttrig_points = self.points
elif len(post_points) == 0:
return self.posttrig_points
# to update on-the-fly
if self.state == 0:
pass
elif self.state == 1:
spcm_dwSetParam_i32(self.hCard, SPC_POSTTRIGGER, self.posttrig_points)
spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_WRITESETUP)
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(post_points) == 1:
pnts = int(post_points[0])
assert( pnts >= 16 ), "Number of postrigger points must be more than 16"
if pnts % 16 != 0:
#general.message('Number of points should be divisible by 16; The closest avalaibale number is used')
#self.posttrig_points = int( 16*(pnts // 16) )
self.posttrig_points = self.round_to_closest(pnts, 16)
else:
self.posttrig_points = pnts
if self.posttrig_points >= ( self.points - 16 ):
general.message('Number of posttrigger points should be less than number of points - 16 samlpes; The closest avalaibale number is used')
self.posttrig_points = self.points - 16
elif len(post_points) == 0:
return self.test_posttrig_points
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_channel(self, *channel):
"""
Enable the specified channel or query enabled channels;
Input: digitizer_channel('CH0', 'CH1'); Channel is 'CH0' or 'CH1'
Default: both channels are enabled
Output: 'CH0'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(channel) == 1:
ch = str(channel[0])
if ch == 'CH0':
self.channel = 1
elif ch == 'CH1':
self.channel = 2
else:
general.message('Incorrect channel')
sys.exit()
elif len(channel) == 2:
ch1 = str(channel[0])
ch2 = str(channel[1])
if (ch1 == 'CH0' and ch2 == 'CH1') or (ch1 == 'CH1' and ch2 == 'CH0'):
self.channel = 3
else:
general.message('Incorrect channel; Channel should be CH0 or CH1')
sys.exit()
elif len(channel) == 0:
if self.channel == 1:
return 'CH0'
elif self.channel == 2:
return 'CH1'
elif self.channel == 3:
return 'CH0, CH1'
else:
general.message('Incorrect argument; Channel should be CH0 or CH1')
sys.exit()
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(channel) == 1:
ch = str(channel[0])
assert( ch == 'CH0' or ch == 'CH1' ), 'Incorrect channel; Channel should be CH0 or CH1'
if ch == 'CH0':
self.channel = 1
elif ch == 'CH1':
self.channel = 2
elif len(channel) == 2:
ch1 = str(channel[0])
ch2 = str(channel[1])
assert( (ch1 == 'CH0' and ch2 == 'CH1') or (ch1 == 'CH1' and ch2 == 'CH0')), 'Incorrect channel; Channel should be CH0 or CH1'
if (ch1 == 'CH0' and ch2 == 'CH1') or (ch1 == 'CH1' and ch2 == 'CH0'):
self.channel = 3
elif len(channel) == 0:
return self.test_channel
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_sample_rate(self, *s_rate):
"""
Set or query sample rate; Range: 500 MHz - 1.907 kHz
Input: digitizer_sample_rate('500'); Sample rate is in MHz
Default: '500';
Output: '500 MHz'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(s_rate) == 1:
rate = 1000000 * int(s_rate[0])
if rate <= 1000000 * self.sample_rate_max and rate >= 1000000 * self.sample_rate_min:
closest_available = min(self.sample_rate_list, key = lambda x: abs(x - rate))
if int(closest_available) != rate:
general.message("Desired sample rate cannot be set, the nearest available value " + str(closest_available) + " is used")
self.sample_rate = closest_available / 1000000
else:
general.message('Incorrect sample rate; Should be 500 <= Rate <= 50')
sys.exit()
elif len(s_rate) == 0:
return str( self.sample_rate ) + ' MHz'
# to update on-the-fly
if self.state == 0:
pass
elif self.state == 1:
spcm_dwSetParam_i64 (self.hCard, SPC_SAMPLERATE, int( 1000000 * self.sample_rate ))
spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_WRITESETUP)
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(s_rate) == 1:
rate = 1000000 * int(s_rate[0])
closest_available = min(self.sample_rate_list, key = lambda x: abs(x - rate))
assert(rate <= 1000000 * self.sample_rate_max and rate >= 1000000 * self.sample_rate_min), "Incorrect sample rate; Should be 500 MHz <= Rate <= 0.001907 MHz"
self.sample_rate = closest_available / 1000000
elif len(s_rate) == 0:
return self.test_sample_rate
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_clock_mode(self, *mode):
"""
Set or query clock mode; the driver needs to know the external fed in frequency
Input: digitizer_clock_mode('Internal'); Clock mode is 'Internal' or 'External'
Default: 'Internal';
Output: 'Internal'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(mode) == 1:
md = str(mode[0])
if md == 'Internal':
self.clock_mode = 1
elif md == 'External':
self.clock_mode = 32
else:
general.message('Incorrect clock mode; Only Internal and External modes are available')
sys.exit()
elif len(mode) == 0:
if self.clock_mode == 1:
return 'Internal'
elif self.clock_mode == 32:
return 'External'
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(mode) == 1:
md = str(mode[0])
assert(md == 'Internal' or md == 'External'), "Incorrect clock mode; Only Internal and External modes are available"
if md == 'Internal':
self.clock_mode = 1
elif md == 'External':
self.clock_mode = 32
elif len(mode) == 0:
return self.test_clock_mode
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_reference_clock(self, *ref_clock):
"""
Set or query reference clock; the driver needs to know the external fed in frequency
Input: digitizer_reference_clock(100); Reference clock is in MHz; Range: 10 - 100
Default: '100';
Output: '200 MHz'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(ref_clock) == 1:
rate = int(ref_clock[0])
if rate <= self.sample_ref_clock_max and rate >= self.sample_ref_clock_min:
self.reference_clock = rate
else:
general.message('Incorrect reference clock; Should be 100 MHz <= Clock <= 10 MHz')
sys.exit()
elif len(ref_clock) == 0:
return str(self.reference_clock) + ' MHz'
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(ref_clock) == 1:
rate = int(ref_clock[0])
assert(rate <= self.sample_ref_clock_max and rate >= self.sample_ref_clock_min), "Incorrect reference clock; Should be 100 MHz <= Clock <= 10 MHz"
self.reference_clock = rate
elif len(ref_clock) == 0:
return self.test_ref_clock
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_card_mode(self, *mode):
"""
Set or query digitizer mode;
'Single' is "Data acquisition to on-board memory for one single trigger event."
"Average" is "The memory is segmented and with each trigger condition a predefined number of samples, a
segment, is acquired."
Input: digitizer_card_mode('Single'); Card mode is 'Single'; 'Average';
Default: 'Single';
Output: 'Single'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(mode) == 1:
md = str(mode[0])
if md == 'Single':
self.card_mode = 1
elif md == 'Average':
self.card_mode = 2
else:
general.message('Incorrect card mode; Only Single and Average modes are available')
sys.exit()
elif len(mode) == 0:
if self.card_mode == 1:
return 'Single'
elif self.card_mode == 2:
return 'Average'
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(mode) == 1:
md = str(mode[0])
assert(md == 'Single' or md == 'Average'), "Incorrect card mode; Only Single and Average modes are available"
if md == 'Single':
self.card_mode = 1
elif md == 'Average':
self.card_mode = 2
elif len(mode) == 0:
return self.test_card_mode
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_trigger_channel(self, *ch):
"""
Set or query trigger channel;
Input: digitizer_trigger_channel('Software'); Trigger channel is 'Software'; 'External'
Default: 'External';
Output: 'Software'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(ch) == 1:
md = str(ch[0])
if md == 'Software':
self.trigger_ch = 1
elif md == 'External':
self.trigger_ch = 2
else:
general.message('Incorrect trigger channel; Only Software and External modes are available')
sys.exit()
elif len(ch) == 0:
if self.trigger_ch == 1:
return 'Software'
elif self.trigger_ch == 2:
return 'External'
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(ch) == 1:
md = str(ch[0])
assert(md == 'Software' or md == 'External'), "Incorrect trigger channel; Only Software and External modes are available"
if md == 'Software':
self.trigger_ch = 1
elif md == 'External':
self.trigger_ch = 2
elif len(ch) == 0:
return self.test_trigger_ch
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_trigger_mode(self, *mode):
"""
Set or query trigger mode;
Input: digitizer_trigger_mode('Positive'); Trigger mode is 'Positive'; 'Negative'; 'High'; 'Low'
Default: 'Positive';
Output: 'Positive'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(mode) == 1:
md = str(mode[0])
if md == 'Positive':
self.trigger_mode = 1
elif md == 'Negative':
self.trigger_mode = 2
elif md == 'High':
self.trigger_mode = 8
elif md == 'Low':
self.trigger_mode = 10
else:
general.message("Incorrect trigger mode; Only Positive, Negative, High, and Low are available")
sys.exit()
elif len(mode) == 0:
if self.trigger_mode == 1:
return 'Positive'
elif self.trigger_mode == 2:
return 'Negative'
elif self.trigger_mode == 8:
return 'High'
elif self.trigger_mode == 10:
return 'Low'
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(mode) == 1:
md = str(mode[0])
assert(md == 'Positive' or md == 'Negative' or md == 'High' or md == 'Low'), "Incorrect trigger mode; \
Only Positive, Negative, High, and Low are available"
if md == 'Positive':
self.trigger_mode = 1
elif md == 'Negative':
self.trigger_mode = 2
elif md == 'High':
self.trigger_mode = 8
elif md == 'Low':
self.trigger_mode = 10
elif len(mode) == 0:
return self.test_trigger_mode
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_number_of_averages(self, *averages):
"""
Set or query number of averages;
Input: digitizer_number_of_averages(10); Number of averages from 1 to 10000; 0 is infinite averages
Default: 2;
Output: '100'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(averages) == 1:
ave = int(averages[0])
self.aver = ave
elif len(averages) == 0:
return self.aver
# to update on-the-fly
if self.state == 0:
pass
elif self.state == 1:
# change card mode and memory
if self.card_mode == 2:
spcm_dwSetParam_i32(self.hCard, SPC_MEMSIZE, int( self.points * self.aver ) )
#spcm_dwSetParam_i32(self.hCard, SPC_SEGMENTSIZE, self.points )
# correct buffer size
if self.channel == 1 or self.channel == 2:
if self.card_mode == 2:
self.qwBufferSize = uint64 (int( self.points * self.aver ) * 2 * 1)
elif self.channel == 3:
if self.card_mode == 2:
self.qwBufferSize = uint64 (int( self.points * self.aver ) * 2 * 2)
spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_WRITESETUP)
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(averages) == 1:
ave = int(averages[0])
assert( ave >= 1 and ave <= self.averages_max ), "Incorrect number of averages; Should be 1 <= Averages <= 10000"
self.aver = ave
elif len(aver) == 0:
return self.test_averages
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_trigger_delay(self, *delay):
"""
Set or query trigger delay;
Input: digitizer_trigger_delay('100 ns'); delay in [ms, us, ns]
Step is 16 sample clock; will be rounded if input is not divisible by 16 sample clock
Default: 0 ns;
Output: '100 ns'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(delay) == 1:
temp = delay[0].split(' ')
delay_num = int(temp[0])
dimen = str(temp[1])
if dimen in self.timebase_dict:
flag = self.timebase_dict[dimen]
# trigger delay in samples; maximum is 8589934576, step is 16
del_in_sample = int( delay_num*flag*self.sample_rate / 1000 )
if del_in_sample % 16 != 0:
#self.delay = int( 16*(del_in_sample // 16) )
self.delay = self.round_to_closest(del_in_sample, 16)
general.message('Delay should be divisible by 16 samples (32 ns at 500 MHz); The closest avalaibale number ' + str( self.delay * 1000 / self.sample_rate) + ' ns is used')
else:
self.delay = del_in_sample
else:
general.message('Incorrect delay dimension; Should be ns, us or ms')
sys.exit()
elif len(delay) == 0:
return str(self.delay / self.sample_rate * 1000) + ' ns'
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(delay) == 1:
temp = delay[0].split(' ')
delay_num = int(temp[0])
dimen = str(temp[1])
assert( dimen in self.timebase_dict), 'Incorrect delay dimension; Should be ns, us or ms'
flag = self.timebase_dict[dimen]
# trigger delay in samples; maximum is 8589934576, step is 16
del_in_sample = int( delay_num*flag*self.sample_rate / 1000 )
if del_in_sample % 16 != 0:
#self.delay = int( 16*(del_in_sample // 16) )
self.delay = self.round_to_closest(del_in_sample, 16)
else:
self.delay = del_in_sample
assert(self.delay >= self.delay_min and self.delay <= self.delay_max), 'Incorrect delay; Should be 0 <= Delay <= 8589934560 samples'
elif len(delay) == 0:
return self.test_delay
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_input_mode(self, *mode):
"""
Set or query input mode;
Input: digitizer_input_mode('HF'); Input mode is 'HF'; 'Buffered'.
HF mode allows using a high frequency 50 ohm path to have full bandwidth and best dynamic performance.
Buffered mode allows using a buffered path with all features but limited bandwidth and dynamic performance.
The specified input mode will be used for both channels.
Default: 'HF';
Output: 'Buffered'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(mode) == 1:
md = str(mode[0])
if md == 'Buffered':
self.input_mode = 0
elif md == 'HF':
self.input_mode = 1
else:
general.message("Incorrect input mode; Only HF and Buffered are available")
sys.exit()
elif len(mode) == 0:
if self.input_mode == 0:
return 'Buffered'
elif self.input_mode == 1:
return 'HF'
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(mode) == 1:
md = str(mode[0])
assert(md == 'Buffered' or md == 'HF'), "Incorrect input mode; Only HF and Buffered are available"
if md == 'Buffered':
self.input_mode = 0
elif md == 'HF':
self.input_mode = 1
elif len(mode) == 0:
return self.test_input_mode
else:
assert( 1 == 2 ), 'Incorrect argument'
def digitizer_amplitude(self, *ampl):
"""
Set or query range of the channels in mV;
Input: digitizer_amplitude(500);
Buffered range is [200, 500, 1000, 2000, 5000, 10000]
HF range is [500, 1000, 25200, 5000]
The specified range will be used for both channels.
Default: '500';
Output: 'CH0: 500 mV; CH1: 500 mV'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(ampl) == 1:
amp = int(ampl[0])
if self.input_mode == 0: # Buffered
closest_available = min(self.buffered_mode_range_list, key = lambda x: abs(x - amp))
if closest_available != amp:
general.message("Desired amplitude cannot be set, the nearest available value " + str(closest_available) + " mV is used")
self.amplitude_0 = closest_available
self.amplitude_1 = closest_available
elif self.input_mode == 1: # HF
closest_available = min(self.hf_mode_range_list, key = lambda x: abs(x - amp))
if closest_available != amp:
general.message("Desired amplitude cannot be set, the nearest available value " + str(closest_available) + " mV is used")
self.amplitude_0 = closest_available
self.amplitude_1 = closest_available
else:
general.message('Incorrect amplitude or input mode')
sys.exit()
elif len(ampl) == 0:
return 'CH0: ' + str(self.amplitude_0) + ' mV; ' + 'CH1: ' + str(self.amplitude_1) + ' mV'
# to update on-the-fly
if self.state == 0:
pass
elif self.state == 1:
spcm_dwGetParam_i32 (self.hCard, SPC_MIINST_MAXADCVALUE, byref(self.lMaxDACValue))
spcm_dwSetParam_i32 (self.hCard, SPC_AMP0, self.amplitude_0)
spcm_dwSetParam_i32 (self.hCard, SPC_AMP1, self.amplitude_1)
spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_WRITESETUP)
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(ampl) == 1:
amp = int(ampl[0])
if self.input_mode == 0: # Buffered
closest_available = min(self.buffered_mode_range_list, key = lambda x: abs(x - amp))
if closest_available != amp:
general.message("Desired amplitude cannot be set, the nearest available value " + str(closest_available) + " mV is used")
self.amplitude_0 = closest_available
self.amplitude_1 = closest_available
elif self.input_mode == 1: # HF
closest_available = min(self.hf_mode_range_list, key = lambda x: abs(x - amp))
if closest_available != amp:
general.message("Desired amplitude cannot be set, the nearest available value " + str(closest_available) + " mV is used")
self.amplitude_0 = closest_available
self.amplitude_1 = closest_available
else:
assert( 1 == 2), 'Incorrect amplitude or input mode'
elif len(ampl) == 0:
return self.test_amplitude
def digitizer_offset(self, *offset):
"""
Set or query offset of the channels as a percentage of range;
The value of the offset (range * percentage) is ALWAYS substracted from the signal
No offset can be used for 1000 mV and 10000 mV range in Buffered mode
Input: digitizer_offset('CH0', '1', 'CH1', '50')
Default: '0'; '0'
Output: 'CH0: 10'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if self.input_mode == 0:
if self.amplitude_0 == 1000 or self.amplitude_0 == 10000:
general.message("No offset can be used for 1000 mV and 10000 mV range in Buffered mode")
sys.exit()
elif self.amplitude_1 == 1000 or self.amplitude_1 == 10000:
general.message("No offset can be used for 1000 mV and 10000 mV range in Buffered mode")
sys.exit()
if len(offset) == 2:
ch = str(offset[0])
ofst = int(offset[1])
if ch == 'CH0':
self.offset_0 = ofst
elif ch == 'CH1':
self.offset_1 = ofst
elif len(offset) == 4:
ch1 = str(offset[0])
ofst1 = int(offset[1])
ch2 = str(offset[2])
ofst2 = int(offset[3])
if ch1 == 'CH0':
self.offset_0 = ofst1
elif ch1 == 'CH1':
self.offset_1 = ofst1
if ch2 == 'CH0':
self.offset_0 = ofst2
elif ch2 == 'CH1':
self.offset_1 = ofst2
elif len(offset) == 1:
ch = str(offset[0])
if ch == 'CH0':
return 'CH0: ' + str(self.offset_0)
elif ch == 'CH1':
return 'CH1: ' + str(self.offset_1)
# to update on-the-fly
if self.state == 0:
pass
elif self.state == 1:
if ( self.amplitude_0 != 1000 or self.amplitude_0 != 10000 ) and self.input_mode == 0:
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS0, -self.offset_0 )
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS1, -self.offset_1 )
elif self.input_mode == 1:
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS0, -self.offset_0 )
spcm_dwSetParam_i32 (self.hCard, SPC_OFFS1, -self.offset_1 )
spcm_dwSetParam_i32 (self.hCard, SPC_M2CMD, M2CMD_CARD_WRITESETUP)
elif self.test_flag == 'test':
self.setting_change_count = 1
if self.input_mode == 0:
assert(self.amplitude_0 != 1000 or self.amplitude_0 != 10000 ), "No offset can be used for 1000 mV and 10000 mV range in Buffered mode"
assert(self.amplitude_1 != 1000 or self.amplitude_1 != 10000 ), "No offset can be used for 1000 mV and 10000 mV range in Buffered mode"
if len(offset) == 2:
ch = str(offset[0])
ofst = int(offset[1])
assert(ch == 'CH0' or ch == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
assert( ofst >= 0 and ofst <= 100 ), "Incorrect offset percentage; Should be 0 <= offset <= 100"
if ch == 'CH0':
self.offset_0 = ofst
elif ch == 'CH1':
self.offset_1 = ofst
elif len(offset) == 4:
ch1 = str(offset[0])
ofst1 = int(offset[1])
ch2 = str(offset[2])
ofst2 = int(offset[3])
assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel 1; Should be CH0 or CH1"
assert( ofst1 >= 0 and ofst1 <= 100 ), "Incorrect offset percentage 1; Should be 0 <= offset <= 100"
assert(ch2 == 'CH0' or ch2 == 'CH1'), "Incorrect channel 2; Should be CH0 or CH1"
assert( ofst2 >= 0 and ofst2 <= 100 ), "Incorrect offset percentage 2; Should be 0 <= offset <= 100"
if ch1 == 'CH0':
self.offset_0 = ofst1
elif ch1 == 'CH1':
self.offset_1 = ofst1
if ch2 == 'CH0':
self.offset_0 = ofst2
elif ch2 == 'CH1':
self.offset_1 = ofst2
elif len(offset) == 1:
ch1 = str(offset[0])
assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
return self.test_offset
else:
assert( 1 == 2 ), 'Incorrect arguments'
def digitizer_coupling(self, *coupling):
"""
Set or query coupling of the channels; Two options are available: [AC, DC]
Input: digitizer_coupling('CH0', 'AC', 'CH1', 'DC')
Default: 'DC'; 'DC'
Output: 'CH0: AC'
"""
if self.test_flag != 'test':
self.setting_change_count = 1
if len(coupling) == 2:
ch = str(coupling[0])
cplng = str(coupling[1])
flag = self.coupling_dict[cplng]
if ch == 'CH0':
self.coupling_0 = flag
elif ch == 'CH1':
self.coupling_1 = flag
elif len(coupling) == 4:
ch1 = str(coupling[0])
cplng1 = str(coupling[1])
flag1 = self.coupling_dict[cplng1]
ch2 = str(coupling[2])
cplng2 = str(coupling[3])
flag2 = self.coupling_dict[cplng2]
if ch1 == 'CH0':
self.coupling_0 = flag1
elif ch1 == 'CH1':
self.coupling_1 = flag1
if ch2 == 'CH0':
self.coupling_0 = flag2
elif ch2 == 'CH1':
self.coupling_1 = flag2
elif len(coupling) == 1:
ch = str(coupling[0])
if ch == 'CH0':
return 'CH0: ' + str(self.coupling_0)
elif ch == 'CH1':
return 'CH1: ' + str(self.coupling_1)
elif self.test_flag == 'test':
self.setting_change_count = 1
if len(coupling) == 2:
ch = str(coupling[0])
cplng = str(coupling[1])
assert(ch == 'CH0' or ch == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
assert( cplng in self.coupling_dict ), "Incorrect coupling; Only DC and AC are available"
flag = self.coupling_dict[cplng]
if ch == 'CH0':
self.coupling_0 = flag
elif ch == 'CH1':
self.coupling_1 = flag
elif len(coupling) == 4:
ch1 = str(coupling[0])
cplng1 = str(coupling[1])
ch2 = str(coupling[2])
cplng2 = str(coupling[3])
assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel 1; Should be CH0 or CH1"
assert( cplng1 in self.coupling_dict ), "Incorrect coupling 1; Only DC and AC are available"
flag1 = self.coupling_dict[cplng1]
assert(ch2 == 'CH0' or ch2 == 'CH1'), "Incorrect channel 2; Should be CH0 or CH1"
assert( cplng2 in self.coupling_dict ), "Incorrect coupling 2; Only DC and AC are available"
flag2 = self.coupling_dict[cplng2]
if ch1 == 'CH0':
self.coupling_0 = flag1
elif ch1 == 'CH1':
self.coupling_1 = flag1
if ch2 == 'CH0':
self.coupling_0 = flag2
elif ch2 == 'CH1':
self.coupling_1 = flag2
elif len(coupling) == 1:
ch1 = str(coupling[0])
assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
return self.test_coupling
else:
assert( 1 == 2 ), 'Incorrect arguments'
    def digitizer_impedance(self, *impedance):
        """
        Set or query impedance of the channels in buffered mode; Two options are available: [1 M, 50]
        In the HF mode impedance is fixed at 50 ohm
        Input: digitizer_impedance('CH0', '50', 'CH1', '50')
        Default: '50'; '50'
        Output: 'CH0: 50'
        """
        if self.test_flag != 'test':
            self.setting_change_count = 1
            # impedance is selectable only in Buffered mode (input_mode == 0)
            if self.input_mode == 1:
                general.message("Impedance is fixed at 50 Ohm in HF mode")
                sys.exit()
            # single channel/value pair
            if len(impedance) == 2:
                ch = str(impedance[0])
                imp = str(impedance[1])
                flag = self.impedance_dict[imp]
                if ch == 'CH0':
                    self.impedance_0 = flag
                elif ch == 'CH1':
                    self.impedance_1 = flag
            # two channel/value pairs in one call
            elif len(impedance) == 4:
                ch1 = str(impedance[0])
                imp1 = str(impedance[1])
                flag1 = self.impedance_dict[imp1]
                ch2 = str(impedance[2])
                imp2 = str(impedance[3])
                flag2 = self.impedance_dict[imp2]
                if ch1 == 'CH0':
                    self.impedance_0 = flag1
                elif ch1 == 'CH1':
                    self.impedance_1 = flag1
                if ch2 == 'CH0':
                    self.impedance_0 = flag2
                elif ch2 == 'CH1':
                    self.impedance_1 = flag2
            # query form: a single channel name
            elif len(impedance) == 1:
                ch = str(impedance[0])
                if ch == 'CH0':
                    return 'CH0: ' + str(self.impedance_0)
                elif ch == 'CH1':
                    return 'CH1: ' + str(self.impedance_1)
        elif self.test_flag == 'test':
            self.setting_change_count = 1
            # mirror of the hardware branch, with assertions instead of message/exit
            if self.input_mode == 1:
                assert( 1 == 2 ), "Impedance is fixed at 50 Ohm in HF mode"
            if len(impedance) == 2:
                ch = str(impedance[0])
                imp = str(impedance[1])
                assert(ch == 'CH0' or ch == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
                assert( imp in self.impedance_dict ), "Incorrect impedance; Only 1 M and 50 are available"
                flag = self.impedance_dict[imp]
                if ch == 'CH0':
                    self.impedance_0 = flag
                elif ch == 'CH1':
                    self.impedance_1 = flag
            elif len(impedance) == 4:
                ch1 = str(impedance[0])
                imp1 = str(impedance[1])
                ch2 = str(impedance[2])
                imp2 = str(impedance[3])
                assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel 1; Should be CH0 or CH1"
                assert( imp1 in self.impedance_dict ), "Incorrect impedance 1; Only 1 M and 50 are available"
                flag1 = self.impedance_dict[imp1]
                assert(ch2 == 'CH0' or ch2 == 'CH1'), "Incorrect channel 2; Should be CH0 or CH1"
                assert( imp2 in self.impedance_dict ), "Incorrect impedance 2; Only 1 M and 50 are available"
                flag2 = self.impedance_dict[imp2]
                if ch1 == 'CH0':
                    self.impedance_0 = flag1
                elif ch1 == 'CH1':
                    self.impedance_1 = flag1
                if ch2 == 'CH0':
                    self.impedance_0 = flag2
                elif ch2 == 'CH1':
                    self.impedance_1 = flag2
            elif len(impedance) == 1:
                ch1 = str(impedance[0])
                assert(ch1 == 'CH0' or ch1 == 'CH1'), "Incorrect channel; Should be CH0 or CH1"
                return self.test_impedance
            else:
                assert( 1 == 2 ), 'Incorrect arguments'
# UNDOCUMENTED
def digitizer_window(self):
"""
Special function for reading integration window
"""
return ( self.win_right - self.win_left ) * 1000 / self.sample_rate
def digitizer_read_settings(self):
"""
Special function for reading settings of the digitizer from the special file
"""
if self.test_flag != 'test':
self.read = 1
self.digitizer_card_mode('Average')
self.digitizer_clock_mode('External')
self.digitizer_reference_clock(100)
path_to_main = os.path.abspath( os.getcwd() )
path_file = os.path.join(path_to_main, 'atomize/control_center/digitizer.param')
#path_file = os.path.join(path_to_main, 'digitizer.param')
file_to_read = open(path_file, 'r')
text_from_file = file_to_read.read().split('\n')
# ['Points: 224', 'Sample Rate: 250', 'Posstriger: 16', 'Range: 500', 'CH0 Offset: 0', 'CH1 Offset: 0',
# 'Window Left: 0', 'Window Right: 0', '']
self.points = int( text_from_file[0].split(' ')[1] )
#self.digitizer_number_of_points( points )
self.sample_rate = int( text_from_file[1].split(' ')[2] )
#self.digitizer_sample_rate( sample_rate )
self.posttrig_points = int( text_from_file[2].split(' ')[1] )
#self.digitizer_posttrigger( posttrigger )
self.amplitude_0 = int( text_from_file[3].split(' ')[1] )
self.amplitude_1 = int( text_from_file[3].split(' ')[1] )
#self.digitizer_amplitude( amplitude )
self.offset_0 = int( text_from_file[4].split(' ')[2] )
self.offset_1 = int( text_from_file[5].split(' ')[2] )
#self.digitizer_offset('CH0', ch0_offset, 'CH1', ch1_offset)
self.win_left = int( text_from_file[6].split(' ')[2] )
self.win_right = 1 + int( text_from_file[7].split(' ')[2] )
self.digitizer_setup()
elif self.test_flag == 'test':
self.read = 1
self.digitizer_card_mode('Average')
self.digitizer_clock_mode('External')
self.digitizer_reference_clock(100)
path_to_main = os.path.abspath( os.getcwd() )
path_file = os.path.join(path_to_main, 'atomize/control_center/digitizer.param')
#path_file = os.path.join(path_to_main, 'digitizer.param')
file_to_read = open(path_file, 'r')
text_from_file = file_to_read.read().split('\n')
# ['Points: 224', 'Sample Rate: 250', 'Posstriger: 16', 'Range: 500', 'CH0 Offset: 0', 'CH1 Offset: 0',
# 'Window Left: 0', 'Window Right: 0', '']
points = int( text_from_file[0].split(' ')[1] )
self.digitizer_number_of_points( points )
sample_rate = int( text_from_file[1].split(' ')[2] )
self.digitizer_sample_rate( sample_rate )
posttrigger = int( text_from_file[2].split(' ')[1] )
self.digitizer_posttrigger( posttrigger )
amplitude = int( text_from_file[3].split(' ')[1] )
self.digitizer_amplitude( amplitude )
ch0_offset = int( text_from_file[4].split(' ')[2] )
ch1_offset = int( text_from_file[5].split(' ')[2] )
self.digitizer_offset('CH0', ch0_offset, 'CH1', ch1_offset)
self.win_left = int( text_from_file[6].split(' ')[2] )
self.win_right = 1 + int( text_from_file[7].split(' ')[2] )
# Auxilary functions
def round_to_closest(self, x, y):
"""
A function to round x to divisible by y
"""
#temp = int( 16*(x // 16) )
#if temp < x:
# temp = temp + 16
return int( y * ( ( x // y) + (x % y > 0) ) )
def main():
    """Placeholder entry point; the module is meant to be imported, not run."""
    pass


if __name__ == "__main__":
    main()
| [
"numpy.average",
"os.path.join",
"os.getcwd",
"os.path.dirname",
"numpy.zeros",
"numpy.sum",
"atomize.general_modules.general_functions.message",
"sys.exit",
"sys.path.append",
"atomize.device_modules.config.config_utils.read_specific_parameters"
] | [((76, 137), 'sys.path.append', 'sys.path.append', (['"""/home/pulseepr/Sources/AWG/Examples/python"""'], {}), "('/home/pulseepr/Sources/AWG/Examples/python')\n", (91, 137), False, 'import sys\n'), ((651, 676), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (666, 676), False, 'import os\n'), ((709, 799), 'os.path.join', 'os.path.join', (['self.path_current_directory', '"""config"""', '"""Spectrum_M4I_4450_X8_config.ini"""'], {}), "(self.path_current_directory, 'config',\n 'Spectrum_M4I_4450_X8_config.ini')\n", (721, 799), False, 'import os\n'), ((922, 975), 'atomize.device_modules.config.config_utils.read_specific_parameters', 'cutil.read_specific_parameters', (['self.path_config_file'], {}), '(self.path_config_file)\n', (952, 975), True, 'import atomize.device_modules.config.config_utils as cutil\n'), ((64527, 64595), 'os.path.join', 'os.path.join', (['path_to_main', '"""atomize/control_center/digitizer.param"""'], {}), "(path_to_main, 'atomize/control_center/digitizer.param')\n", (64539, 64595), False, 'import os\n'), ((14184, 14286), 'atomize.general_modules.general_functions.message', 'general.message', (['"""A timeout occurred while waiting. Probably the digitizer is not triggered"""'], {}), "(\n 'A timeout occurred while waiting. 
Probably the digitizer is not triggered'\n )\n", (14199, 14286), True, 'import atomize.general_modules.general_functions as general\n'), ((60523, 60581), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Impedance is fixed at 50 Ohm in HF mode"""'], {}), "('Impedance is fixed at 50 Ohm in HF mode')\n", (60538, 60581), True, 'import atomize.general_modules.general_functions as general\n'), ((60598, 60608), 'sys.exit', 'sys.exit', ([], {}), '()\n', (60606, 60608), False, 'import sys\n'), ((64489, 64500), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (64498, 64500), False, 'import os\n'), ((66222, 66290), 'os.path.join', 'os.path.join', (['path_to_main', '"""atomize/control_center/digitizer.param"""'], {}), "(path_to_main, 'atomize/control_center/digitizer.param')\n", (66234, 66290), False, 'import os\n'), ((6810, 6845), 'atomize.general_modules.general_functions.message', 'general.message', (['"""No card found..."""'], {}), "('No card found...')\n", (6825, 6845), True, 'import atomize.general_modules.general_functions as general\n'), ((6866, 6876), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6874, 6876), False, 'import sys\n'), ((21871, 21892), 'numpy.zeros', 'np.zeros', (['self.points'], {}), '(self.points)\n', (21879, 21892), True, 'import numpy as np\n'), ((24130, 24186), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Number of points must be more than 32"""'], {}), "('Number of points must be more than 32')\n", (24145, 24186), True, 'import atomize.general_modules.general_functions as general\n'), ((24242, 24352), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Number of points should be divisible by 16; The closest avalaibale number is used"""'], {}), "(\n 'Number of points should be divisible by 16; The closest avalaibale number is used'\n )\n", (24257, 24352), True, 'import atomize.general_modules.general_functions as general\n'), ((27198, 27266), 
'atomize.general_modules.general_functions.message', 'general.message', (['"""Number of posttrigger points must be more than 16"""'], {}), "('Number of posttrigger points must be more than 16')\n", (27213, 27266), True, 'import atomize.general_modules.general_functions as general\n'), ((27322, 27444), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Number of posttrigger points should be divisible by 16; The closest avalaibale number is used"""'], {}), "(\n 'Number of posttrigger points should be divisible by 16; The closest avalaibale number is used'\n )\n", (27337, 27444), True, 'import atomize.general_modules.general_functions as general\n'), ((27722, 27855), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Number of posttrigger points should be less than number of points; The closest avalaibale number is used"""'], {}), "(\n 'Number of posttrigger points should be less than number of points; The closest avalaibale number is used'\n )\n", (27737, 27855), True, 'import atomize.general_modules.general_functions as general\n'), ((32665, 32734), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect sample rate; Should be 500 <= Rate <= 50"""'], {}), "('Incorrect sample rate; Should be 500 <= Rate <= 50')\n", (32680, 32734), True, 'import atomize.general_modules.general_functions as general\n'), ((32755, 32765), 'sys.exit', 'sys.exit', ([], {}), '()\n', (32763, 32765), False, 'import sys\n'), ((36011, 36098), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect reference clock; Should be 100 MHz <= Clock <= 10 MHz"""'], {}), "(\n 'Incorrect reference clock; Should be 100 MHz <= Clock <= 10 MHz')\n", (36026, 36098), True, 'import atomize.general_modules.general_functions as general\n'), ((36114, 36124), 'sys.exit', 'sys.exit', ([], {}), '()\n', (36122, 36124), False, 'import sys\n'), ((45259, 45327), 
'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect delay dimension; Should be ns, us or ms"""'], {}), "('Incorrect delay dimension; Should be ns, us or ms')\n", (45274, 45327), True, 'import atomize.general_modules.general_functions as general\n'), ((45348, 45358), 'sys.exit', 'sys.exit', ([], {}), '()\n', (45356, 45358), False, 'import sys\n'), ((52402, 52495), 'atomize.general_modules.general_functions.message', 'general.message', (['"""No offset can be used for 1000 mV and 10000 mV range in Buffered mode"""'], {}), "(\n 'No offset can be used for 1000 mV and 10000 mV range in Buffered mode')\n", (52417, 52495), True, 'import atomize.general_modules.general_functions as general\n'), ((52511, 52521), 'sys.exit', 'sys.exit', ([], {}), '()\n', (52519, 52521), False, 'import sys\n'), ((66184, 66195), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (66193, 66195), False, 'import os\n'), ((28998, 29144), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Number of posttrigger points should be less than number of points - 16 samlpes; The closest avalaibale number is used"""'], {}), "(\n 'Number of posttrigger points should be less than number of points - 16 samlpes; The closest avalaibale number is used'\n )\n", (29013, 29144), True, 'import atomize.general_modules.general_functions as general\n'), ((29970, 30006), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect channel"""'], {}), "('Incorrect channel')\n", (29985, 30006), True, 'import atomize.general_modules.general_functions as general\n'), ((30027, 30037), 'sys.exit', 'sys.exit', ([], {}), '()\n', (30035, 30037), False, 'import sys\n'), ((30316, 30382), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect channel; Channel should be CH0 or CH1"""'], {}), "('Incorrect channel; Channel should be CH0 or CH1')\n", (30331, 30382), True, 'import atomize.general_modules.general_functions as 
general\n'), ((30403, 30413), 'sys.exit', 'sys.exit', ([], {}), '()\n', (30411, 30413), False, 'import sys\n'), ((30707, 30774), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect argument; Channel should be CH0 or CH1"""'], {}), "('Incorrect argument; Channel should be CH0 or CH1')\n", (30722, 30774), True, 'import atomize.general_modules.general_functions as general\n'), ((30791, 30801), 'sys.exit', 'sys.exit', ([], {}), '()\n', (30799, 30801), False, 'import sys\n'), ((34458, 34550), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect clock mode; Only Internal and External modes are available"""'], {}), "(\n 'Incorrect clock mode; Only Internal and External modes are available')\n", (34473, 34550), True, 'import atomize.general_modules.general_functions as general\n'), ((34566, 34576), 'sys.exit', 'sys.exit', ([], {}), '()\n', (34574, 34576), False, 'import sys\n'), ((37551, 37639), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect card mode; Only Single and Average modes are available"""'], {}), "(\n 'Incorrect card mode; Only Single and Average modes are available')\n", (37566, 37639), True, 'import atomize.general_modules.general_functions as general\n'), ((37655, 37665), 'sys.exit', 'sys.exit', ([], {}), '()\n', (37663, 37665), False, 'import sys\n'), ((39052, 39154), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect trigger channel; Only Software and External modes are available"""'], {}), "(\n 'Incorrect trigger channel; Only Software and External modes are available'\n )\n", (39067, 39154), True, 'import atomize.general_modules.general_functions as general\n'), ((39165, 39175), 'sys.exit', 'sys.exit', ([], {}), '()\n', (39173, 39175), False, 'import sys\n'), ((47421, 47496), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect input mode; Only HF and Buffered are 
available"""'], {}), "('Incorrect input mode; Only HF and Buffered are available')\n", (47436, 47496), True, 'import atomize.general_modules.general_functions as general\n'), ((47517, 47527), 'sys.exit', 'sys.exit', ([], {}), '()\n', (47525, 47527), False, 'import sys\n'), ((49815, 49867), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect amplitude or input mode"""'], {}), "('Incorrect amplitude or input mode')\n", (49830, 49867), True, 'import atomize.general_modules.general_functions as general\n'), ((49888, 49898), 'sys.exit', 'sys.exit', ([], {}), '()\n', (49896, 49898), False, 'import sys\n'), ((52618, 52711), 'atomize.general_modules.general_functions.message', 'general.message', (['"""No offset can be used for 1000 mV and 10000 mV range in Buffered mode"""'], {}), "(\n 'No offset can be used for 1000 mV and 10000 mV range in Buffered mode')\n", (52633, 52711), True, 'import atomize.general_modules.general_functions as general\n'), ((52727, 52737), 'sys.exit', 'sys.exit', ([], {}), '()\n', (52735, 52737), False, 'import sys\n'), ((15737, 15761), 'numpy.average', 'np.average', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (15747, 15761), True, 'import numpy as np\n'), ((19543, 19567), 'numpy.average', 'np.average', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (19553, 19567), True, 'import numpy as np\n'), ((40722, 40827), 'atomize.general_modules.general_functions.message', 'general.message', (['"""Incorrect trigger mode; Only Positive, Negative, High, and Low are available"""'], {}), "(\n 'Incorrect trigger mode; Only Positive, Negative, High, and Low are available'\n )\n", (40737, 40827), True, 'import atomize.general_modules.general_functions as general\n'), ((40838, 40848), 'sys.exit', 'sys.exit', ([], {}), '()\n', (40846, 40848), False, 'import sys\n'), ((16171, 16195), 'numpy.average', 'np.average', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (16181, 16195), True, 'import numpy as np\n'), ((16838, 16862), 
'numpy.average', 'np.average', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (16848, 16862), True, 'import numpy as np\n'), ((20391, 20415), 'numpy.average', 'np.average', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (20401, 20415), True, 'import numpy as np\n'), ((16940, 16986), 'numpy.sum', 'np.sum', (['data_ave[self.win_left:self.win_right]'], {}), '(data_ave[self.win_left:self.win_right])\n', (16946, 16986), True, 'import numpy as np\n'), ((17494, 17518), 'numpy.average', 'np.average', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (17504, 17518), True, 'import numpy as np\n'), ((17564, 17610), 'numpy.sum', 'np.sum', (['data_ave[self.win_left:self.win_right]'], {}), '(data_ave[self.win_left:self.win_right])\n', (17570, 17610), True, 'import numpy as np\n'), ((20493, 20545), 'numpy.sum', 'np.sum', (['data_ave[0::2][self.win_left:self.win_right]'], {}), '(data_ave[0::2][self.win_left:self.win_right])\n', (20499, 20545), True, 'import numpy as np\n'), ((20715, 20767), 'numpy.sum', 'np.sum', (['data_ave[1::2][self.win_left:self.win_right]'], {}), '(data_ave[1::2][self.win_left:self.win_right])\n', (20721, 20767), True, 'import numpy as np\n'), ((21130, 21148), 'numpy.sum', 'np.sum', (['data[0::2]'], {}), '(data[0::2])\n', (21136, 21148), True, 'import numpy as np\n'), ((21330, 21348), 'numpy.sum', 'np.sum', (['data[1::2]'], {}), '(data[1::2])\n', (21336, 21348), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.4'
#     jupytext_version: 1.1.5
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # s_gamma_to_uniform [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_gamma_to_uniform&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-gamma-to-unif).

# +
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt

from arpym.tools import histogram_sp, add_logo
# -

# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_gamma_to_uniform-parameters)

k = 4  # shape parameter
theta = 4  # scale parameter
j_ = 100000  # number of scenarios

# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_gamma_to_uniform-implementation-step01): Generate a gamma-distributed sample

# BUG FIX: scipy's gamma signature is (a, loc=0, scale=1); theta must be passed
# as the scale keyword -- positionally it was silently used as the loc shift
x = stats.gamma.rvs(k, scale=theta, size=j_)

# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_gamma_to_uniform-implementation-step02): Apply the gamma cdf to the sample

# probability integral transform: the cdf of the generating distribution maps
# the sample to the unit interval (same scale fix as above)
u = stats.gamma.cdf(x, k, scale=theta)

# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_gamma_to_uniform-implementation-step03): Compute the empirical histogram of the pdf of the grade sample

k_bar = int(np.round(3*np.log(j_)))  # number of histogram bins
[f_hist, xi] = histogram_sp(u, k_=k_bar)

# ## Plots

plt.style.use('arpm')
fig = plt.figure(figsize=(1280.0/72.0, 720.0/72.0), dpi=72.0)
plt.title('Gamma-to-uniform mapping', fontsize=20, fontweight='bold')

# empirical pdf
plt.bar(xi, f_hist, width=xi[1]-xi[0], facecolor=[.7, .7, .7],
        edgecolor='k', label='empirical pdf')

# uniform analytical pdf
plt.plot(np.linspace(0, 1, num=50), np.ones(50),
         color='red', lw=1.5, label='uniform pdf')
plt.grid(True)
plt.ylim([0, 1.25*max(xi)])
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend(fontsize=17)
add_logo(fig, location=2, set_fig_size=False)
plt.tight_layout()
| [
"scipy.stats.gamma.cdf",
"matplotlib.pyplot.grid",
"scipy.stats.gamma.rvs",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xticks",
"numpy.ones",
"arpym.tools.add_logo",
"numpy.log",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.yticks",
... | [((1087, 1121), 'scipy.stats.gamma.rvs', 'stats.gamma.rvs', (['k', 'theta'], {'size': 'j_'}), '(k, theta, size=j_)\n', (1102, 1121), True, 'import scipy.stats as stats\n'), ((1266, 1294), 'scipy.stats.gamma.cdf', 'stats.gamma.cdf', (['x', 'k', 'theta'], {}), '(x, k, theta)\n', (1281, 1294), True, 'import scipy.stats as stats\n'), ((1511, 1536), 'arpym.tools.histogram_sp', 'histogram_sp', (['u'], {'k_': 'k_bar'}), '(u, k_=k_bar)\n', (1523, 1536), False, 'from arpym.tools import histogram_sp, add_logo\n'), ((1550, 1571), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""arpm"""'], {}), "('arpm')\n", (1563, 1571), True, 'import matplotlib.pyplot as plt\n'), ((1578, 1637), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1280.0 / 72.0, 720.0 / 72.0)', 'dpi': '(72.0)'}), '(figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)\n', (1588, 1637), True, 'import matplotlib.pyplot as plt\n'), ((1634, 1703), 'matplotlib.pyplot.title', 'plt.title', (['"""Gamma-to-uniform mapping"""'], {'fontsize': '(20)', 'fontweight': '"""bold"""'}), "('Gamma-to-uniform mapping', fontsize=20, fontweight='bold')\n", (1643, 1703), True, 'import matplotlib.pyplot as plt\n'), ((1720, 1829), 'matplotlib.pyplot.bar', 'plt.bar', (['xi', 'f_hist'], {'width': '(xi[1] - xi[0])', 'facecolor': '[0.7, 0.7, 0.7]', 'edgecolor': '"""k"""', 'label': '"""empirical pdf"""'}), "(xi, f_hist, width=xi[1] - xi[0], facecolor=[0.7, 0.7, 0.7],\n edgecolor='k', label='empirical pdf')\n", (1727, 1829), True, 'import matplotlib.pyplot as plt\n'), ((1955, 1969), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1963, 1969), True, 'import matplotlib.pyplot as plt\n'), ((1998, 2021), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (2008, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2022, 2045), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (2032, 2045), True, 'import matplotlib.pyplot as plt\n'), 
((2046, 2069), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(17)'}), '(fontsize=17)\n', (2056, 2069), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2115), 'arpym.tools.add_logo', 'add_logo', (['fig'], {'location': '(2)', 'set_fig_size': '(False)'}), '(fig, location=2, set_fig_size=False)\n', (2078, 2115), False, 'from arpym.tools import histogram_sp, add_logo\n'), ((2116, 2134), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2132, 2134), True, 'import matplotlib.pyplot as plt\n'), ((1864, 1889), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': '(50)'}), '(0, 1, num=50)\n', (1875, 1889), True, 'import numpy as np\n'), ((1891, 1902), 'numpy.ones', 'np.ones', (['(50)'], {}), '(50)\n', (1898, 1902), True, 'import numpy as np\n'), ((1484, 1494), 'numpy.log', 'np.log', (['j_'], {}), '(j_)\n', (1490, 1494), True, 'import numpy as np\n')] |
#!/bin/env python
# -*- coding: utf-8 -*-
##
# test_solvers.py: Checks correctness of azure.quantum.optimization module.
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
import pytest
from asyncmock import AsyncMock, patch, Mock
from azure.quantum.aio.optimization import Problem
from azure.quantum.optimization import Term
import azure.quantum.aio.optimization.problem
import azure.quantum.aio.job.base_job
from common import expected_terms
import numpy
import os
@pytest.fixture
def mock_ws():
    """Fixture: a fake async Workspace whose container-URI lookup resolves to a dummy URI."""
    workspace = AsyncMock()
    workspace.get_container_uri = AsyncMock(return_value="mock_container_uri/foo/bar")
    return workspace
@pytest.fixture
def problem():
    """Fixture: a small QUBO problem plus matching coordinate arrays.

    Consistency fix: use the bare ``@pytest.fixture`` decorator like every
    other fixture in this file (the call form is equivalent but inconsistent).
    """
    ## QUBO problem
    problem = Problem(name="test")
    problem.terms = [
        Term(c=3, indices=[1, 0]),
        Term(c=5, indices=[2, 0]),
    ]
    problem.uploaded_blob_uri = "mock_blob_uri"
    # Equivalent coordinate (row, col, data) arrays used by the NPZ
    # translation tests below.
    problem.row = numpy.array([1, 2])
    problem.col = numpy.array([0, 0])
    problem.data = numpy.array([3, 5])
    return problem
@pytest.fixture
def default_qubo_problem(problem):
    """Fixture: path of a QUBO .npz saved WITHOUT keywords.

    numpy.savez names positional arrays "arr_0", "arr_1", ... by default;
    keyword arguments would be used as names instead.
    """
    filename = "default_qubo.npz"
    numpy.savez(filename, problem.row, problem.col, problem.data)
    yield filename
    # Clean up the temporary file after the test
    if os.path.isfile(filename):
        os.remove(filename)
@pytest.fixture
def with_keywords_qubo_problem(problem):
    """Fixture: path of a QUBO .npz saved WITH explicit keyword column names."""
    filename = "with_keywords_qubo.npz"
    numpy.savez(
        filename,
        row=problem.row,
        col=problem.col,
        data=problem.data,
    )
    yield filename
    # Clean up the temporary file after the test
    if os.path.isfile(filename):
        os.remove(filename)
@pytest.fixture
def pubo_problem():
    """Fixture: a small degree-3 PUBO problem plus matching coordinate arrays."""
    prob = Problem(name="test")
    prob.terms = [
        Term(c=3, indices=[1, 0, 1]),
        Term(c=5, indices=[2, 0, 0]),
        Term(c=-1, indices=[1, 0, 0]),
        Term(c=4, indices=[0, 2, 1]),
    ]
    # Equivalent coordinate (i, j, k, c) arrays used by the NPZ
    # translation tests below.
    prob.i = numpy.array([1, 2, 1, 0])
    prob.j = numpy.array([0, 0, 0, 2])
    prob.k = numpy.array([1, 0, 0, 1])
    prob.c = numpy.array([3, 5, -1, 4])
    return prob
@pytest.fixture
def default_pubo_problem(pubo_problem):
    """Fixture: path of a PUBO .npz saved WITHOUT keywords (columns arr_0..arr_3)."""
    filename = "default_pubo.npz"
    numpy.savez(
        filename,
        pubo_problem.i,
        pubo_problem.j,
        pubo_problem.k,
        pubo_problem.c,
    )
    yield filename
    # Clean up the temporary file after the test
    if os.path.isfile(filename):
        os.remove(filename)
@pytest.fixture
def with_keywords_pubo_problem(pubo_problem):
    """Fixture: path of a PUBO .npz saved WITH explicit keyword column names."""
    filename = "with_keywords_pubo.npz"
    numpy.savez(
        filename,
        i=pubo_problem.i,
        j=pubo_problem.j,
        k=pubo_problem.k,
        c=pubo_problem.c,
    )
    yield filename
    # Clean up the temporary file after the test
    if os.path.isfile(filename):
        os.remove(filename)
@pytest.mark.asyncio
async def test_upload(mock_ws, pubo_problem):
    """Uploading a problem should delegate the blob transfer to upload_blob once."""
    with patch("azure.quantum.aio.optimization.problem.BlobClient") as mock_blob_client, \
        patch("azure.quantum.aio.optimization.problem.ContainerClient") as mock_container_client, \
        patch("azure.quantum.aio.job.base_job.upload_blob") as mock_upload:
        mock_blob_client.from_blob_url.return_value = Mock()
        mock_container_client.from_container_url.return_value = Mock()
        # Fix: identity comparison with None (PEP 8) instead of `== None`,
        # and a plain assert statement instead of `assert(...)`.
        assert pubo_problem.uploaded_blob_uri is None
        actual_result = await pubo_problem.upload(mock_ws)
        mock_upload.get_blob_uri_with_sas_token = AsyncMock()
        azure.quantum.aio.job.base_job.upload_blob.assert_called_once()
@pytest.mark.asyncio
async def test_download(problem, mock_ws):
    """Downloading should fetch the blob once and rebuild the Problem from it."""
    with patch("azure.quantum.aio.optimization.problem.download_blob") as mock_download_blob,\
        patch("azure.quantum.aio.optimization.problem.BlobClient") as mock_blob_client,\
        patch("azure.quantum.aio.optimization.problem.ContainerClient") as mock_container_client:
        # Serve canned serialized terms instead of hitting Azure storage
        mock_download_blob.return_value=expected_terms()
        mock_blob_client.from_blob_url.return_value = Mock()
        mock_container_client.from_container_url.return_value = Mock()
        actual_result = await problem.download(mock_ws)
        # The canned payload describes a problem named "test"
        assert actual_result.name == "test"
        azure.quantum.aio.optimization.problem.download_blob.assert_called_once()
def test_get_term(problem):
terms = problem.get_terms(0)
assert len(terms) == 2
def test_get_term_raise_exception():
    """get_terms must raise when the problem has no terms to search."""
    empty_problem = Problem(name="random")
    with pytest.raises(Exception):
        empty_problem.get_terms(id=0)
def test_create_npz_file_default(default_qubo_problem, default_pubo_problem):
# When no keywords are supplied, columns have default names
# e.g. "arr_0", "arr_1" etc
# QUBO
npz_file = numpy.load(default_qubo_problem)
num_columns = 3
assert len(npz_file.files) == num_columns
for i in range(num_columns):
assert npz_file.files[i] == "arr_%s" % i
# PUBO
npz_file = numpy.load(default_pubo_problem)
num_columns = 4
assert len(npz_file.files) == num_columns
for i in range(num_columns):
assert npz_file.files[i] == "arr_%s" % i
def test_create_npz_file_with_keywords(with_keywords_qubo_problem, with_keywords_pubo_problem):
# When keywords are supplied, columns use these names
# QUBO
npz_file = numpy.load(with_keywords_qubo_problem)
keywords = ["row", "col", "data"]
assert len(npz_file.files) == len(keywords)
for i in range(len(keywords)):
assert npz_file.files[i] == keywords[i]
# PUBO
npz_file = numpy.load(with_keywords_pubo_problem)
keywords = ["i", "j", "k", "c"]
assert len(npz_file.files) == len(keywords)
for i in range(len(keywords)):
assert npz_file.files[i] == keywords[i]
def test_valid_npz(problem, pubo_problem, default_qubo_problem, default_pubo_problem, with_keywords_qubo_problem, with_keywords_pubo_problem):
    """is_valid_npz must accept matching column layouts and reject mismatches."""
    default_qubo = numpy.load(default_qubo_problem)
    with_keywords_qubo = numpy.load(with_keywords_qubo_problem)
    default_pubo = numpy.load(default_pubo_problem)
    with_keywords_pubo = numpy.load(with_keywords_pubo_problem)
    ## Valid files
    # No explicit column spec: presumably defaults match — TODO confirm in Problem.is_valid_npz
    assert problem.is_valid_npz(default_qubo.files)
    assert problem.is_valid_npz(
        default_qubo.files,
        ["arr_0", "arr_1"],
        "arr_2")
    # Index-column order should not matter ("col" before "row")
    assert problem.is_valid_npz(
        with_keywords_qubo.files,
        ["col", "row"],
        "data")
    assert pubo_problem.is_valid_npz(
        default_pubo.files,
        ["arr_0", "arr_1", "arr_2"],
        "arr_3")
    assert pubo_problem.is_valid_npz(
        with_keywords_pubo.files,
        ["i", "j", "k"],
        "c")
    ## Invalid files
    # Too many columns
    assert not problem.is_valid_npz(
        default_qubo.files,
        ["arr_0", "arr_1", "arr_2"],
        "arr_3")
    assert not pubo_problem.is_valid_npz(
        default_pubo.files,
        ["arr_0", "arr_1", "arr_2", "arr_3"],
        "arr_4")
    # Wrong column names
    assert not problem.is_valid_npz(
        with_keywords_qubo.files,
        ["i", "j"],
        "k")
    assert not pubo_problem.is_valid_npz(
        with_keywords_pubo.files,
        ["x", "y", "z"],
        "c")
    # No indices column names
    assert not problem.is_valid_npz(
        with_keywords_qubo.files,
        [],
        "data")
    # Wrong coefficient column name
    assert not problem.is_valid_npz(
        with_keywords_qubo.files,
        ["row", "col"],
        "")
def test_invalid_file_path(problem):
    """terms_from_npz must raise when the file path does not exist."""
    missing_path = "invalid_file_path.npz"
    with pytest.raises(Exception):
        problem.terms_from_npz(missing_path)
def test_invalid_terms_qubo(problem, default_qubo_problem):
    """terms_from_npz must raise when asked for more columns than the file has.

    Bug fix: ``problem`` was not declared as a fixture argument, so the name
    resolved to the module-level fixture *function*; the resulting
    AttributeError was swallowed by pytest.raises and the test passed for
    the wrong reason.  Declaring the fixture makes the test exercise the
    intended validation path.
    """
    with pytest.raises(Exception):
        problem.terms_from_npz(
            default_qubo_problem,
            ["arr_0", "arr_1", "arr_2"],
            "arr_3"
        )
def test_valid_files_produces_terms(problem, default_qubo_problem):
# Terms are produced for valid files
assert problem.terms_from_npz(default_qubo_problem) == problem.terms
def test_valid_keyword_files_produces_terms(problem, with_keywords_qubo_problem):
assert problem.terms_from_npz(
with_keywords_qubo_problem,
["row", "col"],
"data"
) == problem.terms
def test_terms_from_npz_pubo(pubo_problem, default_pubo_problem):
    """terms_from_npz must raise for missing files and mismatched column specs."""
    with pytest.raises(Exception):
        pubo_problem.terms_from_npz("invalid_file_path.npz")
    # Four index columns requested, but the file only holds three
    with pytest.raises(Exception):
        pubo_problem.terms_from_npz(
            default_pubo_problem,
            ["arr_0", "arr_1", "arr_2", "arr_3"],
            "arr_4"
        )
def test_terms_are_produced_for_valid_files(pubo_problem, default_pubo_problem):
# Terms are produced for valid files
assert pubo_problem.terms_from_npz(
default_pubo_problem,
["arr_0", "arr_1", "arr_2"],
"arr_3"
) == pubo_problem.terms
def test_terms_are_produced_for_valid_files_with_keywords(pubo_problem, with_keywords_pubo_problem):
assert pubo_problem.terms_from_npz(
with_keywords_pubo_problem,
["i", "j", "k"],
"c"
) == pubo_problem.terms
| [
"numpy.savez",
"common.expected_terms",
"asyncmock.Mock",
"os.path.isfile",
"azure.quantum.aio.optimization.Problem",
"numpy.array",
"azure.quantum.optimization.Term",
"asyncmock.AsyncMock",
"asyncmock.patch",
"pytest.raises",
"pytest.fixture",
"numpy.load",
"os.remove"
] | [((685, 701), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (699, 701), False, 'import pytest\n'), ((564, 575), 'asyncmock.AsyncMock', 'AsyncMock', ([], {}), '()\n', (573, 575), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((608, 660), 'asyncmock.AsyncMock', 'AsyncMock', ([], {'return_value': '"""mock_container_uri/foo/bar"""'}), "(return_value='mock_container_uri/foo/bar')\n", (617, 660), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((751, 771), 'azure.quantum.aio.optimization.Problem', 'Problem', ([], {'name': '"""test"""'}), "(name='test')\n", (758, 771), False, 'from azure.quantum.aio.optimization import Problem\n'), ((986, 1005), 'numpy.array', 'numpy.array', (['[1, 2]'], {}), '([1, 2])\n', (997, 1005), False, 'import numpy\n'), ((1024, 1043), 'numpy.array', 'numpy.array', (['[0, 0]'], {}), '([0, 0])\n', (1035, 1043), False, 'import numpy\n'), ((1063, 1082), 'numpy.array', 'numpy.array', (['[3, 5]'], {}), '([3, 5])\n', (1074, 1082), False, 'import numpy\n'), ((1400, 1474), 'numpy.savez', 'numpy.savez', (['default_qubo_filename', 'problem.row', 'problem.col', 'problem.data'], {}), '(default_qubo_filename, problem.row, problem.col, problem.data)\n', (1411, 1474), False, 'import numpy\n'), ((1546, 1583), 'os.path.isfile', 'os.path.isfile', (['default_qubo_filename'], {}), '(default_qubo_filename)\n', (1560, 1583), False, 'import os\n'), ((1723, 1791), 'numpy.savez', 'numpy.savez', (['fn'], {'row': 'problem.row', 'col': 'problem.col', 'data': 'problem.data'}), '(fn, row=problem.row, col=problem.col, data=problem.data)\n', (1734, 1791), False, 'import numpy\n'), ((1841, 1859), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (1855, 1859), False, 'import os\n'), ((1960, 1980), 'azure.quantum.aio.optimization.Problem', 'Problem', ([], {'name': '"""test"""'}), "(name='test')\n", (1967, 1980), False, 'from azure.quantum.aio.optimization import Problem\n'), ((2237, 2262), 'numpy.array', 'numpy.array', (['[1, 2, 1, 0]'], 
{}), '([1, 2, 1, 0])\n', (2248, 2262), False, 'import numpy\n'), ((2284, 2309), 'numpy.array', 'numpy.array', (['[0, 0, 0, 2]'], {}), '([0, 0, 0, 2])\n', (2295, 2309), False, 'import numpy\n'), ((2331, 2356), 'numpy.array', 'numpy.array', (['[1, 0, 0, 1]'], {}), '([1, 0, 0, 1])\n', (2342, 2356), False, 'import numpy\n'), ((2378, 2404), 'numpy.array', 'numpy.array', (['[3, 5, -1, 4]'], {}), '([3, 5, -1, 4])\n', (2389, 2404), False, 'import numpy\n'), ((2518, 2597), 'numpy.savez', 'numpy.savez', (['fn', 'pubo_problem.i', 'pubo_problem.j', 'pubo_problem.k', 'pubo_problem.c'], {}), '(fn, pubo_problem.i, pubo_problem.j, pubo_problem.k, pubo_problem.c)\n', (2529, 2597), False, 'import numpy\n'), ((2656, 2674), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (2670, 2674), False, 'import os\n'), ((2800, 2892), 'numpy.savez', 'numpy.savez', (['fn'], {'i': 'pubo_problem.i', 'j': 'pubo_problem.j', 'k': 'pubo_problem.k', 'c': 'pubo_problem.c'}), '(fn, i=pubo_problem.i, j=pubo_problem.j, k=pubo_problem.k, c=\n pubo_problem.c)\n', (2811, 2892), False, 'import numpy\n'), ((2945, 2963), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (2959, 2963), False, 'import os\n'), ((4569, 4591), 'azure.quantum.aio.optimization.Problem', 'Problem', ([], {'name': '"""random"""'}), "(name='random')\n", (4576, 4591), False, 'from azure.quantum.aio.optimization import Problem\n'), ((4867, 4899), 'numpy.load', 'numpy.load', (['default_qubo_problem'], {}), '(default_qubo_problem)\n', (4877, 4899), False, 'import numpy\n'), ((5076, 5108), 'numpy.load', 'numpy.load', (['default_pubo_problem'], {}), '(default_pubo_problem)\n', (5086, 5108), False, 'import numpy\n'), ((5440, 5478), 'numpy.load', 'numpy.load', (['with_keywords_qubo_problem'], {}), '(with_keywords_qubo_problem)\n', (5450, 5478), False, 'import numpy\n'), ((5676, 5714), 'numpy.load', 'numpy.load', (['with_keywords_pubo_problem'], {}), '(with_keywords_pubo_problem)\n', (5686, 5714), False, 'import numpy\n'), ((6047, 
6079), 'numpy.load', 'numpy.load', (['default_qubo_problem'], {}), '(default_qubo_problem)\n', (6057, 6079), False, 'import numpy\n'), ((6105, 6143), 'numpy.load', 'numpy.load', (['with_keywords_qubo_problem'], {}), '(with_keywords_qubo_problem)\n', (6115, 6143), False, 'import numpy\n'), ((6164, 6196), 'numpy.load', 'numpy.load', (['default_pubo_problem'], {}), '(default_pubo_problem)\n', (6174, 6196), False, 'import numpy\n'), ((6222, 6260), 'numpy.load', 'numpy.load', (['with_keywords_pubo_problem'], {}), '(with_keywords_pubo_problem)\n', (6232, 6260), False, 'import numpy\n'), ((802, 827), 'azure.quantum.optimization.Term', 'Term', ([], {'c': '(3)', 'indices': '[1, 0]'}), '(c=3, indices=[1, 0])\n', (806, 827), False, 'from azure.quantum.optimization import Term\n'), ((837, 862), 'azure.quantum.optimization.Term', 'Term', ([], {'c': '(5)', 'indices': '[2, 0]'}), '(c=5, indices=[2, 0])\n', (841, 862), False, 'from azure.quantum.optimization import Term\n'), ((1593, 1625), 'os.remove', 'os.remove', (['default_qubo_filename'], {}), '(default_qubo_filename)\n', (1602, 1625), False, 'import os\n'), ((1869, 1882), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (1878, 1882), False, 'import os\n'), ((2016, 2044), 'azure.quantum.optimization.Term', 'Term', ([], {'c': '(3)', 'indices': '[1, 0, 1]'}), '(c=3, indices=[1, 0, 1])\n', (2020, 2044), False, 'from azure.quantum.optimization import Term\n'), ((2054, 2082), 'azure.quantum.optimization.Term', 'Term', ([], {'c': '(5)', 'indices': '[2, 0, 0]'}), '(c=5, indices=[2, 0, 0])\n', (2058, 2082), False, 'from azure.quantum.optimization import Term\n'), ((2092, 2121), 'azure.quantum.optimization.Term', 'Term', ([], {'c': '(-1)', 'indices': '[1, 0, 0]'}), '(c=-1, indices=[1, 0, 0])\n', (2096, 2121), False, 'from azure.quantum.optimization import Term\n'), ((2131, 2159), 'azure.quantum.optimization.Term', 'Term', ([], {'c': '(4)', 'indices': '[0, 2, 1]'}), '(c=4, indices=[0, 2, 1])\n', (2135, 2159), False, 'from 
azure.quantum.optimization import Term\n'), ((2684, 2697), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (2693, 2697), False, 'import os\n'), ((2973, 2986), 'os.remove', 'os.remove', (['fn'], {}), '(fn)\n', (2982, 2986), False, 'import os\n'), ((3070, 3128), 'asyncmock.patch', 'patch', (['"""azure.quantum.aio.optimization.problem.BlobClient"""'], {}), "('azure.quantum.aio.optimization.problem.BlobClient')\n", (3075, 3128), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((3160, 3223), 'asyncmock.patch', 'patch', (['"""azure.quantum.aio.optimization.problem.ContainerClient"""'], {}), "('azure.quantum.aio.optimization.problem.ContainerClient')\n", (3165, 3223), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((3260, 3311), 'asyncmock.patch', 'patch', (['"""azure.quantum.aio.job.base_job.upload_blob"""'], {}), "('azure.quantum.aio.job.base_job.upload_blob')\n", (3265, 3311), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((3382, 3388), 'asyncmock.Mock', 'Mock', ([], {}), '()\n', (3386, 3388), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((3453, 3459), 'asyncmock.Mock', 'Mock', ([], {}), '()\n', (3457, 3459), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((3624, 3635), 'asyncmock.AsyncMock', 'AsyncMock', ([], {}), '()\n', (3633, 3635), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((3782, 3843), 'asyncmock.patch', 'patch', (['"""azure.quantum.aio.optimization.problem.download_blob"""'], {}), "('azure.quantum.aio.optimization.problem.download_blob')\n", (3787, 3843), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((3876, 3934), 'asyncmock.patch', 'patch', (['"""azure.quantum.aio.optimization.problem.BlobClient"""'], {}), "('azure.quantum.aio.optimization.problem.BlobClient')\n", (3881, 3934), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((3965, 4028), 'asyncmock.patch', 'patch', (['"""azure.quantum.aio.optimization.problem.ContainerClient"""'], {}), 
"('azure.quantum.aio.optimization.problem.ContainerClient')\n", (3970, 4028), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((4095, 4111), 'common.expected_terms', 'expected_terms', ([], {}), '()\n', (4109, 4111), False, 'from common import expected_terms\n'), ((4166, 4172), 'asyncmock.Mock', 'Mock', ([], {}), '()\n', (4170, 4172), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((4237, 4243), 'asyncmock.Mock', 'Mock', ([], {}), '()\n', (4241, 4243), False, 'from asyncmock import AsyncMock, patch, Mock\n'), ((4601, 4625), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4614, 4625), False, 'import pytest\n'), ((7726, 7750), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7739, 7750), False, 'import pytest\n'), ((7869, 7893), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7882, 7893), False, 'import pytest\n'), ((8606, 8630), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8619, 8630), False, 'import pytest\n'), ((8701, 8725), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8714, 8725), False, 'import pytest\n')] |
import io
import logging
import sqlite3
from os.path import abspath, dirname, join, exists
import numpy as np
from speaker_verification.utils.logger import SpeakerVerificationLogger
# Absolute path to the bundled SQLite database file: <this dir>/SQL/sqlite.db
DATABASE_PATH = join(abspath(dirname(__file__)), "SQL", "sqlite.db")
# Module-level logger shared by all database helpers below
logger = SpeakerVerificationLogger(name=__file__)
logger.setLevel(logging.INFO)
class DatabaseError(Exception):
    """Raised when any SQLite operation in this module fails."""
    pass
def adapt_array(arr):
    """Serialize a numpy array into a sqlite3 BLOB using numpy's .npy format.

    http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
    """
    buffer = io.BytesIO()
    np.save(buffer, arr)
    buffer.seek(0)
    return sqlite3.Binary(buffer.read())
def convert_array(text):
    """Deserialize a BLOB produced by adapt_array back into a numpy array."""
    buffer = io.BytesIO(text)
    buffer.seek(0)
    return np.load(buffer)
# Teach sqlite3 to store numpy arrays as BLOBs and to rebuild them when a
# column is declared with type "array" (requires PARSE_DECLTYPES on connect).
sqlite3.register_adapter(np.ndarray, adapt_array)
sqlite3.register_converter("array", convert_array)
def get_db_connection(database=DATABASE_PATH):
    """Open `database` and return a (connection, cursor) pair.

    PARSE_DECLTYPES activates the registered "array" column converter.
    """
    connection = sqlite3.connect(database, detect_types=sqlite3.PARSE_DECLTYPES)
    return connection, connection.cursor()
def establish_sqlite_db(table_name, database=DATABASE_PATH):
    """Create the database file and its `table_name` table if missing.

    Bug fixes: the connection previously used only the path's basename
    (creating the database file in the current working directory instead of
    at `database`), and the table was created in the default database rather
    than the one requested by the caller.
    """
    if not exists(database):
        # connect() creates the file; assumes the parent directory exists
        sqlite3.connect(database).close()
        create_db_table(table_name, database)
def read_sqlite_table(table, database=DATABASE_PATH):
    """read_sqlite_table.
    Log all records within the given table.
    Parameters
    ----------
    table : str
        Name of table to read records from.
    database : str
        Path to the SQLite database file.
    Raises
    ------
    DatabaseError
        If the table cannot be read.
    """
    try:
        _, cur = get_db_connection(database)
        cur.execute(f"select * from {table}")
        records = cur.fetchall()
        for row in records:
            # Bug fix: the original passed row values as logging args with no
            # %s placeholder in the message, so they were never rendered.
            logger.info("Id: %s", row[0])
            logger.info("mfcc: %s", type(row[1]))
    except sqlite3.Error as error:
        logger.error("Failed to read data from sqlite table: %s", error)
        # Chain the original error for debuggability
        raise DatabaseError() from error
def create_db_table(table: str, database=DATABASE_PATH):
    """create_db_table.
    Creates a table within sqlite database to store user records.
    Parameters
    ----------
    table : str
        Name of table to create.  NOTE(review): the name is interpolated
        directly into SQL (table names cannot be bound parameters), so
        callers must not pass untrusted input here.
    database : str
        Path to the SQLite database file.
    Raises
    ------
    DatabaseError
        If table creation fails (e.g. the table already exists).
    """
    try:
        _, cur = get_db_connection(database)
        cur.execute(f"create table {table}(id integer primary key, arr array)")
    except Exception as err:
        # Bug fix: the original dropped `err` from the log line (no placeholder)
        logger.error("Cannot create table for %s: %s", table, err)
        raise DatabaseError() from err
def remove_db_row(table: str, id: int, database=DATABASE_PATH):
    """remove_db_row.
    Removes row within sqlite table according to "id" and "table" parameters.
    Parameters
    ----------
    table : str
        Name of table to remove record from.
    id : int
        Id key for required record within table for removal.
    database : str
        Path to the SQLite database file.
    Raises
    ------
    DatabaseError
        If the deletion fails.
    """
    try:
        con, cur = get_db_connection(database)
        # Bug fixes: bind `id` as a SQL parameter instead of f-string
        # interpolation, and commit — the original never committed, so the
        # delete was rolled back when the connection was garbage-collected.
        cur.execute(f"delete from {table} where id=?", (id,))
        con.commit()
    except Exception as err:
        logger.error("Failed to delete id (%s) from table (%s): %s", id, table, err)
        raise DatabaseError() from err
def select_db_row(table: str, id: int, database=DATABASE_PATH):
    """select_db_row.
    Selects a row within a registered sqlite database table.
    Parameters
    ----------
    table : str
        Name of table to select record from.
    id : int
        Id key for required record within table for selection.
    database : str
        Path to the SQLite database file.
    Returns
    -------
    tuple or None
        The matching row, or None if absent or on error (errors are logged,
        matching the original behavior of not raising here).
    """
    try:
        _, cur = get_db_connection(database)
        # Bug fix: bind `id` as a SQL parameter instead of f-string interpolation
        cur.execute(f"select * from {table} where id=?", (id,))
        return cur.fetchone()
    except Exception as err:
        logger.error("Database Error: %s", err)
def insert_db_row(table: str, id: int, mfcc: np.ndarray, database=DATABASE_PATH):
    """insert_db_row.
    Inserts a record of given id and mfcc dataset into the sqlite database table specified.
    Parameters
    ----------
    table : str
        Name of table to insert record within.
    id : int
        Id key for required record within table for insertion.
    mfcc : numpy.ndarray
        MFCC dataset to be inserted within database records (stored via the
        registered "array" adapter).
    database : str
        Path to the SQLite database file.
    Raises
    ------
    DatabaseError
        If the insertion fails (e.g. duplicate primary key).
    """
    try:
        con, cur = get_db_connection(database)
        cur.execute(f"insert into {table}(id, arr) values (?, ?)", (id, mfcc))
        con.commit()
    except Exception as err:
        # Bug fix: the original dropped `err` from the log line (no placeholder)
        logger.error("Database Error: %s", err)
        raise DatabaseError() from err
| [
"os.path.exists",
"sqlite3.register_converter",
"sqlite3.register_adapter",
"speaker_verification.utils.logger.SpeakerVerificationLogger",
"sqlite3.connect",
"io.BytesIO",
"os.path.dirname",
"numpy.load",
"numpy.save"
] | [((263, 303), 'speaker_verification.utils.logger.SpeakerVerificationLogger', 'SpeakerVerificationLogger', ([], {'name': '__file__'}), '(name=__file__)\n', (288, 303), False, 'from speaker_verification.utils.logger import SpeakerVerificationLogger\n'), ((673, 722), 'sqlite3.register_adapter', 'sqlite3.register_adapter', (['np.ndarray', 'adapt_array'], {}), '(np.ndarray, adapt_array)\n', (697, 722), False, 'import sqlite3\n'), ((723, 773), 'sqlite3.register_converter', 'sqlite3.register_converter', (['"""array"""', 'convert_array'], {}), "('array', convert_array)\n", (749, 773), False, 'import sqlite3\n'), ((488, 500), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (498, 500), False, 'import io\n'), ((505, 522), 'numpy.save', 'np.save', (['out', 'arr'], {}), '(out, arr)\n', (512, 522), True, 'import numpy as np\n'), ((614, 630), 'io.BytesIO', 'io.BytesIO', (['text'], {}), '(text)\n', (624, 630), False, 'import io\n'), ((658, 670), 'numpy.load', 'np.load', (['out'], {}), '(out)\n', (665, 670), True, 'import numpy as np\n'), ((846, 909), 'sqlite3.connect', 'sqlite3.connect', (['database'], {'detect_types': 'sqlite3.PARSE_DECLTYPES'}), '(database, detect_types=sqlite3.PARSE_DECLTYPES)\n', (861, 909), False, 'import sqlite3\n'), ((214, 231), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (221, 231), False, 'from os.path import abspath, dirname, join, exists\n'), ((1052, 1068), 'os.path.exists', 'exists', (['database'], {}), '(database)\n', (1058, 1068), False, 'from os.path import abspath, dirname, join, exists\n')] |
# The functions defined here were copied based on the source code
# defined in xarray
import datetime
from typing import Any, Iterable
import numpy as np
import pandas as pd
try:
import cftime
except ImportError:
cftime = None
try:
import dask.array
dask_array_type = dask.array.Array
except ImportError:
dask_array_type = ()
def asarray(data, xp=np):
    """Return `data` unchanged if it is already a duck array, else `xp.asarray(data)`."""
    if is_duck_array(data):
        return data
    return xp.asarray(data)
def is_duck_array(value: Any) -> bool:
    """Checks if value is a duck array (ndarray, or anything array-protocol-shaped)."""
    if isinstance(value, np.ndarray):
        return True
    # Anything exposing the full array protocol counts as a duck array
    required_attrs = (
        "ndim",
        "shape",
        "dtype",
        "__array_function__",
        "__array_ufunc__",
    )
    return all(hasattr(value, attr) for attr in required_attrs)
)
def is_dask_collection(x):
try:
import dask
return dask.is_dask_collection(x)
except ImportError:
return False
def is_duck_dask_array(x):
    """True only for duck arrays that are also dask collections."""
    if not is_duck_array(x):
        return False
    return is_dask_collection(x)
class ReprObject:
    """Object that prints as the given value, for use with sentinel values."""

    __slots__ = ("_value",)

    def __init__(self, value: str):
        self._value = value

    def __repr__(self) -> str:
        return self._value

    def __eq__(self, other) -> bool:
        # Only another ReprObject carrying the same value compares equal
        return isinstance(other, ReprObject) and self._value == other._value

    def __hash__(self) -> int:
        return hash((type(self), self._value))

    def __dask_tokenize__(self):
        from dask.base import normalize_token
        return normalize_token((type(self), self._value))
def is_scalar(value: Any, include_0d: bool = True) -> bool:
    """Whether to treat a value as a scalar.

    Any non-iterable, string, or 0-D array
    """
    non_numpy_array_types = (dask_array_type, pd.Index)
    if include_0d:
        include_0d = getattr(value, "ndim", None) == 0
    if include_0d:
        return True
    if isinstance(value, (str, bytes)):
        return True
    # Anything implementing the array protocol is never a scalar here
    if hasattr(value, "__array_function__"):
        return False
    return not isinstance(value, (Iterable,) + non_numpy_array_types)
def isnull(data):
    """Elementwise null mask: NaT for datetimes, NaN for floats, all-False for exact types."""
    data = np.asarray(data)
    kind = data.dtype.type
    # timedelta64 must be checked before integers: it currently subclasses np.integer
    if issubclass(kind, (np.datetime64, np.timedelta64)):
        return np.isnat(data)
    if issubclass(kind, np.inexact):
        return np.isnan(data)
    if issubclass(kind, (np.bool_, np.integer, np.character, np.void)):
        # these dtypes cannot represent missing values
        return np.zeros_like(data, dtype=bool)
    # dtype=object: delegate to pandas for numpy/dask arrays
    if isinstance(data, (np.ndarray, dask_array_type)):
        return pd.isnull(data)
    # Not reachable yet, but intended for use with other duck array
    # types. For full consistency with pandas, we should accept None as
    # a null value as well as NaN, but it isn't clear how to do this
    # with duck typing.
    return data != data
def datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float):
    """Convert an array containing datetime-like data to numerical values.
    Convert the datetime array to a timedelta relative to an offset.
    Parameters
    ----------
    array : array-like
        Input data
    offset : None, datetime or cftime.datetime
        Datetime offset. If None, this is set by default to the array's minimum
        value to reduce round off errors.
    datetime_unit : {None, Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as}
        If not None, convert output to a given datetime unit. Note that some
        conversions are not allowed due to non-linear relationships between units.
    dtype : dtype
        Output dtype.
    Returns
    -------
    array
        Numerical representation of datetime object relative to an offset.
    Notes
    -----
    Some datetime unit conversions won't work, for example from days to years, even
    though some calendars would allow for them (e.g. no_leap). This is because there
    is no `cftime.timedelta` object.
    """
    # TODO: make this function dask-compatible?
    # Set offset to minimum if not given
    from xarray.core.duck_array_ops import _datetime_nanmin
    if offset is None:
        if array.dtype.kind in "Mm":
            # datetime64/timedelta64 data: presumably a NaT-aware minimum — see xarray
            offset = _datetime_nanmin(array)
        else:
            # object-dtype (e.g. cftime) data: plain python min
            offset = min(array)
    # Compute timedelta object.
    # For np.datetime64, this can silently yield garbage due to overflow.
    # One option is to enforce 1970-01-01 as the universal offset.
    # This map_blocks call is for backwards compatibility.
    # dask == 2021.04.1 does not support subtracting object arrays
    # which is required for cftime
    if is_duck_dask_array(array) and np.issubdtype(array.dtype, object):
        array = array.map_blocks(lambda a, b: a - b, offset, meta=array._meta)
    else:
        array = array - offset
    # Scalar is converted to 0d-array
    if not hasattr(array, "dtype"):
        array = np.array(array)
    # Convert timedelta objects to float by first converting to microseconds.
    if array.dtype.kind in "O":
        return py_timedelta_to_float(array, datetime_unit or "ns").astype(dtype)
    # Convert np.NaT to np.nan
    elif array.dtype.kind in "mM":
        # Convert to specified timedelta units.
        if datetime_unit:
            array = array / np.timedelta64(1, datetime_unit)
        return np.where(isnull(array), np.nan, array.astype(dtype))
def timedelta_to_numeric(value, datetime_unit="ns", dtype=float):
    """Convert a timedelta-like object to numerical values.
    Parameters
    ----------
    value : datetime.timedelta, numpy.timedelta64, pandas.Timedelta, str
        Time delta representation.
    datetime_unit : {Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as}
        The time units of the output values. Note that some conversions are not allowed due to
        non-linear relationships between units.
    dtype : type
        The output data type.
    Raises
    ------
    ValueError
        If a string `value` cannot be parsed by pandas.to_timedelta.
    TypeError
        If `value` is not one of the supported timedelta-like types.
    """
    import datetime as dt
    # Dispatch on the concrete timedelta flavor
    if isinstance(value, dt.timedelta):
        out = py_timedelta_to_float(value, datetime_unit)
    elif isinstance(value, np.timedelta64):
        out = np_timedelta64_to_float(value, datetime_unit)
    elif isinstance(value, pd.Timedelta):
        out = pd_timedelta_to_float(value, datetime_unit)
    elif isinstance(value, str):
        try:
            a = pd.to_timedelta(value)
        except ValueError:
            raise ValueError(
                f"Could not convert {value!r} to timedelta64 using pandas.to_timedelta"
            )
        return py_timedelta_to_float(a, datetime_unit)
    else:
        raise TypeError(
            f"Expected value of type str, pandas.Timedelta, datetime.timedelta "
            f"or numpy.timedelta64, but received {type(value).__name__}"
        )
    return out.astype(dtype)
def _to_pytimedelta(array, unit="us"):
return array.astype(f"timedelta64[{unit}]").astype(datetime.timedelta)
def np_timedelta64_to_float(array, datetime_unit):
    """Convert numpy.timedelta64 to float.

    Notes
    -----
    The array is first converted to nanoseconds (the code's intermediate
    unit), then rescaled to `datetime_unit`.
    """
    as_ns = array.astype("timedelta64[ns]").astype(np.float64)
    scale = np.timedelta64(1, "ns") / np.timedelta64(1, datetime_unit)
    return scale * as_ns
def pd_timedelta_to_float(value, datetime_unit):
    """Convert pandas.Timedelta to float.

    Notes
    -----
    Built on the assumption that pandas timedelta values are in nanoseconds,
    which is also the numpy default resolution.
    """
    as_np = value.to_timedelta64()
    return np_timedelta64_to_float(as_np, datetime_unit)
def _timedelta_to_seconds(array):
    # NOTE: despite the name, this returns *microseconds* (total_seconds * 1e6);
    # py_timedelta_to_float compensates with a microsecond-based conversion factor.
    return np.reshape([a.total_seconds() for a in array.ravel()], array.shape) * 1e6
def py_timedelta_to_float(array, datetime_unit):
    """Convert a timedelta object to a float, possibly at a loss of resolution."""
    array = asarray(array)
    # _timedelta_to_seconds yields microseconds; the factor below rescales them
    if is_duck_dask_array(array):
        microseconds = array.map_blocks(
            _timedelta_to_seconds, meta=np.array([], dtype=np.float64)
        )
    else:
        microseconds = _timedelta_to_seconds(array)
    scale = np.timedelta64(1, "us") / np.timedelta64(1, datetime_unit)
    return scale * microseconds
def _contains_cftime_datetimes(array) -> bool:
    """Check if an array contains cftime.datetime objects"""
    if cftime is None:
        # cftime not installed: nothing to detect
        return False
    if array.dtype != np.dtype("O") or array.size == 0:
        return False
    # Inspect a single element as a representative sample
    sample = array.ravel()[0]
    if is_duck_dask_array(sample):
        sample = sample.compute()
    if isinstance(sample, np.ndarray):
        sample = sample.item()
    return isinstance(sample, cftime.datetime)
| [
"pandas.isnull",
"pandas.to_timedelta",
"dask.is_dask_collection",
"xarray.core.duck_array_ops._datetime_nanmin",
"numpy.asarray",
"numpy.array",
"numpy.issubdtype",
"numpy.isnan",
"numpy.timedelta64",
"numpy.isnat",
"numpy.dtype",
"numpy.zeros_like"
] | [((2217, 2233), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2227, 2233), True, 'import numpy as np\n'), ((875, 901), 'dask.is_dask_collection', 'dask.is_dask_collection', (['x'], {}), '(x)\n', (898, 901), False, 'import dask\n'), ((2511, 2525), 'numpy.isnat', 'np.isnat', (['data'], {}), '(data)\n', (2519, 2525), True, 'import numpy as np\n'), ((5049, 5083), 'numpy.issubdtype', 'np.issubdtype', (['array.dtype', 'object'], {}), '(array.dtype, object)\n', (5062, 5083), True, 'import numpy as np\n'), ((5296, 5311), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (5304, 5311), True, 'import numpy as np\n'), ((7592, 7615), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""ns"""'], {}), "(1, 'ns')\n", (7606, 7615), True, 'import numpy as np\n'), ((7618, 7650), 'numpy.timedelta64', 'np.timedelta64', (['(1)', 'datetime_unit'], {}), '(1, datetime_unit)\n', (7632, 7650), True, 'import numpy as np\n'), ((8515, 8538), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""us"""'], {}), "(1, 'us')\n", (8529, 8538), True, 'import numpy as np\n'), ((8541, 8573), 'numpy.timedelta64', 'np.timedelta64', (['(1)', 'datetime_unit'], {}), '(1, datetime_unit)\n', (8555, 8573), True, 'import numpy as np\n'), ((2626, 2640), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (2634, 2640), True, 'import numpy as np\n'), ((4606, 4629), 'xarray.core.duck_array_ops._datetime_nanmin', '_datetime_nanmin', (['array'], {}), '(array)\n', (4622, 4629), False, 'from xarray.core.duck_array_ops import _datetime_nanmin\n'), ((2791, 2822), 'numpy.zeros_like', 'np.zeros_like', (['data'], {'dtype': 'bool'}), '(data, dtype=bool)\n', (2804, 2822), True, 'import numpy as np\n'), ((8404, 8434), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (8412, 8434), True, 'import numpy as np\n'), ((8801, 8814), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (8809, 8814), True, 'import numpy as np\n'), ((2968, 2983), 'pandas.isnull', 
'pd.isnull', (['data'], {}), '(data)\n', (2977, 2983), True, 'import pandas as pd\n'), ((5674, 5706), 'numpy.timedelta64', 'np.timedelta64', (['(1)', 'datetime_unit'], {}), '(1, datetime_unit)\n', (5688, 5706), True, 'import numpy as np\n'), ((6698, 6720), 'pandas.to_timedelta', 'pd.to_timedelta', (['value'], {}), '(value)\n', (6713, 6720), True, 'import pandas as pd\n')] |
import numpy as np
def run_optimizer(opt, cost_f, iterations, *args, **kwargs):
    """Drive `opt` for `iterations` steps over the cost surface `cost_f`.

    Returns (errors, distance, xs, ys): the cost at each visited point, the
    Euclidean distance of each point from the optimum, and the visited
    x/y coordinates (starting point included).
    """
    xs = [cost_f.x_start]
    ys = [cost_f.y_start]
    errors = [cost_f.eval(xs[0], ys[0])]
    for _ in range(iterations):
        x_new, y_new = opt.step(*args, **kwargs)
        xs.append(x_new)
        ys.append(y_new)
        errors.append(cost_f.eval(x_new, y_new))
    dx = np.array(xs) - cost_f.x_optimum
    dy = np.array(ys) - cost_f.y_optimum
    distance = np.sqrt(dx ** 2 + dy ** 2)
    return errors, distance, xs, ys
class Optimizer:
    """Base class for 2-D optimizers walking over a cost surface.

    Subclasses must override `step`. When no explicit starting point is
    given, the cost function's own start coordinates are used.
    """
    def __init__(self, cost_f, lr, x, y, **kwargs):
        self.lr = lr            # learning rate
        self.cost_f = cost_f    # cost surface providing x_start / y_start
        # Fix: `x == None` is non-idiomatic and breaks when coordinates are
        # numpy arrays (elementwise comparison has no truth value).
        if x is None or y is None:
            self.x = self.cost_f.x_start
            self.y = self.cost_f.y_start
        else:
            self.x = x
            self.y = y
        # Allow subclasses to stash extra hyper-parameters as attributes.
        self.__dict__.update(kwargs)

    def step(self, lr):
        """Advance one iteration and return the new (x, y). Abstract."""
        raise NotImplementedError()
"numpy.array"
] | [((369, 381), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (377, 381), True, 'import numpy as np\n'), ((406, 418), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (414, 418), True, 'import numpy as np\n')] |
# Filename: preprocessing.py
# Authors: apadin, mgkallit
# Start Date: 3/7/2017
# Last Update: 3/7/2017
"""Helper functions for preprocessing data
scale_features - Scale X matrix to put values in range of about -0.5 to 0.5
auto_regression - Adds n auto-regressive features to the X matrix
"""
#==================== LIBRARIES ====================#
import numpy as np
import pandas as pd
#==================== FUNCTIONS ====================#
def scale_features(X):
    """Scale X column-wise into roughly [-0.5, 0.5] (mean-centred, range-divided)."""
    centered = X - X.mean(0)
    col_range = X.max(0) - X.min(0)
    # Constant columns produce 0/0 -> NaN; nan_to_num maps those to 0.
    return np.nan_to_num(centered / col_range)
def add_auto_regression(X, y, n):
    """Adds n auto-regressive features to the X matrix.

    Each appended column is the target `y` shifted one more step into the
    past; the leading entry (no history available) is zero-filled.

    Fix: the loop used `xrange`, which exists only in Python 2 and raises
    NameError on Python 3 — replaced with `range`.
    """
    for _ in range(n):
        y = np.roll(y, 1)   # shift one step; np.roll returns a copy
        y[0] = 0            # first sample has no history
        X = np.concatenate((X, y), 1)
    return X
def filter_low_variance(df):
    """
    Filter features with little or no variance.

    A column is dropped when all its values are equal (max == min).
    Returns a new dataframe and the list of removed column names.
    """
    removed_list = []
    for column in df.columns:
        values = df[column].values
        if (values.max() == values.min()):
            # Fix: `df.drop(column, 1)` relied on the positional `axis`
            # argument, deprecated in pandas 1.x and removed in pandas 2.0.
            df = df.drop(columns=column)
            removed_list.append(column)
    return df, removed_list
| [
"numpy.concatenate",
"numpy.roll",
"numpy.nan_to_num"
] | [((616, 639), 'numpy.nan_to_num', 'np.nan_to_num', (['X_scaled'], {}), '(X_scaled)\n', (629, 639), True, 'import numpy as np\n'), ((783, 796), 'numpy.roll', 'np.roll', (['y', '(1)'], {}), '(y, 1)\n', (790, 796), True, 'import numpy as np\n'), ((826, 851), 'numpy.concatenate', 'np.concatenate', (['(X, y)', '(1)'], {}), '((X, y), 1)\n', (840, 851), True, 'import numpy as np\n')] |
'''
<NAME>
Python version: 3.6
Conway's Game of life
'''
import numpy
import math
def get_generation(cells, generations):
    """Evolve a Game-of-Life board `generations` steps and return the result.

    `cells` is a 2-D numpy array of 1 (live) / 0 (dead). Each generation is
    computed on a board expanded by one cell on every side, then trimmed to
    the bounding box of the live cells; an extinct board is returned as [].

    Fixes over the original:
    * the recursive call's result was discarded (missing `return`), so the
      function returned None whenever generations > 0;
    * interior dead cells were copied in as 0 instead of -1, so births
      (dead cell with exactly 3 neighbours -> value -8) could only ever
      happen on the expanded border ring, never inside the board.
    """
    #_ neighbour offsets in expanded-board coordinates:
    #_ next_cells[row][col] corresponds to cells[row - 1][col - 1]
    adj = ((-2, -2), (-2, -1), (-2, 0), (-1, -2), (-1, 0),
           (0, -2), (0, -1), (0, 0))
    def status(cells, cur):
        print("\ngeneration{0}\n".format(cur), cells)
        if not generations or len(cells) < 1 or cur == generations:
            return cells
        #_ expand 1 cell on each border; 1 marks live, -1 marks dead, so a
        #_ magnitude of 2**k encodes k live neighbours (sign = alive/dead)
        h, w = len(cells), len(cells[0])
        next_cells = numpy.full((h + 2, w + 2), -1, dtype = numpy.int8)
        next_cells[1: -1, 1: -1] = numpy.where(cells, 1, -1)
        #_ bounding box of the live cells of the next generation
        nh, nw = -math.inf, -math.inf
        min_h, min_w = math.inf, math.inf
        for row in range(len(next_cells)):
            for col in range(len(next_cells[0])):
                #_ double once per live neighbour
                for r, c in adj:
                    if (-1 < row + r < h and -1 < col + c < w and
                        cells[row + r, col + c]):
                        next_cells[row, col] *= 2
                        #_ 4+ live neighbours: dies (or stays dead)
                        if next_cells[row, col] in (16, -16):
                            next_cells[row, col] = 0
                            break
                #_ 4/8: live cell with 2-3 neighbours survives;
                #_ -8: dead cell with exactly 3 neighbours is born
                if next_cells[row, col] in (4, 8, -8):
                    nh, min_h = max(nh, row), min(min_h, row)
                    nw, min_w = max(nw, col), min(min_w, col)
                    next_cells[row, col] = 1
                else:
                    next_cells[row, col] = 0
        #_ no live cells -> []; otherwise trim to the bounding box
        cells = ([] if min_h == min_w == -nh == -nw == math.inf
                 else next_cells[min_h: nh + 1, min_w: nw + 1])
        return status(cells, cur + 1)
    return status(cells, 0)
#_ demo: evolve a random 3x5 board for 5 generations (runs on import)
cells = numpy.random.randint(2, size=(3, 5))
get_generation(cells, 5)
| [
"numpy.full",
"numpy.random.randint"
] | [((2251, 2287), 'numpy.random.randint', 'numpy.random.randint', (['(2)'], {'size': '(3, 5)'}), '(2, size=(3, 5))\n', (2271, 2287), False, 'import numpy\n'), ((616, 664), 'numpy.full', 'numpy.full', (['(h + 2, w + 2)', '(-1)'], {'dtype': 'numpy.int8'}), '((h + 2, w + 2), -1, dtype=numpy.int8)\n', (626, 664), False, 'import numpy\n')] |
import zmq
import sys
import math
import numpy
class Broker:
    """ZMQ ROUTER broker that farms k-means runs (one k per worker) out to
    servers and narrows in on the best k with an elbow-style ternary search
    over the recorded (k, SSD) pairs."""
    # NOTE: socket and context are class attributes, shared by all instances.
    context = zmq.Context()
    router = context.socket(zmq.ROUTER)
    #poller = zmq.Poller()
    p = 0  # unused counter; all increments below are commented out
    def __init__(self, n):
        # dispatch table: message type -> handler
        self.op = {"WorkDone":self.serverResponse, "serverFREE":self.serverFree}
        self.router.bind("tcp://*:5000")
        # state for the k search
        self.n = n                 # number of data points (upper bound for k)
        self.bestK = None          # best k found so far
        self.obtenido = {}         # k -> SSD reported by the workers
        #self.colaK = [1, self.n//2, self.n]
        #self.colaK = [1, numpy.random.randint(2,self.n/2),numpy.random.randint(self.n/2,self.n + 1)]
        self.colaK = [1,int(self.n/8), int(self.n/4 + 1)]  # queue of k values to evaluate
        self.cantKCalculate = 0    # how many k values have been evaluated
    def serverResponse(self, data):
        """Handle a 'WorkDone' message: record the SSD a worker reports for
        its k (duplicate k values are ignored)."""
        print("el servidor acabo de evaluar un k")
        print(data[1])
        print(data)
        # store the result; frames: [identity, type, k, ssd]
        kObtenido = int(data[2].decode())
        ssdobtenido = float(data[3].decode())
        if kObtenido in self.obtenido:
            print("el k ya habia sido calculado")
        else:
            self.obtenido[kObtenido] = ssdobtenido
            print("obtenido k: " , kObtenido, "su ssd:", ssdobtenido)
            self.cantKCalculate += 1
    def serverFree(self, data):
        """Handle a 'serverFREE' message: hand the idle worker the next
        queued k, tell it to WAIT when the queue is empty, or Finish once
        enough k values (~sqrt(n)) have been evaluated."""
        print("un servidor esta libre se le va asignar un k para que trabaje")
        print(data[1])
        # keep assigning work until ~sqrt(n) k values have been computed
        if self.cantKCalculate <= math.sqrt(self.n):
            if len(self.colaK): # there are queued k values to send
                ktocalc = self.colaK.pop(0)
                msj = [data[0], b'KMEANS', str(ktocalc).encode()]#, b"probando"]
                self.router.send_multipart(msj)
                #self.p += 1
            else: # nothing queued right now: ask the worker to wait
                msj = [data[0], b'WAIT', b"0"]#, b"probando"]
                self.router.send_multipart(msj)
                #self.p += 1
        else:
            print("ha finalizado el proceso no puedo enviar mas")
            msj = [data[0], b'Finish', b"0"]#, b"probando"]
            self.router.send_multipart(msj)
            #self.p += 1
    def run(self):
        """Main loop: poll the ROUTER socket (100 ms), dispatch incoming
        messages, and once 3+ (k, SSD) pairs exist keep refining the search
        range via elbow2() and queueing new random k values inside it."""
        print("Running the server Broker....")
        while True:
            if self.router.poll(100):
                print("-----------un servidor ha hecho una solicitud--------------")
                msj = self.router.recv_multipart()
                # frame 1 holds the message type; dispatch to its handler
                self.op[msj[1].decode()](msj)
                # with at least 3 evaluated k values the elbow search can run
                if len(list(self.obtenido.keys())) >= 3:
                    print("calculando elbow")
                    a,b,c = self.elbow2()
                    print("k a buscar", a,b,c)
                    try:
                        # sample new candidates inside both halves of the range
                        self.colaK.append(numpy.random.randint(a,b+1))
                        self.colaK.append(numpy.random.randint(b, c+1))
                    except Exception as e:
                        print("hubo un erro y no se peuden agregar k a la cola")
                        #self.colaK.append(numpy.random.randint(l,m+1))
                        #self.colaK.append(numpy.random.randint(m, r+1))
            print("el mejor k hasta el momento:" , self.bestK)
    def dist(self, x, y):
        """Euclidean norm of the vector (x, y)."""
        return math.sqrt(x*x + y*y)
    def calculoTheta(self, x1, y1, x2, y2) :
        """Angle (radians) between the vectors (x1, y1) and (x2, y2) via a
        clamped acos of their normalized dot product.
        NOTE(review): the numerator uses `y2*y2` — likely intended `y1*y2`;
        confirm before relying on the returned angle."""
        var = (x1*x2+y2*y2)/(self.dist(x1, y1)*self.dist(x2, y2))
        print("el valor a calcular en el acos", var)
        # clamp to acos' domain against floating-point drift
        if var > 1:
            var = 1
        if var < -1:
            var = -1
        res = math.acos(var)
        print("el valor del theta calculado es:", res)
        return res
    def elbow2(self):
        """Ternary-search variant of the elbow method over the sorted
        (k, SSD) pairs: repeatedly move toward the side whose midpoint forms
        a sharper angle. Returns the k values at (left, elbow, right)."""
        listaOrdenada = list(self.obtenido.keys())  # keys are the x values (k)
        listaOrdenada.sort()                        # dict values are the y (SSD)
        l = 0
        r = len(listaOrdenada) - 1
        k = (l+r)>>1  # midpoint
        # initial angle at the midpoint between the two endpoints
        theta = self.calculoTheta(listaOrdenada[l]-listaOrdenada[k],
                    self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[k]],
                    listaOrdenada[r]-listaOrdenada[k],
                    self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[k]])
        flag = True
        while flag:
            flag = False
            midI = math.ceil((k+l)/2)   # left-half midpoint (ceil)
            midD = math.floor((k+r)/2)  # right-half midpoint (floor)
            thetaD = 4  # sentinel > pi so an unset side never wins
            thetaI = 4
            orientation = 0
            if midI < k:
                # angle at the left-half midpoint
                thetaI = self.calculoTheta(listaOrdenada[l]-listaOrdenada[midI],
                        self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[midI]],
                        listaOrdenada[k]-listaOrdenada[midI],
                        self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[midI]])
            if midD > k:
                # angle at the right-half midpoint
                thetaD = self.calculoTheta(listaOrdenada[k]-listaOrdenada[midD],
                        self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[midD]],
                        listaOrdenada[r]-listaOrdenada[midD],
                        self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[midD]])
            # move toward whichever midpoint has a sharper angle than the pivot
            if (thetaD < theta) or (thetaI < theta):
                print("posiciones")
                print(l)
                print(k)
                print(r)
                if thetaD < thetaI:
                    print("derecha")
                    print("mid", midD)
                    flag = True
                    theta = thetaD
                    l = k
                    k = midD
                    self.bestK = listaOrdenada[k]
                    orientation = 0
                else:
                    print("izquierda")
                    print("mid", midI)
                    flag = True
                    theta = thetaI
                    r = k
                    k = midI
                    self.bestK = listaOrdenada[k]
                    orientation = 1
            print("posiciones actualizadas")
            print(l)
            print(k)
            print(r)
        """if orientation:
            return listaOrdenada[k], listaOrdenada[r]
        else:
            return listaOrdenada[l], listaOrdenada[k]"""
        print(listaOrdenada)
        return listaOrdenada[l], listaOrdenada[k], listaOrdenada[r]
    def elbow(self):
        """Older elbow search kept for reference; returns the *indices*
        (l, k, r) rather than k values.
        NOTE(review): unlike elbow2, this sets self.bestK to the index k,
        not listaOrdenada[k] — confirm which one callers expect."""
        listaOrdenada = list(self.obtenido.keys())  # keys are the x values (k)
        listaOrdenada.sort()                        # dict values are the y (SSD)
        l = 0
        r = len(listaOrdenada) - 1
        k = (l+r)>>1  # midpoint
        self.bestK = k
        # compute the initial angle at the midpoint
        theta = self.calculoTheta(listaOrdenada[l]-listaOrdenada[k],
                    self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[k]],
                    listaOrdenada[r]-listaOrdenada[k],
                    self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[k]])
        print("valor de thetha", theta)
        flag = True
        while(flag) :
            flag = False
            #mid = (k+r)>>1
            mid = math.floor((k+r)/2)
            print("el valor de mid", mid)
            print("el valor de r", r)
            print("el valor de k", k)
            print("el valor de l", l)
            print(listaOrdenada)
            print(list(self.obtenido.items()))
            # candidate angle on the right-hand side
            temp = self.calculoTheta(listaOrdenada[k]-listaOrdenada[mid],
                        self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[mid]],
                        listaOrdenada[r]-listaOrdenada[mid],
                        self.obtenido[listaOrdenada[r]] - self.obtenido[listaOrdenada[mid]])
            # keep the right midpoint if its angle is sharper
            if(theta > temp) :
                flag = True
                theta = temp
                l = k
                k = mid
                self.bestK = k
            mid = math.ceil((k+l)/2)  # ceil
            # candidate angle on the left-hand side
            temp = self.calculoTheta(listaOrdenada[l]-listaOrdenada[mid],
                        self.obtenido[listaOrdenada[l]] - self.obtenido[listaOrdenada[mid]],
                        listaOrdenada[k]-listaOrdenada[mid],
                        self.obtenido[listaOrdenada[k]] - self.obtenido[listaOrdenada[mid]])
            # keep the left midpoint if its angle is sharper
            if(theta > temp) :
                flag = True
                theta = temp
                r = k
                k = mid
                self.bestK = k
        return l,k,r
if __name__ == '__main__':
    # CLI entry point: the single argument is the dataset size n.
    total_points = int(sys.argv[1])
    print("cantidad de puntos:", total_points)
    broker = Broker(total_points)
    broker.run()
| [
"math.ceil",
"math.floor",
"math.acos",
"math.sqrt",
"numpy.random.randint",
"zmq.Context"
] | [((77, 90), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (88, 90), False, 'import zmq\n'), ((3913, 3937), 'math.sqrt', 'math.sqrt', (['(x * x + y * y)'], {}), '(x * x + y * y)\n', (3922, 3937), False, 'import math\n'), ((4196, 4210), 'math.acos', 'math.acos', (['var'], {}), '(var)\n', (4205, 4210), False, 'import math\n'), ((1646, 1663), 'math.sqrt', 'math.sqrt', (['self.n'], {}), '(self.n)\n', (1655, 1663), False, 'import math\n'), ((4973, 4995), 'math.ceil', 'math.ceil', (['((k + l) / 2)'], {}), '((k + l) / 2)\n', (4982, 4995), False, 'import math\n'), ((5017, 5040), 'math.floor', 'math.floor', (['((k + r) / 2)'], {}), '((k + r) / 2)\n', (5027, 5040), False, 'import math\n'), ((8287, 8310), 'math.floor', 'math.floor', (['((k + r) / 2)'], {}), '((k + r) / 2)\n', (8297, 8310), False, 'import math\n'), ((9272, 9294), 'math.ceil', 'math.ceil', (['((k + l) / 2)'], {}), '((k + l) / 2)\n', (9281, 9294), False, 'import math\n'), ((3265, 3295), 'numpy.random.randint', 'numpy.random.randint', (['a', '(b + 1)'], {}), '(a, b + 1)\n', (3285, 3295), False, 'import numpy\n'), ((3332, 3362), 'numpy.random.randint', 'numpy.random.randint', (['b', '(c + 1)'], {}), '(b, c + 1)\n', (3352, 3362), False, 'import numpy\n')] |
from cv2 import cv2
import time
import pyautogui
import numpy as np
import mss
from os import listdir
# from run import getBackgroundText
import torch
from random import randint
# example_captcha_img = cv2.imread('images/example.png')
# Load the locally trained YOLOv5 weights used to detect captcha background digits.
model = torch.hub.load('./captcha', 'custom', "captcha/bomb_captcha.pt", source='local')
def getBackgroundText(img, percent_required):
    """Run the digit-detection model over `img` and return the detected
    digits concatenated left-to-right, keeping only detections whose
    confidence is at least `percent_required`."""
    if type(img) is np.ndarray and percent_required:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    detections = model(img, size=416)
    found = []
    if detections.xyxy[0].shape[0] >= 1:
        for row in detections.xyxy[0]:
            left, _, _, _, score, cls = row
            if score >= percent_required:
                found.append({'x': left.item(), 'd': cls.item()})
    # order detections by horizontal position before joining
    found.sort(key=lambda e: e['x'])
    return ''.join(str(int(e['d'])) for e in found)
def remove_suffix(input_string, suffix):
    """Return `input_string` without a trailing `suffix`; no-op when the
    suffix is empty or not present."""
    if not suffix or not input_string.endswith(suffix):
        return input_string
    return input_string[:-len(suffix)]
# TODO: remove duplication with the other template helpers
def load_images():
    """Load every template image from ./captcha/images/, keyed by the
    file name without its '.png' extension."""
    dir_name = './captcha/images/'
    targets = {}
    for file_name in listdir(dir_name):
        key = remove_suffix(file_name, '.png')
        targets[key] = cv2.imread(dir_name + file_name)
    return targets
# Template images keyed by name (digits '0'-'9', 'slider', 'robot', ...).
d = load_images()
# TODO: remove duplication (positions/position below are near-identical)
def positions(target, threshold=0.85, img=None):
    """Template-match `target` against `img` (a fresh screenshot when None)
    and return the grouped match rectangles as [x, y, w, h] rows."""
    if img is None:
        img = printSreen()
    match_map = cv2.matchTemplate(img, target, cv2.TM_CCOEFF_NORMED)
    height = target.shape[0]
    width = target.shape[1]
    ys, xs = np.where(match_map >= threshold)
    rects = []
    for x, y in zip(xs, ys):
        # appended twice so groupRectangles keeps single matches too
        rects.append([int(x), int(y), int(width), int(height)])
        rects.append([int(x), int(y), int(width), int(height)])
    rects, _ = cv2.groupRectangles(rects, 1, 0.2)
    return rects
def getDigits(d, img):
    """Read the digits shown in `img` by template-matching the '0'-'9'
    templates from `d`; returns them left-to-right as a string."""
    found = []
    for digit in range(10):
        hits = positions(d[str(digit)], img=img, threshold=0.95)
        if len(hits) > 0:
            found.append({'digit': str(digit), 'x': hits[0][0]})
    found.sort(key=lambda e: e['x'])
    return ''.join(e['digit'] for e in found)
# getFirstDigits(first)
def printSreen():
    """Grab a screenshot of the whole virtual screen and return it as an
    ndarray with the alpha channel stripped (BGR order from mss)."""
    with mss.mss() as sct:
        monitor = sct.monitors[0]  # monitor 0 = all screens combined
        grabbed = np.array(sct.grab(monitor))
    return grabbed[:, :, :3]
def captchaImg(img, pos, w=500, h=180):
    """Crop the w x h captcha digit strip from `img`, located at fixed
    empirical offsets from the popup rectangle `pos` = (x, y, w, h)."""
    rx, ry, _, _ = pos
    left = rx - 10   # x offset from the popup's left edge
    top = ry + 89    # y offset from the popup's top edge
    return img[top: top + h, left: left + w]
def position(target, threshold=0.85, img=None):
    """Like positions(), but return only the centre (x, y) of the first
    grouped match, or None when nothing matches."""
    if img is None:
        img = printSreen()
    match_map = cv2.matchTemplate(img, target, cv2.TM_CCOEFF_NORMED)
    height = target.shape[0]
    width = target.shape[1]
    ys, xs = np.where(match_map >= threshold)
    rects = []
    for x, y in zip(xs, ys):
        # appended twice so groupRectangles keeps single matches too
        rects.append([int(x), int(y), int(width), int(height)])
        rects.append([int(x), int(y), int(width), int(height)])
    rects, _ = cv2.groupRectangles(rects, 1, 0.2)
    if len(rects) > 0:
        x, y, w, h = rects[0]
        return (x + (w / 2), y + h / 2)
def getSliderPositions(screenshot, popup_pos):
    """Find the slider, press and drag it fully right to measure its track,
    and return 5 evenly spaced (x, y) drop positions along the track.
    Side effect: the mouse button is left pressed when this returns."""
    slider = position(d['slider'],img=screenshot,threshold=0.8)
    cont = int()
    # retry up to 10 times with a lower threshold if the slider is not found
    # NOTE(review): each retry re-scans the same stale `screenshot` — confirm
    # whether a fresh printSreen() per attempt was intended
    while slider is None:
        if cont == 10:
            break
        print('no slider')
        slider = position(d['slider'],img=screenshot,threshold=0.78)
        time.sleep(5)
        cont += 1
    (start_x, start_y) = slider
    # press the slider and drag far right to discover the track's end
    pyautogui.moveTo(start_x,start_y+randint(0,10),1)
    pyautogui.mouseDown()
    pyautogui.moveTo(start_x+400,start_y+randint(0,10),1)
    screenshot = printSreen()
    end = position(d['slider'],img=screenshot,threshold = 0.8)
    (end_x, end_y) = end
    size = end_x-start_x
    increment = size/4
    positions = []
    # 5 candidate stops at 0, 1/4, 1/2, 3/4 and full track length,
    # with a small random vertical jitter
    for i in range(5):
        # pyautogui.moveTo(start_x+increment*pos ,start_y+randint(0,10),1)
        positions.append((start_x+increment*i ,start_y+randint(0,10)))
    # screenshot = printSreen()
    # time.sleep(2)
    # pyautogui.mouseUp()
    return positions
def solveCaptcha():
    """Locate the captcha popup, read its header digits, then try each
    candidate slider position until the model-detected background digits
    match the header digits; release the mouse after every attempt."""
    screenshot = printSreen()
    img = screenshot.copy()
    popup_pos = positions(d['robot'],img=img)
    print(popup_pos)
    if len(popup_pos) == 0:
        print('no captcha popup found!')
        return
    screenshot = printSreen()
    img = screenshot.copy()
    img = captchaImg(img, popup_pos[0])
    digits = getDigits(d, img)
    slider_positions = getSliderPositions(screenshot, popup_pos)
    # moveSlider(screenshot,3,popup_pos)
    for position in slider_positions:
        x, y = position
        # drag the (still pressed) slider to the candidate position
        pyautogui.moveTo(x,y,1)
        screenshot = printSreen()
        popup_pos = positions(d['robot'],img=screenshot)
        captcha_img = captchaImg(screenshot, popup_pos[0])
        # captcha_img = example_captcha_img
        background_digits = getBackgroundText(captcha_img, 0.7)
        print( 'dig: {}, background_digits: {}'.format(digits, background_digits))
        if digits == background_digits:
            print('FOUND!')
            pyautogui.mouseUp()
            return
        else:
            pyautogui.mouseUp()
    print('NÃO ACHOU!')
if __name__ == '__main__':
    solveCaptcha()
# TODO: move positions() into a separate module and import it elsewhere;
# drop the module-level load_images() call and pass `d` in as an argument.
| [
"os.listdir",
"torch.hub.load",
"mss.mss",
"random.randint",
"numpy.where",
"cv2.cv2.imread",
"pyautogui.moveTo",
"pyautogui.mouseUp",
"pyautogui.mouseDown",
"time.sleep",
"cv2.cv2.cvtColor",
"cv2.cv2.matchTemplate",
"cv2.cv2.groupRectangles"
] | [((245, 330), 'torch.hub.load', 'torch.hub.load', (['"""./captcha"""', '"""custom"""', '"""captcha/bomb_captcha.pt"""'], {'source': '"""local"""'}), "('./captcha', 'custom', 'captcha/bomb_captcha.pt', source='local'\n )\n", (259, 330), False, 'import torch\n'), ((1264, 1281), 'os.listdir', 'listdir', (['dir_name'], {}), '(dir_name)\n', (1271, 1281), False, 'from os import listdir\n'), ((1593, 1645), 'cv2.cv2.matchTemplate', 'cv2.matchTemplate', (['img', 'target', 'cv2.TM_CCOEFF_NORMED'], {}), '(img, target, cv2.TM_CCOEFF_NORMED)\n', (1610, 1645), False, 'from cv2 import cv2\n'), ((1710, 1739), 'numpy.where', 'np.where', (['(result >= threshold)'], {}), '(result >= threshold)\n', (1718, 1739), True, 'import numpy as np\n'), ((1944, 1983), 'cv2.cv2.groupRectangles', 'cv2.groupRectangles', (['rectangles', '(1)', '(0.2)'], {}), '(rectangles, 1, 0.2)\n', (1963, 1983), False, 'from cv2 import cv2\n'), ((3050, 3102), 'cv2.cv2.matchTemplate', 'cv2.matchTemplate', (['img', 'target', 'cv2.TM_CCOEFF_NORMED'], {}), '(img, target, cv2.TM_CCOEFF_NORMED)\n', (3067, 3102), False, 'from cv2 import cv2\n'), ((3167, 3196), 'numpy.where', 'np.where', (['(result >= threshold)'], {}), '(result >= threshold)\n', (3175, 3196), True, 'import numpy as np\n'), ((3401, 3440), 'cv2.cv2.groupRectangles', 'cv2.groupRectangles', (['rectangles', '(1)', '(0.2)'], {}), '(rectangles, 1, 0.2)\n', (3420, 3440), False, 'from cv2 import cv2\n'), ((3973, 3994), 'pyautogui.mouseDown', 'pyautogui.mouseDown', ([], {}), '()\n', (3992, 3994), False, 'import pyautogui\n'), ((456, 492), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (468, 492), False, 'from cv2 import cv2\n'), ((1405, 1421), 'cv2.cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (1415, 1421), False, 'from cv2 import cv2\n'), ((2399, 2408), 'mss.mss', 'mss.mss', ([], {}), '()\n', (2406, 2408), False, 'import mss\n'), ((3839, 3852), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', 
(3849, 3852), False, 'import time\n'), ((5062, 5087), 'pyautogui.moveTo', 'pyautogui.moveTo', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (5078, 5087), False, 'import pyautogui\n'), ((5565, 5584), 'pyautogui.mouseUp', 'pyautogui.mouseUp', ([], {}), '()\n', (5582, 5584), False, 'import pyautogui\n'), ((3952, 3966), 'random.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (3959, 3966), False, 'from random import randint\n'), ((4036, 4050), 'random.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (4043, 4050), False, 'from random import randint\n'), ((5508, 5527), 'pyautogui.mouseUp', 'pyautogui.mouseUp', ([], {}), '()\n', (5525, 5527), False, 'import pyautogui\n'), ((4397, 4411), 'random.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (4404, 4411), False, 'from random import randint\n')] |
import numpy as np
import cv2
from enum import Enum
class Models(Enum):
    """Enumeration of the supported detection model back-ends."""
    ssd_lite = 'ssd_lite'
    tiny_yolo = 'tiny_yolo'
    tf_lite = 'tf_lite'

    def __str__(self):
        # Render as the bare value so argparse help/output stays clean.
        return self.value

    @staticmethod
    def from_string(s):
        """Parse a member from its name; raise ValueError when unknown."""
        try:
            return Models[s]
        except KeyError:
            raise ValueError()
MAX_AREA = 0.019  # max box area (fraction of image area) seen in the train set
RATIO_MEAN = 4.17  # mean of the box height/width ratio over the train set
RATIO_STD = 1.06   # std-dev of the box height/width ratio over the train set
def load_image_into_numpy_array(image_path):
    """Read the image at `image_path` and return it as an RGB ndarray."""
    bgr = cv2.imread(image_path)  # OpenCV loads in BGR order
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
def affine_tile_corners(x0, y0, theta, wp, hp):
    """Corner points, in the original image, of a tile placed by an affine
    transform (rotation + translation).

    Args:
        x0, y0  Upper-left corner of the tile (pixels)
        theta   Rotation angle (degrees, clockwise from vertical)
        wp, hp  Tile width and height (pixels)

    Returns:
        ndarray of shape (4, 2): corners clockwise from the upper left.
    """
    angle = np.radians(theta)
    c, s = np.cos(angle), np.sin(angle)
    upper_left = [x0, y0]
    upper_right = [x0 + wp * c, y0 + wp * s]
    lower_right = [x0 + wp * c - hp * s, y0 + wp * s + hp * c]
    lower_left = [x0 - hp * s, y0 + hp * c]
    return np.array([upper_left, upper_right, lower_right, lower_left])
def tile_images(tiling_params, img):
    """Cut rotated, scaled tiles out of `img` as described by `tiling_params`
    (keys: upper_left_pts, thetas, multipliers, wp, hp).

    Returns (tiles, original_sizes, offsets): the wp x hp de-rotated tiles,
    the (height, width) of each source crop, and each crop's (top, left)
    in the original image.
    """
    res = []
    original_sizes = []
    offset = []
    for cur_pt, cur_theta, cur_multiplier in zip(
            tiling_params["upper_left_pts"],
            tiling_params["thetas"],
            tiling_params["multipliers"]):
        cur_x0, cur_y0 = cur_pt
        # corners of the rotated tile in original-image coordinates
        corners = affine_tile_corners(
            cur_x0, cur_y0, cur_theta,
            int(cur_multiplier * tiling_params["wp"]),
            int(cur_multiplier * tiling_params["hp"])).astype(int)
        # axis-aligned bounding box of those corners
        top = min(corners[:, 1])
        left = min(corners[:, 0])
        bottom = max(corners[:, 1])
        right = max(corners[:, 0])
        h = bottom - top
        w = right - left
        tile = np.zeros((h, w, 3)).astype(np.uint8)
        # crop tile from image (zero-padded if the crop hits the border)
        tmp = img[top: bottom, left: right]
        tile[:tmp.shape[0], :tmp.shape[1], :3] = tmp
        # resize the tile to the common output size
        tile = cv2.resize(tile, (tiling_params["wp"], tiling_params["hp"]),
                          interpolation=cv2.INTER_NEAREST)
        # rotate the tile back so its content is upright
        image_center = tuple(np.array(tile.shape[1::-1]) / 2)
        rot_mat = cv2.getRotationMatrix2D(image_center, cur_theta, 1.0)
        tmp = cv2.warpAffine(tile, rot_mat, (tile.shape[1::-1]),
                             flags=cv2.INTER_LINEAR)
        original_sizes.append((bottom - top, right - left))
        offset.append((top, left))
        res.append(tmp)
    return res, original_sizes, offset
def rotate_points(points, rotation_matrix):
    """Apply a 2x3 affine `rotation_matrix` to a single 2-D point by
    promoting it to homogeneous coordinates (x, y, 1)."""
    homogeneous = np.append(points, 1)
    return rotation_matrix.dot(homogeneous)
def split_img(img, m, n):
    """Split `img` into an m x n grid of overlapping tiles, each resized to
    the base tile size.

    Returns (tiles, original_sizes, offsets): the resized tiles, the
    (height, width) of each source crop, and each crop's (top, left).
    """
    h, w, _ = img.shape
    tile_h = h // m
    tile_w = w // n
    # overlap margins so objects on tile borders are not cut in half
    pad_h = tile_h // 10
    pad_w = int(tile_w * 0.15)
    tiles, sizes, offsets = [], [], []
    for row in range(0, m):
        top = row * tile_h
        bottom = min(h, (row + 1) * tile_h + pad_h)
        for col in range(0, n):
            left = col * tile_w
            right = min(w, (col + 1) * tile_w + pad_w)
            sizes.append((bottom - top, right - left))
            offsets.append((top, left))
            tiles.append(cv2.resize(img[top: bottom, left: right, :],
                                    (tile_w, tile_h),
                                    interpolation=cv2.INTER_NEAREST))
    return tiles, sizes, offsets
def get_global_coord(point, img_size, original_size, offset):
    """Map an (x, y) `point` in a resized tile back to full-image pixel
    coordinates; `img_size`/`original_size` are (height, width) and
    `offset` is the tile's (top, left) in the full image."""
    global_x = int(point[0] / img_size[1] * original_size[1] + offset[1])
    global_y = int(point[1] / img_size[0] * original_size[0] + offset[0])
    return [global_x, global_y]
def non_max_suppression_fast(boxes, labels, overlap_thresh=0.5):
    """Greedy non-maximum suppression over [x1, y1, x2, y2] boxes.

    Boxes are considered in order of their bottom edge (largest y2 first);
    any remaining box overlapping a kept one by more than `overlap_thresh`
    (intersection / candidate area) is discarded.

    Returns (kept_boxes, kept_labels); ([], []) for empty input.
    """
    boxes = np.array(boxes)
    if len(boxes) == 0:
        return [], []
    x1, y1 = boxes[:, 0], boxes[:, 1]
    x2, y2 = boxes[:, 2], boxes[:, 3]
    # area of every box (inclusive pixel coordinates, hence the +1)
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(y2)
    keep = []
    while len(order) > 0:
        # take the box with the largest bottom edge still in play
        last = len(order) - 1
        current = order[last]
        keep.append(current)
        # intersection of the current box with all remaining candidates
        xx1 = np.maximum(x1[current], x1[order[:last]])
        yy1 = np.maximum(y1[current], y1[order[:last]])
        xx2 = np.minimum(x2[current], x2[order[:last]])
        yy2 = np.minimum(y2[current], y2[order[:last]])
        iw = np.maximum(0, xx2 - xx1 + 1)
        ih = np.maximum(0, yy2 - yy1 + 1)
        overlap = 1. * (iw * ih) / area[order[:last]]
        # drop the current box and everything it suppresses
        suppressed = np.where(overlap > overlap_thresh)[0]
        order = np.delete(order, np.concatenate(([last], suppressed)))
    return boxes[keep], [labels[i] for i in keep]
def filter_bb_by_size(bbs, labels, img_area):
    """Drop boxes whose relative area or aspect ratio is implausible.

    A [x1, y1, x2, y2] box is kept when its area fraction of `img_area` is
    below ~MAX_AREA (with 10% slack) and its height/width ratio lies within
    3 standard deviations of the training-set mean.

    Fix: the 3-sigma band was written as RATIO_MEAN - 3 * RATIO_MEAN ..
    RATIO_MEAN + 3 * RATIO_MEAN (i.e. using the mean in place of RATIO_STD,
    which was defined but never used), so the ratio check accepted almost
    any positive ratio.
    """
    res_bbs = []
    res_labels = []
    for bb, l in zip(bbs, labels):
        s = (bb[2] - bb[0]) * (bb[3] - bb[1]) / img_area
        r = (bb[3] - bb[1]) / (bb[2] - bb[0])
        if s < MAX_AREA * 1.1 and RATIO_MEAN - 3 * RATIO_STD < r < RATIO_MEAN + 3 * RATIO_STD:
            res_bbs.append(bb)
            res_labels.append(l)
    return res_bbs, res_labels
| [
"numpy.radians",
"cv2.warpAffine",
"numpy.minimum",
"numpy.where",
"numpy.sin",
"numpy.append",
"numpy.array",
"numpy.argsort",
"numpy.zeros",
"numpy.cos",
"cv2.cvtColor",
"numpy.maximum",
"cv2.getRotationMatrix2D",
"cv2.resize",
"cv2.imread"
] | [((483, 505), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (493, 505), False, 'import cv2\n'), ((517, 555), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (529, 555), False, 'import cv2\n'), ((1412, 1429), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (1422, 1429), True, 'import numpy as np\n'), ((3310, 3330), 'numpy.append', 'np.append', (['points', '(1)'], {}), '(points, 1)\n', (3319, 3330), True, 'import numpy as np\n'), ((4554, 4569), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (4562, 4569), True, 'import numpy as np\n'), ((4994, 5008), 'numpy.argsort', 'np.argsort', (['y2'], {}), '(y2)\n', (5004, 5008), True, 'import numpy as np\n'), ((2672, 2770), 'cv2.resize', 'cv2.resize', (['tile', "(tiling_params['wp'], tiling_params['hp'])"], {'interpolation': 'cv2.INTER_NEAREST'}), "(tile, (tiling_params['wp'], tiling_params['hp']), interpolation=\n cv2.INTER_NEAREST)\n", (2682, 2770), False, 'import cv2\n'), ((2899, 2952), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['image_center', 'cur_theta', '(1.0)'], {}), '(image_center, cur_theta, 1.0)\n', (2922, 2952), False, 'import cv2\n'), ((2967, 3039), 'cv2.warpAffine', 'cv2.warpAffine', (['tile', 'rot_mat', 'tile.shape[1::-1]'], {'flags': 'cv2.INTER_LINEAR'}), '(tile, rot_mat, tile.shape[1::-1], flags=cv2.INTER_LINEAR)\n', (2981, 3039), False, 'import cv2\n'), ((5484, 5518), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[idxs[:last]]'], {}), '(x1[i], x1[idxs[:last]])\n', (5494, 5518), True, 'import numpy as np\n'), ((5533, 5567), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[idxs[:last]]'], {}), '(y1[i], y1[idxs[:last]])\n', (5543, 5567), True, 'import numpy as np\n'), ((5582, 5616), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[idxs[:last]]'], {}), '(x2[i], x2[idxs[:last]])\n', (5592, 5616), True, 'import numpy as np\n'), ((5631, 5665), 'numpy.minimum', 'np.minimum', (['y2[i]', 'y2[idxs[:last]]'], {}), 
'(y2[i], y2[idxs[:last]])\n', (5641, 5665), True, 'import numpy as np\n'), ((5738, 5766), 'numpy.maximum', 'np.maximum', (['(0)', '(xx2 - xx1 + 1)'], {}), '(0, xx2 - xx1 + 1)\n', (5748, 5766), True, 'import numpy as np\n'), ((5779, 5807), 'numpy.maximum', 'np.maximum', (['(0)', '(yy2 - yy1 + 1)'], {}), '(0, yy2 - yy1 + 1)\n', (5789, 5807), True, 'import numpy as np\n'), ((2464, 2483), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {}), '((h, w, 3))\n', (2472, 2483), True, 'import numpy as np\n'), ((2848, 2875), 'numpy.array', 'np.array', (['tile.shape[1::-1]'], {}), '(tile.shape[1::-1])\n', (2856, 2875), True, 'import numpy as np\n'), ((4011, 4109), 'cv2.resize', 'cv2.resize', (['img[top:bottom, left:right, :]', '(tile_w, tile_h)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(img[top:bottom, left:right, :], (tile_w, tile_h), interpolation=\n cv2.INTER_NEAREST)\n', (4021, 4109), False, 'import cv2\n'), ((1493, 1510), 'numpy.cos', 'np.cos', (['rot_angle'], {}), '(rot_angle)\n', (1499, 1510), True, 'import numpy as np\n'), ((1522, 1539), 'numpy.sin', 'np.sin', (['rot_angle'], {}), '(rot_angle)\n', (1528, 1539), True, 'import numpy as np\n'), ((1587, 1604), 'numpy.sin', 'np.sin', (['rot_angle'], {}), '(rot_angle)\n', (1593, 1604), True, 'import numpy as np\n'), ((1651, 1668), 'numpy.cos', 'np.cos', (['rot_angle'], {}), '(rot_angle)\n', (1657, 1668), True, 'import numpy as np\n'), ((1691, 1708), 'numpy.sin', 'np.sin', (['rot_angle'], {}), '(rot_angle)\n', (1697, 1708), True, 'import numpy as np\n'), ((1720, 1737), 'numpy.cos', 'np.cos', (['rot_angle'], {}), '(rot_angle)\n', (1726, 1737), True, 'import numpy as np\n'), ((6027, 6061), 'numpy.where', 'np.where', (['(overlap > overlap_thresh)'], {}), '(overlap > overlap_thresh)\n', (6035, 6061), True, 'import numpy as np\n'), ((1562, 1579), 'numpy.cos', 'np.cos', (['rot_angle'], {}), '(rot_angle)\n', (1568, 1579), True, 'import numpy as np\n'), ((1626, 1643), 'numpy.sin', 'np.sin', (['rot_angle'], {}), '(rot_angle)\n', (1632, 
1643), True, 'import numpy as np\n')] |
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from data_process import data_process_utils
from data_process.census_process.census_data_creation_config import census_data_creation
from data_process.census_process.census_degree_process_utils import consistentize_census9495_columns, \
numericalize_census9495_data, standardize_census_data
from data_process.census_process.mapping_resource import cate_to_index_map, continuous_cols, categorical_cols, \
target_col_name
# follow link provides description on columns of Census Income Dataset:
# https://docs.1010data.com/Tutorials/MachineLearningExamples/CensusIncomeDataSet.html
def get_timestamp():
return int(datetime.utcnow().timestamp())
CENSUS_COLUMNS = ["age", "class_worker", "det_ind_code", "det_occ_code", "education",
"wage_per_hour", "hs_college", "marital_stat", "major_ind_code", "major_occ_code",
"race", "hisp_origin", "gender", "union_member", "unemp_reason", "full_or_part_emp",
"capital_gain", "capital_loss", "stock_dividends", "tax_filer_stat",
"region_prev_res", "state_prev_res", "det_hh_fam_stat", "det_hh_summ", "instance_weight",
"mig_chg_msa", "mig_chg_reg", "mig_move_reg", "mig_same", "mig_prev_sunbelt",
"num_emp", "fam_under_18", "country_father", "country_mother", "country_self",
"citizenship", "own_or_self", "vet_question", "vet_benefits", "weeks_worked",
"year", "income_label"]
RERANGED_CENSUS_COLUMNS_NEW = ["age", "gender_index", "age_index", "class_worker", "det_ind_code", "det_occ_code",
"education",
"education_year", "wage_per_hour", "hs_college", "marital_stat", "major_ind_code",
"major_occ_code", "race", "hisp_origin", "gender", "union_member", "unemp_reason",
"full_or_part_emp", "capital_gain", "capital_loss", "stock_dividends", "tax_filer_stat",
"region_prev_res", "state_prev_res", "det_hh_fam_stat", "det_hh_summ", "instance_weight",
"mig_chg_msa", "mig_chg_reg", "mig_move_reg", "mig_same", "mig_prev_sunbelt",
"num_emp", "fam_under_18", "country_father", "country_mother", "country_self",
"citizenship", "own_or_self", "vet_question", "vet_benefits", "weeks_worked",
"year", "income_label"]
def process(data_path, to_dir=None, train=True):
census = pd.read_csv(data_path, names=CENSUS_COLUMNS, skipinitialspace=True)
print("[INFO] load {} data".format("train" if train else "test"))
print("[INFO] load data with shape:", census.shape)
appendix = "_train" if train else "_test"
extension = ".csv"
appendix = appendix + extension
print("[INFO] consistentize original data")
c_census = consistentize_census9495_columns(census)
c_census.to_csv(to_dir + 'consistentized_census9495' + appendix, header=True, index=False)
print("[INFO] numericalize data")
p_census = numericalize_census9495_data(c_census, cate_to_index_map)
return p_census
def compute_instance_prob(data_frame):
weight_sum = data_frame["instance_weight"].sum()
data_frame["instance_weight"] = data_frame["instance_weight"] / weight_sum
def create_file_appendix(train):
appendix = "_train" if train else "_valid"
extension = ".csv"
return appendix + extension
def create_degree_src_tgt_data(p_census,
from_dir,
to_dir,
data_tag,
pos_ratio,
num_all,
train=True,
grad_train_scaler=None,
undergrad_train_scaler=None,
grad_census_test_values=None,
save_intermediate_tables=False):
appendix = create_file_appendix(train)
print("====================== create_degree_source_target_data for {} data ======================"
.format("train" if train else "valid"))
# form source and target domain data
doctorate_census = p_census[p_census['education'] == 11]
master_census = p_census[(p_census['education'] == 9) | (p_census['education'] == 10)]
undergrad_census = p_census[
(p_census['education'] != 9) & (p_census['education'] != 10) & (p_census['education'] != 11)]
columns = continuous_cols + categorical_cols + ['instance_weight', target_col_name]
doctorate_census = doctorate_census[columns]
master_census = master_census[columns]
undergrad_census = undergrad_census[columns]
print("[INFO] doctorate_census shape", doctorate_census.shape)
print("[INFO] master_census shape", master_census.shape)
print("[INFO] undergrad_census shape", undergrad_census.shape)
if save_intermediate_tables:
doctorate_census.to_csv(to_dir + 'doctorate_census9495' + appendix, header=True, index=False)
master_census.to_csv(to_dir + 'master_census9495' + appendix, header=True, index=False)
undergrad_census.to_csv(to_dir + 'undergrad_census9495' + appendix, header=True, index=False)
doctorate_census = pd.read_csv(from_dir + 'doctorate_census9495' + appendix, skipinitialspace=True)
master_census = pd.read_csv(from_dir + 'master_census9495' + appendix, skipinitialspace=True)
undergrad_census = pd.read_csv(from_dir + 'undergrad_census9495' + appendix, skipinitialspace=True)
doctorate_census_values = doctorate_census[columns].values
master_census_values = master_census[columns].values
undergrad_census_values = undergrad_census[columns].values
# doctor and master form the source domain
grad_census_values = np.concatenate([doctorate_census_values, master_census_values], axis=0)
grad_census_values = shuffle(grad_census_values)
grad_census_df_for_da = pd.DataFrame(data=grad_census_values, columns=columns)
# undergraduate form the target domain
undergrad_census_values = shuffle(undergrad_census_values)
undergrad_census_df = pd.DataFrame(data=undergrad_census_values, columns=columns)
_, grad_train_scaler = standardize_census_data(grad_census_df_for_da, continuous_cols, grad_train_scaler)
_, udgrad_train_scaler = standardize_census_data(undergrad_census_df, continuous_cols, undergrad_train_scaler)
grad_census_df_1 = grad_census_df_for_da[grad_census_df_for_da[target_col_name] == 1]
grad_census_df_0 = grad_census_df_for_da[grad_census_df_for_da[target_col_name] == 0]
undergrad_census_df_1 = undergrad_census_df[undergrad_census_df[target_col_name] == 1]
undergrad_census_df_0 = undergrad_census_df[undergrad_census_df[target_col_name] == 0]
print("[INFO] (orig) (target) grad_census_df_1 shape:", grad_census_df_1.shape)
print("[INFO] (orig) (target) grad_census_df_0 shape:", grad_census_df_0.shape)
print("[INFO] (orig) (source) undergrad_census_df_1 shape:", undergrad_census_df_1.shape)
print("[INFO] (orig) (source) undergrad_census_df_0 shape:", undergrad_census_df_0.shape)
grad_census_for_test = None
test_pos_ratio = 0.5
if train:
num_pos = int(num_all * pos_ratio)
num_neg = int(num_all * (1 - pos_ratio))
print(f"[INFO] train num_pos:{num_pos}")
print(f"[INFO] train num_neg:{num_neg}")
# get labeled target data for supervised training
grad_census_values_1 = grad_census_df_1.values[0:num_pos]
grad_census_values_0 = grad_census_df_0.values[0:num_neg]
grad_census_values_for_supervise = shuffle(np.concatenate((grad_census_values_1, grad_census_values_0), axis=0))
print(f"[INFO] grad train positive samples range:[0:{num_pos}].")
print(f"[INFO] grad train negative samples range:[0:{num_neg}].")
print(f"[INFO] grad train all samples shape:{grad_census_values_for_supervise.shape}.")
num_pos_for_test = int((grad_census_df_0.shape[0] - num_all) * test_pos_ratio)
grad_census_test_values_1 = grad_census_df_1.values[num_pos:num_pos + num_pos_for_test]
grad_census_test_values_0 = grad_census_df_0.values[num_all:]
print(f"[INFO] => grad left_data for test # of positive samples:{num_pos_for_test}")
print(f"[INFO] => grad left-data for test pos samples range:[{num_pos}:{num_pos + num_pos_for_test}].")
print(f"[INFO] => grad left-data for test pos samples shape:{grad_census_test_values_1.shape}")
print(f"[INFO] => grad left-data for test neg samples range:[{num_all}:-1].")
print(f"[INFO] => grad left-data for test neg samples shape:{grad_census_test_values_0.shape}")
grad_census_for_test = np.concatenate([grad_census_test_values_1, grad_census_test_values_0], axis=0)
print(f"[INFO] => grad left-data for test shape: {grad_census_for_test.shape}")
else:
# num_pos = int((grad_census_df_0.shape[0] + grad_census_df_0.shape[1]) * test_pos_ratio)
# grad_census_values_1 = grad_census_df_1.values[:num_pos]
grad_census_values_1 = grad_census_df_1.values
grad_census_values_0 = grad_census_df_0.values
grad_census_values_for_supervise = shuffle(
np.concatenate((grad_census_values_1, grad_census_values_0, grad_census_test_values), axis=0))
print(f"[INFO] grad test pos samples shape:{grad_census_values_1.shape}.")
print(f"[INFO] grad test neg samples shape:{grad_census_values_0.shape}.")
print(f"[INFO] grad left-data for test samples shape:{grad_census_test_values.shape}.")
print(f"[INFO] grad test all samples shape: {grad_census_values_for_supervise.shape}")
# print("grad_census_values_1 shape:", grad_census_values_1.shape)
# print("grad_census_values_0 shape:", grad_census_values_0.shape)
# grad_census_values_for_supervise = shuffle(np.concatenate((grad_census_values_1, grad_census_values_0), axis=0))
grad_census_df_for_ft = pd.DataFrame(data=grad_census_values_for_supervise, columns=columns)
print("[INFO] (final) grad_census_df_for_ft (supervised) shape:", grad_census_df_for_ft.shape)
print("[INFO] grad_census_df_for_ft (supervised) pos:",
grad_census_df_for_ft[grad_census_df_for_ft[target_col_name] == 1].shape)
print("[INFO] grad_census_df_for_ft (supervised) neg:",
grad_census_df_for_ft[grad_census_df_for_ft[target_col_name] == 0].shape)
# save data
if train:
grad_ft_file_full_path = from_dir + 'grad_census9495_ft_' + str(data_tag) + appendix
grad_census_df_for_ft.to_csv(grad_ft_file_full_path, header=True, index=False)
print(f"[INFO] ==> saved grad ft data to: {grad_ft_file_full_path}")
print("[INFO] (final) grad_census_df_for_ad shape:", grad_census_df_for_da.shape)
print("[INFO] grad_census_df_for_ad pos:",
grad_census_df_for_da[grad_census_df_for_da[target_col_name] == 1].shape)
print("[INFO] grad_census_df_for_ad neg:",
grad_census_df_for_da[grad_census_df_for_da[target_col_name] == 0].shape)
grad_da_file_full_path = from_dir + 'grad_census9495_ad_' + str(data_tag) + appendix
grad_census_df_for_da.to_csv(grad_da_file_full_path, header=True, index=False)
print(f"[INFO] ==> saved grad ad data to: {grad_da_file_full_path}")
else:
# test
half_num = int(grad_census_df_for_ft.shape[0] / 2)
grad_census_df_for_ft_valid = grad_census_df_for_ft[:half_num]
grad_census_df_for_ft_test = grad_census_df_for_ft[half_num:]
print(f"[INFO] (final) grad_census_df_for_ft_valid shape:{grad_census_df_for_ft_valid.shape}")
print(f"[INFO] => grad_census_df_for_ft_valid shape range:[0:{half_num}].")
print("[INFO] grad_census_df_for_ft_valid pos:",
grad_census_df_for_ft_valid[grad_census_df_for_ft_valid[target_col_name] == 1].shape)
print("[INFO] grad_census_df_for_ft_valid neg:",
grad_census_df_for_ft_valid[grad_census_df_for_ft_valid[target_col_name] == 0].shape)
grad_ft_file_full_path = from_dir + 'grad_census9495_ft_' + str(data_tag) + "_valid.csv"
grad_census_df_for_ft_valid.to_csv(grad_ft_file_full_path, header=True, index=False)
print(f"[INFO] ==> saved grad ft valid data to: {grad_ft_file_full_path}")
print(f"[INFO] (final) grad_census_df_for_ft_test shape:{grad_census_df_for_ft_test.shape}")
print(f"[INFO] => grad_census_df_for_ft_test range:[{half_num}:].")
print("[INFO] grad_census_df_for_ft_test pos:",
grad_census_df_for_ft_test[grad_census_df_for_ft_test[target_col_name] == 1].shape)
print("[INFO] grad_census_df_for_ft_valid neg:",
grad_census_df_for_ft_test[grad_census_df_for_ft_test[target_col_name] == 0].shape)
grad_ft_file_full_path = from_dir + 'grad_census9495_ft_' + str(data_tag) + "_test.csv"
grad_census_df_for_ft_test.to_csv(grad_ft_file_full_path, header=True, index=False)
print(f"[INFO] ==> saved grad ft test data to: {grad_ft_file_full_path}")
undergrad_pos_num = undergrad_census_df_1.shape[0]
undergrad_census_values_all = shuffle(
np.concatenate((undergrad_census_df_1.values, undergrad_census_df_0[:undergrad_pos_num * 9].values), axis=0))
undergrad_census_df_all = pd.DataFrame(data=undergrad_census_values_all, columns=columns)
print("[INFO] (final) undergrad_census_df_all shape:", undergrad_census_df_all.shape)
print("[INFO] undergrad_census_df_all pos:",
undergrad_census_df_all[undergrad_census_df_all[target_col_name] == 1].shape)
print("[INFO] undergrad_census_df_all neg:",
undergrad_census_df_all[undergrad_census_df_all[target_col_name] == 0].shape)
undergrad_file_full_path = from_dir + 'undergrad_census9495_ad_' + str(data_tag) + appendix
undergrad_census_df_all.to_csv(undergrad_file_full_path, header=True, index=False)
print(f"[INFO] ==> saved undergrad ad data to: {undergrad_file_full_path}")
return grad_train_scaler, udgrad_train_scaler, grad_census_for_test
def combine_src_tgt_data(from_dir, to_dir, data_tag):
print(f"========================= combine census source and target data ============================ ")
source_train_file_name = from_dir + f'undergrad_census9495_ad_{data_tag}_train.csv'
target_train_file_name = from_dir + f'grad_census9495_ft_{data_tag}_train.csv'
df_src_data = pd.read_csv(source_train_file_name, skipinitialspace=True)
df_tgt_data = pd.read_csv(target_train_file_name, skipinitialspace=True)
print("[INFO] df_src_data shape:", df_src_data.shape)
print("[INFO] df_tgt_data shape:", df_tgt_data.shape)
df_data = data_process_utils.combine_src_tgt_data(df_src_data, df_tgt_data)
print("[INFO] df_src_tgt_data shape:", df_data.shape)
file_full_name = "{}/degree_src_tgt_census9495_{}_train.csv".format(to_dir, data_tag)
data_process_utils.save_df_data(df_data, file_full_name)
if __name__ == "__main__":
data_dir = census_data_creation['original_data_dir']
output_data_dir = census_data_creation['processed_data_dir']
data_tag = census_data_creation['data_tag']
pos_ratio = census_data_creation['positive_sample_ratio']
num_all = census_data_creation['number_target_samples']
print("[INFO] ------ process data ------")
train_data_path = data_dir + census_data_creation['train_data_file_name']
test_data_path = data_dir + census_data_creation['test_data_file_name']
train_df = process(train_data_path, to_dir=output_data_dir, train=True)
test_df = process(test_data_path, to_dir=output_data_dir, train=False)
grad_train_scaler, udgrad_train_scaler, grad_census_for_test = create_degree_src_tgt_data(train_df,
from_dir=output_data_dir,
to_dir=output_data_dir,
train=True,
pos_ratio=pos_ratio,
num_all=num_all,
data_tag=data_tag)
create_degree_src_tgt_data(test_df,
from_dir=output_data_dir,
to_dir=output_data_dir,
train=False,
pos_ratio=pos_ratio,
grad_train_scaler=grad_train_scaler,
undergrad_train_scaler=udgrad_train_scaler,
grad_census_test_values=grad_census_for_test,
data_tag=data_tag,
num_all=num_all)
# NOTE: input dir and output dir are the same for src_tgt_data
combine_src_tgt_data(from_dir=output_data_dir, to_dir=output_data_dir, data_tag=data_tag)
| [
"data_process.data_process_utils.save_df_data",
"pandas.read_csv",
"datetime.datetime.utcnow",
"data_process.census_process.census_degree_process_utils.consistentize_census9495_columns",
"sklearn.utils.shuffle",
"data_process.data_process_utils.combine_src_tgt_data",
"numpy.concatenate",
"data_process... | [((2656, 2723), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'names': 'CENSUS_COLUMNS', 'skipinitialspace': '(True)'}), '(data_path, names=CENSUS_COLUMNS, skipinitialspace=True)\n', (2667, 2723), True, 'import pandas as pd\n'), ((3020, 3060), 'data_process.census_process.census_degree_process_utils.consistentize_census9495_columns', 'consistentize_census9495_columns', (['census'], {}), '(census)\n', (3052, 3060), False, 'from data_process.census_process.census_degree_process_utils import consistentize_census9495_columns, numericalize_census9495_data, standardize_census_data\n'), ((3210, 3267), 'data_process.census_process.census_degree_process_utils.numericalize_census9495_data', 'numericalize_census9495_data', (['c_census', 'cate_to_index_map'], {}), '(c_census, cate_to_index_map)\n', (3238, 3267), False, 'from data_process.census_process.census_degree_process_utils import consistentize_census9495_columns, numericalize_census9495_data, standardize_census_data\n'), ((5986, 6057), 'numpy.concatenate', 'np.concatenate', (['[doctorate_census_values, master_census_values]'], {'axis': '(0)'}), '([doctorate_census_values, master_census_values], axis=0)\n', (6000, 6057), True, 'import numpy as np\n'), ((6083, 6110), 'sklearn.utils.shuffle', 'shuffle', (['grad_census_values'], {}), '(grad_census_values)\n', (6090, 6110), False, 'from sklearn.utils import shuffle\n'), ((6139, 6193), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'grad_census_values', 'columns': 'columns'}), '(data=grad_census_values, columns=columns)\n', (6151, 6193), True, 'import pandas as pd\n'), ((6268, 6300), 'sklearn.utils.shuffle', 'shuffle', (['undergrad_census_values'], {}), '(undergrad_census_values)\n', (6275, 6300), False, 'from sklearn.utils import shuffle\n'), ((6327, 6386), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'undergrad_census_values', 'columns': 'columns'}), '(data=undergrad_census_values, columns=columns)\n', (6339, 6386), True, 'import pandas as 
pd\n'), ((6415, 6501), 'data_process.census_process.census_degree_process_utils.standardize_census_data', 'standardize_census_data', (['grad_census_df_for_da', 'continuous_cols', 'grad_train_scaler'], {}), '(grad_census_df_for_da, continuous_cols,\n grad_train_scaler)\n', (6438, 6501), False, 'from data_process.census_process.census_degree_process_utils import consistentize_census9495_columns, numericalize_census9495_data, standardize_census_data\n'), ((6527, 6616), 'data_process.census_process.census_degree_process_utils.standardize_census_data', 'standardize_census_data', (['undergrad_census_df', 'continuous_cols', 'undergrad_train_scaler'], {}), '(undergrad_census_df, continuous_cols,\n undergrad_train_scaler)\n', (6550, 6616), False, 'from data_process.census_process.census_degree_process_utils import consistentize_census9495_columns, numericalize_census9495_data, standardize_census_data\n'), ((10198, 10266), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'grad_census_values_for_supervise', 'columns': 'columns'}), '(data=grad_census_values_for_supervise, columns=columns)\n', (10210, 10266), True, 'import pandas as pd\n'), ((13650, 13713), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'undergrad_census_values_all', 'columns': 'columns'}), '(data=undergrad_census_values_all, columns=columns)\n', (13662, 13713), True, 'import pandas as pd\n'), ((14786, 14844), 'pandas.read_csv', 'pd.read_csv', (['source_train_file_name'], {'skipinitialspace': '(True)'}), '(source_train_file_name, skipinitialspace=True)\n', (14797, 14844), True, 'import pandas as pd\n'), ((14863, 14921), 'pandas.read_csv', 'pd.read_csv', (['target_train_file_name'], {'skipinitialspace': '(True)'}), '(target_train_file_name, skipinitialspace=True)\n', (14874, 14921), True, 'import pandas as pd\n'), ((15053, 15118), 'data_process.data_process_utils.combine_src_tgt_data', 'data_process_utils.combine_src_tgt_data', (['df_src_data', 'df_tgt_data'], {}), '(df_src_data, df_tgt_data)\n', (15092, 
15118), False, 'from data_process import data_process_utils\n'), ((15272, 15328), 'data_process.data_process_utils.save_df_data', 'data_process_utils.save_df_data', (['df_data', 'file_full_name'], {}), '(df_data, file_full_name)\n', (15303, 15328), False, 'from data_process import data_process_utils\n'), ((5438, 5523), 'pandas.read_csv', 'pd.read_csv', (["(from_dir + 'doctorate_census9495' + appendix)"], {'skipinitialspace': '(True)'}), "(from_dir + 'doctorate_census9495' + appendix, skipinitialspace=True\n )\n", (5449, 5523), True, 'import pandas as pd\n'), ((5543, 5620), 'pandas.read_csv', 'pd.read_csv', (["(from_dir + 'master_census9495' + appendix)"], {'skipinitialspace': '(True)'}), "(from_dir + 'master_census9495' + appendix, skipinitialspace=True)\n", (5554, 5620), True, 'import pandas as pd\n'), ((5648, 5733), 'pandas.read_csv', 'pd.read_csv', (["(from_dir + 'undergrad_census9495' + appendix)"], {'skipinitialspace': '(True)'}), "(from_dir + 'undergrad_census9495' + appendix, skipinitialspace=True\n )\n", (5659, 5733), True, 'import pandas as pd\n'), ((8938, 9016), 'numpy.concatenate', 'np.concatenate', (['[grad_census_test_values_1, grad_census_test_values_0]'], {'axis': '(0)'}), '([grad_census_test_values_1, grad_census_test_values_0], axis=0)\n', (8952, 9016), True, 'import numpy as np\n'), ((13510, 13623), 'numpy.concatenate', 'np.concatenate', (['(undergrad_census_df_1.values, undergrad_census_df_0[:undergrad_pos_num * 9\n ].values)'], {'axis': '(0)'}), '((undergrad_census_df_1.values, undergrad_census_df_0[:\n undergrad_pos_num * 9].values), axis=0)\n', (13524, 13623), True, 'import numpy as np\n'), ((7839, 7907), 'numpy.concatenate', 'np.concatenate', (['(grad_census_values_1, grad_census_values_0)'], {'axis': '(0)'}), '((grad_census_values_1, grad_census_values_0), axis=0)\n', (7853, 7907), True, 'import numpy as np\n'), ((9455, 9552), 'numpy.concatenate', 'np.concatenate', (['(grad_census_values_1, grad_census_values_0, grad_census_test_values)'], 
{'axis': '(0)'}), '((grad_census_values_1, grad_census_values_0,\n grad_census_test_values), axis=0)\n', (9469, 9552), True, 'import numpy as np\n'), ((731, 748), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (746, 748), False, 'from datetime import datetime\n')] |
import math
from typing import Optional, Union, Tuple, List
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torch_geometric.utils.num_nodes import maybe_num_nodes
from torch_scatter import scatter, segment_csr, gather_csr
from torch_scatter.utils import broadcast
import tsl
__all__ = [
'expand_then_cat',
'gated_tanh',
'reverse_tensor',
'sparse_softmax',
'sparse_multi_head_attention'
]
def expand_then_cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]],
dim=-1) -> Tensor:
r"""
Match the dimensions of tensors in the input list and then concatenate.
Args:
tensors: Tensors to concatenate.
dim (int): Dimension along which to concatenate.
"""
shapes = [t.shape for t in tensors]
expand_dims = list(np.max(shapes, 0))
expand_dims[dim] = -1
tensors = [t.expand(*expand_dims) for t in tensors]
return torch.cat(tensors, dim=dim)
@torch.jit.script
def gated_tanh(input: Tensor, dim: int = -1) -> Tensor:
r"""The gated tanh unite. Computes:
.. math ::
\text{GatedTanH}(a, b) = \text{TanH}(a) \otimes \sigma(b)
where `input` is split in half along `dim` to form `a` and `b`, :math:`\text{TanH}` is the hyperbolic tangent
function, :math:`\sigma` is the sigmoid function and :math:`\otimes` is the element-wise product between matrices.
Args:
input (Tensor): Input tensor.
dim (int, optional): Dimension on which to split the input.
(default: -1)
"""
out, gate = torch.tensor_split(input, 2, dim=dim)
return torch.tanh(out) * torch.sigmoid(gate)
@torch.jit.script
def reverse_tensor(tensor: Tensor, dim: int) -> Tensor:
"""Reverse tensor along specific dimension.
Args:
tensor (Tensor): Input tensor.
dim (int): Dimension along which to reverse sequence.
"""
indices = torch.arange(tensor.size(dim) - 1, -1, -1, device=tensor.device)
return tensor.index_select(dim, indices)
@torch.jit.script
def sparse_softmax(src: Tensor, index: Optional[Tensor] = None,
ptr: Optional[Tensor] = None,
num_nodes: Optional[int] = None,
dim: int = -2) -> Tensor:
r"""Extension of ~torch_geometric.softmax with index broadcasting to compute
a sparsely evaluated softmax over multiple broadcast dimensions.
Given a value tensor :attr:`src`, this function first groups the values
along the first dimension based on the indices specified in :attr:`index`,
and then proceeds to compute the softmax individually for each group.
Args:
src (Tensor): The source tensor.
index (Tensor, optional): The indices of elements for applying the softmax.
ptr (LongTensor, optional): If given, computes the softmax based on
sorted inputs in CSR representation. (default: :obj:`None`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`index`. (default: :obj:`None`)
dim (int, optional): The dimension in which to normalize, i.e., the edge
dimension. (default: :obj:`-2`)
"""
if ptr is not None:
dim = dim + src.dim() if dim < 0 else dim
size = ([1] * dim) + [-1]
ptr = ptr.view(size)
src_max = gather_csr(segment_csr(src, ptr, reduce='max'), ptr)
out = (src - src_max).exp()
out_sum = gather_csr(segment_csr(out, ptr, reduce='sum'), ptr)
elif index is not None:
N = maybe_num_nodes(index, num_nodes)
expanded_index = broadcast(index, src, dim)
src_max = scatter(src, expanded_index, dim, dim_size=N, reduce='max')
src_max = src_max.index_select(dim, index)
out = (src - src_max).exp()
out_sum = scatter(out, expanded_index, dim, dim_size=N, reduce='sum')
out_sum = out_sum.index_select(dim, index)
else:
raise NotImplementedError
return out / (out_sum + tsl.epsilon)
@torch.jit.script
def sparse_multi_head_attention(q: Tensor, k: Tensor, v: Tensor, index: Tensor,
dim_size: Optional[int] = None,
dropout_p: float = 0.0):
r"""Computes multi-head, scaled, dot product attention on query, key and
value tensors, applying dropout if a probability greater than 0.0 is
specified. Index specifies for each query in q the belonging sequence in the
original batched, dense tensor.
Returns a tensor pair containing attended values and attention weights.
Args:
q (Tensor): Query tensor. See Shape section for shape details.
k (Tensor): Key tensor. See Shape section for shape details.
v (Tensor): Value tensor. See Shape section for shape details.
index (Tensor): Tensor containing mask values to be added to calculated
attention. May be 2D or 3D; see Shape section for details.
dim_size (int, optional): The batched target length sequence, i.e.
:obj:`max_val + 1` of :attr:`index`. (default: :obj:`None`)
dropout_p: dropout probability. If greater than 0.0, dropout is applied.
Shape:
- q: :math:`(S, H, E)` where S is sparsed dimension, H is the number of
heads, and E is embedding dimension.
- k: :math:`(S, H, E)` where S is sparsed dimension, H is the number of
heads, and E is embedding dimension.
- v: :math:`(S, H, O)` where S is sparsed dimension, H is the number of
heads, and O is output dimension.
- index: :math:`(S)` where S is sparsed dimension.
- dim_size: must be :math:`(B \times Nt)`
- Output: attention values have shape :math:`(B, Nt, E)`; attention
weights have shape :math:`(S, H)`
"""
dim = 0
B, H, E = q.shape
N = maybe_num_nodes(index, dim_size)
# scores
alpha = (q * k).sum(dim=-1) / math.sqrt(E)
alpha = sparse_softmax(alpha, index, num_nodes=N, dim=dim)
if dropout_p > 0.0:
alpha = F.dropout(alpha, p=dropout_p)
v *= alpha.view(-1, H, 1)
# out
out = torch.zeros((N, H, v.size(2)), dtype=v.dtype, device=v.device)
add_index = broadcast(index, v, dim)
out.scatter_add_(dim, add_index, v)
return out, alpha
| [
"torch.tanh",
"torch_geometric.utils.num_nodes.maybe_num_nodes",
"torch_scatter.utils.broadcast",
"torch.sigmoid",
"math.sqrt",
"torch.nn.functional.dropout",
"numpy.max",
"torch_scatter.scatter",
"torch_scatter.segment_csr",
"torch.cat",
"torch.tensor_split"
] | [((946, 973), 'torch.cat', 'torch.cat', (['tensors'], {'dim': 'dim'}), '(tensors, dim=dim)\n', (955, 973), False, 'import torch\n'), ((1591, 1628), 'torch.tensor_split', 'torch.tensor_split', (['input', '(2)'], {'dim': 'dim'}), '(input, 2, dim=dim)\n', (1609, 1628), False, 'import torch\n'), ((5872, 5904), 'torch_geometric.utils.num_nodes.maybe_num_nodes', 'maybe_num_nodes', (['index', 'dim_size'], {}), '(index, dim_size)\n', (5887, 5904), False, 'from torch_geometric.utils.num_nodes import maybe_num_nodes\n'), ((6227, 6251), 'torch_scatter.utils.broadcast', 'broadcast', (['index', 'v', 'dim'], {}), '(index, v, dim)\n', (6236, 6251), False, 'from torch_scatter.utils import broadcast\n'), ((834, 851), 'numpy.max', 'np.max', (['shapes', '(0)'], {}), '(shapes, 0)\n', (840, 851), True, 'import numpy as np\n'), ((1640, 1655), 'torch.tanh', 'torch.tanh', (['out'], {}), '(out)\n', (1650, 1655), False, 'import torch\n'), ((1658, 1677), 'torch.sigmoid', 'torch.sigmoid', (['gate'], {}), '(gate)\n', (1671, 1677), False, 'import torch\n'), ((5952, 5964), 'math.sqrt', 'math.sqrt', (['E'], {}), '(E)\n', (5961, 5964), False, 'import math\n'), ((6068, 6097), 'torch.nn.functional.dropout', 'F.dropout', (['alpha'], {'p': 'dropout_p'}), '(alpha, p=dropout_p)\n', (6077, 6097), True, 'import torch.nn.functional as F\n'), ((3374, 3409), 'torch_scatter.segment_csr', 'segment_csr', (['src', 'ptr'], {'reduce': '"""max"""'}), "(src, ptr, reduce='max')\n", (3385, 3409), False, 'from torch_scatter import scatter, segment_csr, gather_csr\n'), ((3481, 3516), 'torch_scatter.segment_csr', 'segment_csr', (['out', 'ptr'], {'reduce': '"""sum"""'}), "(out, ptr, reduce='sum')\n", (3492, 3516), False, 'from torch_scatter import scatter, segment_csr, gather_csr\n'), ((3563, 3596), 'torch_geometric.utils.num_nodes.maybe_num_nodes', 'maybe_num_nodes', (['index', 'num_nodes'], {}), '(index, num_nodes)\n', (3578, 3596), False, 'from torch_geometric.utils.num_nodes import maybe_num_nodes\n'), ((3622, 
3648), 'torch_scatter.utils.broadcast', 'broadcast', (['index', 'src', 'dim'], {}), '(index, src, dim)\n', (3631, 3648), False, 'from torch_scatter.utils import broadcast\n'), ((3667, 3726), 'torch_scatter.scatter', 'scatter', (['src', 'expanded_index', 'dim'], {'dim_size': 'N', 'reduce': '"""max"""'}), "(src, expanded_index, dim, dim_size=N, reduce='max')\n", (3674, 3726), False, 'from torch_scatter import scatter, segment_csr, gather_csr\n'), ((3832, 3891), 'torch_scatter.scatter', 'scatter', (['out', 'expanded_index', 'dim'], {'dim_size': 'N', 'reduce': '"""sum"""'}), "(out, expanded_index, dim, dim_size=N, reduce='sum')\n", (3839, 3891), False, 'from torch_scatter import scatter, segment_csr, gather_csr\n')] |
"""Utility Functions about Instruments
"""
import numpy as np
from exojax.utils.constants import c
def R2STD(R):
""" compute Standard deveiation of Gaussian velocity distribution from spectral resolution
Args:
R: spectral resolution R
Returns:
beta (km/s) standard deviation of Gaussian velocity distribution
"""
return c/(2.0*np.sqrt(2.0*np.log(2.0))*R)
def resolution_eslog(nu):
"""spectral resolution for ESLOG
Args:
nu: wavenumber bin
Returns:
resolution
"""
return (len(nu)-1)/np.log(nu[-1]/nu[0])
def resolution_eslin(nu):
"""min max spectral resolution for ESLIN
Args:
nu: wavenumber bin
Returns:
min, approximate, max of the resolution
"""
return nu[0]/(nu[1]-nu[0]),((nu[-1]+nu[0])/2.0)/((nu[-1]-nu[0])/len(nu)),nu[-1]/(nu[-1]-nu[-2])
if __name__=="__main__":
nus=np.linspace(1000,2000,1000)
print(resolution_eslin(nus))
| [
"numpy.linspace",
"numpy.log"
] | [((898, 927), 'numpy.linspace', 'np.linspace', (['(1000)', '(2000)', '(1000)'], {}), '(1000, 2000, 1000)\n', (909, 927), True, 'import numpy as np\n'), ((562, 584), 'numpy.log', 'np.log', (['(nu[-1] / nu[0])'], {}), '(nu[-1] / nu[0])\n', (568, 584), True, 'import numpy as np\n'), ((381, 392), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (387, 392), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for sampling from different distributions.
Sampling functions for YOTO. Also includes functions to transform the samples,
for instance via softmax.
"""
import ast
import enum
import gin
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow_probability import distributions as tfd
@gin.constants_from_enum
class DistributionType(enum.Enum):
UNIFORM = 0
LOG_UNIFORM = 1
@gin.constants_from_enum
class TransformType(enum.Enum):
IDENTITY = 0
LOG = 1
@gin.configurable("DistributionSpec")
class DistributionSpec(object):
"""Spec of a distribution for YOTO training or evaluation."""
# NOTE(adosovitskiy) Tried to do it with namedtuple, but failed to make
# it work with gin
def __init__(self, distribution_type, params, transform):
self.distribution_type = distribution_type
self.params = params
self.transform = transform
# TODO(adosovitskiy): have one signature with distributionspec and one without
def get_samples_as_dicts(distribution_spec, num_samples=1,
                         names=None, seed=None):
  """Sample weight dictionaries for multi-loss problems.

  Supports many different distribution specifications, including random
  distributions given via DistributionSpec or fixed sets of weights given by
  dictionaries or lists of dictionaries. The function first parses the different
  options and then actually computes the weights to be returned.

  Args:
    distribution_spec: One of the following:
      * An instance of DistributionSpec
      * DistributionSpec class
      * A dictionary mapping loss names to their values
      * A list of such dictionaries
      * A string representing one of the above
    num_samples: how many samples to return (only if given DistributionSpec)
    names: names of losses (only if given DistributionSpec)
    seed: random seed to use for sampling (only if given DistributionSpec)

  Returns:
    samples_dicts: list of dictionaries with the samples weights

  Raises:
    ValueError: if `names` does not match the keys of the given dictionaries,
      or a given list is empty / does not contain dictionaries.
    TypeError: if `distribution_spec` has an unsupported type.
  """
  # If given a string, first eval it
  if isinstance(distribution_spec, str):
    distribution_spec = ast.literal_eval(distribution_spec)
  # Now convert to a list of dictionaries or an instance of DistributionSpec
  if isinstance(distribution_spec, dict):
    given_keys = distribution_spec.keys()
    if not (names is None or set(names) == set(given_keys)):
      raise ValueError(
          "Provided names {} do not match with the keys of the provided "
          "dictionary {}".format(names, given_keys))
    distribution_spec = [distribution_spec]
  elif isinstance(distribution_spec, list):
    if not (distribution_spec and
            isinstance(distribution_spec[0], dict)):
      raise ValueError(
          "If distribution_spec is a list, it should be non-empty and "
          "consist of dictionaries.")
    given_keys = distribution_spec[0].keys()
    if not (names is None or set(names) == set(given_keys)):
      raise ValueError(
          "Provided names {} do not match with the keys of the provided "
          "dictionary {}".format(names, given_keys))
  elif isinstance(distribution_spec, DistributionSpec):
    # Bug fix: an actual DistributionSpec instance is documented as supported
    # (and mentioned in the TypeError message below), but previously fell
    # through to `raise TypeError`. Nothing to convert here.
    pass
  elif isinstance(distribution_spec, type):
    distribution_spec = distribution_spec()
  else:
    raise TypeError(
        "The distribution_spec should be a dictionary ot a list of dictionaries"
        " or an instance of DistributionSpec or class DistributionSpec")
  assert (isinstance(distribution_spec, DistributionSpec) or
          isinstance(distribution_spec, list)), \
      "By now distribution_spec should be a DistributionSpec or a list"
  # Finally, actually make the samples
  if isinstance(distribution_spec, DistributionSpec):
    # Sample and convert to a list of dictionaries
    samples = get_sample((num_samples, len(names)), distribution_spec,
                         seed=seed, return_numpy=True)
    samples_dicts = []
    for k in range(num_samples):
      samples_dicts.append(
          {name: samples[k, n] for n, name in enumerate(names)})
  elif isinstance(distribution_spec, list):
    samples_dicts = distribution_spec
  return samples_dicts
def get_sample_untransformed(shape, distribution_type, distribution_params,
                             seed):
  """Samples from the given distribution, one column per parameter set.

  If `distribution_params` is a list, each entry parameterizes one column of
  the resulting [shape[0], shape[1]] sample matrix; otherwise the same
  parameters are used for the whole output.

  Args:
    shape: Tuple/List representing the shape of the output.
    distribution_type: DistributionType object.
    distribution_params: Dict of distribution parameters, or a list of such
      dicts (one per output column).
    seed: Random seed to be used.

  Returns:
    TF tensor holding a sample from the distribution.

  Raises:
    ValueError: If `distribution_params` is a list but `shape` is not
      2-dimensional with shape[1] matching the list length.
  """
  if not isinstance(distribution_params, list):
    return get_one_sample_untransformed(shape, distribution_type,
                                        distribution_params, seed)
  if len(shape) != 2 or len(distribution_params) != shape[1]:
    raise ValueError("If distribution_params is a list, the desired 'shape' "
                     "should be 2-dimensional and number of elements in the "
                     "list should match 'shape[1]'")
  per_column_samples = [
      get_one_sample_untransformed([shape[0], 1], distribution_type,
                                   column_params, seed)
      for column_params in distribution_params]
  return tf.concat(per_column_samples, axis=1)
def get_one_sample_untransformed(shape, distribution_type, distribution_params,
                                 seed):
  """Draws one raw (untransformed) sample tensor for a single parameter set.

  Args:
    shape: Desired output shape; shape[0] is the sample count.
    distribution_type: DistributionType object.
    distribution_params: Dict with "low" and "high" bounds.
    seed: Random seed to be used.

  Returns:
    TF tensor with samples from the distribution.

  Raises:
    ValueError: On an unsupported `distribution_type`.
  """
  if distribution_type not in (DistributionType.UNIFORM,
                               DistributionType.LOG_UNIFORM):
    raise ValueError("Unknown distribution type {}".format(distribution_type))
  low = distribution_params["low"]
  high = distribution_params["high"]
  if distribution_type == DistributionType.UNIFORM:
    distribution = tfd.Uniform(low=tf.constant(low, shape=shape[1:]),
                               high=tf.constant(high, shape=shape[1:]))
    return distribution.sample(shape[0], seed=seed)
  # LOG_UNIFORM: sample uniformly in log-space, then exponentiate.
  distribution = tfd.Uniform(
      low=tf.constant(np.log(low), shape=shape[1:], dtype=tf.float32),
      high=tf.constant(np.log(high), shape=shape[1:], dtype=tf.float32))
  return tf.exp(distribution.sample(shape[0], seed=seed))
def get_sample(shape, distribution_spec, seed=None, return_numpy=False):
  """Samples a tensor of random numbers according to `distribution_spec`.

  Args:
    shape: Shape of the resulting tensor.
    distribution_spec: DistributionSpec describing the base distribution and
      an optional transform.
    seed: Random seed to use for sampling.
    return_numpy: If True, evaluates the op once and returns a fixed numpy
      array; otherwise returns a TF op that allows sampling repeatedly.

  Returns:
    Numpy array or TF op representing the (possibly transformed) samples.
  """
  distribution_type = distribution_spec.distribution_type  # pytype: disable=attribute-error
  distribution_params = distribution_spec.params  # pytype: disable=attribute-error
  transform_type = distribution_spec.transform  # pytype: disable=attribute-error
  sample_op = get_sample_untransformed(shape, distribution_type,
                                       distribution_params, seed)
  if transform_type is not None:
    sample_op = get_transform(transform_type)(sample_op)
  if not return_numpy:
    return sample_op
  with tf.Session() as sess:
    return sess.run([sample_op])[0]
def get_transform(transform_type):
  """Returns the callable that converts raw samples into weights.

  Args:
    transform_type: A TransformType enum value.

  Returns:
    A callable mapping a tensor of raw samples to transformed samples.

  Raises:
    ValueError: On an unsupported `transform_type`.
  """
  if transform_type == TransformType.IDENTITY:
    return lambda x: x
  if transform_type == TransformType.LOG:
    return tf.log
  raise ValueError("Unknown transform type {}".format(transform_type))
| [
"numpy.log",
"ast.literal_eval",
"gin.configurable",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.Session"
] | [((1099, 1135), 'gin.configurable', 'gin.configurable', (['"""DistributionSpec"""'], {}), "('DistributionSpec')\n", (1115, 1135), False, 'import gin\n'), ((2695, 2730), 'ast.literal_eval', 'ast.literal_eval', (['distribution_spec'], {}), '(distribution_spec)\n', (2711, 2730), False, 'import ast\n'), ((5961, 5991), 'tensorflow.compat.v1.concat', 'tf.concat', (['all_samples'], {'axis': '(1)'}), '(all_samples, axis=1)\n', (5970, 5991), True, 'import tensorflow.compat.v1 as tf\n'), ((8072, 8084), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (8082, 8084), True, 'import tensorflow.compat.v1 as tf\n'), ((6453, 6486), 'tensorflow.compat.v1.constant', 'tf.constant', (['low'], {'shape': 'shape[1:]'}), '(low, shape=shape[1:])\n', (6464, 6486), True, 'import tensorflow.compat.v1 as tf\n'), ((6524, 6558), 'tensorflow.compat.v1.constant', 'tf.constant', (['high'], {'shape': 'shape[1:]'}), '(high, shape=shape[1:])\n', (6535, 6558), True, 'import tensorflow.compat.v1 as tf\n'), ((6801, 6812), 'numpy.log', 'np.log', (['low'], {}), '(low)\n', (6807, 6812), True, 'import numpy as np\n'), ((6875, 6887), 'numpy.log', 'np.log', (['high'], {}), '(high)\n', (6881, 6887), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes residual symmetries of solutions.
As all critical points with a rank-2 simple Lie group symmetry have been
known for many years, we can restrict ourselves to a residual Lie symmetry of
Spin(3)^A x U(1)^B. This considerably simplifies the analysis.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cmath
import collections
import glob
import itertools
import math
import numpy
import os
import pprint
# CAUTION: scipy.linalg.eigh() will produce an orthonormal basis, while
# scipy.linalg.eig(), when used on a hermitean matrix, typically will not
# orthonormalize eigenvectors in degenerate eigenspaces.
# This behavior is not documented properly, but "obvious" when considering
# the underlying algorithm.
import scipy.linalg
from dim4.so8_supergravity_extrema.code import algebra
# Residual symmetry algebra in canonical form: u(1) factors plus the
# semisimple part with one Cartan generator per spin(3) subalgebra
# (produced by canonicalize_residual_spin3u1_symmetry()).
CanonicalizedSymmetry = collections.namedtuple(
    'CanonicalizedSymmetry',
    ['u1s',  # Sequence of U(1) generators, each as a 28-vector acting on [ik].
     'semisimple_part',  # [28, d]-array, semisimple part of the algebra.
     'spin3_cartan_gens'  # Cartan generators, one per spin(3) subalgebra.
    ])


# A `Spin8Action` tuple consists of an einsum reduction-string,
# typically of the form 'aij,aN->jiN', as well as the 1st tensor-argument
# to the corresponding contraction.
Spin8Action = collections.namedtuple(
    'Spin8Action', ['einsum', 'tensor'])
class BranchingFormatter(object):
  """Renders branching decompositions as plain text."""

  def format(self, num_spin3s, branching):
    return self.sum_join(self.format_irreps(num_spin3s, b) for b in branching)

  def format_branching_tag(self, tag):
    """Formats tag (8, 'v') -> '8v' etc."""
    dim, subscript = tag
    return '%s%s' % (dim, subscript)

  def sum_join(self, formatted):
    return ' + '.join(formatted)

  def format_multiplicity(self, multiplicity, formatted_obj):
    """Prefixes `formatted_obj` with its multiplicity, unless it is one."""
    if multiplicity == 1:
      return formatted_obj
    return '%dx%s' % (multiplicity, formatted_obj)

  def format_irreps(self, num_spin3s, irreps_part):
    """Formats a group of identical irreducible representations."""
    charges, multiplicity = irreps_part
    return self.format_multiplicity(
        multiplicity, self.format_irrep(num_spin3s, charges))

  def format_irrep(self, num_spin3s, charges):
    """Formats a single irreducible representation."""
    spin3_charges = charges[:num_spin3s]
    u1_charges = charges[num_spin3s:]
    if set(spin3_charges) == {0}:
      spin3_part = ''
    else:
      # Highest weight j -> dimension 2j+1 per spin(3) factor.
      spin3_part = 'x'.join('%s' % int(round(2 * c + 1))
                            for c in spin3_charges)
    assert all(c == int(c) for c in u1_charges)
    u1_part = ', '.join(str(int(c)) for c in u1_charges)
    if not spin3_part:
      return '{%s}' % u1_part
    if u1_part:
      return '[%s]{%s}' % (spin3_part, u1_part)
    return '[%s]' % spin3_part
class LaTeXBranchingFormatter(BranchingFormatter):
  """BranchingFormatter that emits LaTeX markup instead of plain text."""

  def format_branching_tag(self, tag):
    """Formats tag (8, 'v') -> '8_{v}' etc."""
    dim, subscript = tag
    return '%s_{%s}' % (dim, subscript)

  def format_multiplicity(self, multiplicity, formatted_obj):
    if multiplicity == 1:
      return formatted_obj
    return r'%d\times%s' % (multiplicity, formatted_obj)

  def _format_charge(self, c, sub_super):
    assert c == int(c)
    if c == 0:
      return ''
    sign_symbol = '+' if c > 0 else '-'
    return r'%s{\scriptscriptstyle %s}' % (sub_super, sign_symbol * abs(int(c)))

  def format_irrep(self, num_spin3s, charges):
    # Style such as 33^{+++}_{--}: the 1st U(1) gets superscript charges,
    # the 2nd U(1) gets subscript charges.
    spin3_charges = charges[:num_spin3s]
    u1_charges = charges[num_spin3s:]
    assert all(c == int(c) for c in u1_charges)
    if set(spin3_charges) <= {0}:
      spin3_part = r'\mathbf{1}'  # No Spin3s, or only singlet.
    elif num_spin3s == 1:
      spin3_part = r'\mathbf{%s}' % int(round(2 * spin3_charges[0] + 1))
    else:
      dims = ','.join(r'\mathbf{%d}' % int(round(2 * c + 1))
                      for c in spin3_charges)
      spin3_part = '(%s)' % dims
    u1a_part = u1b_part = ''
    if len(u1_charges) >= 1:
      u1a_part = self._format_charge(u1_charges[0], '^')
    if len(u1_charges) == 2:
      u1b_part = self._format_charge(u1_charges[1], '_')
    return spin3_part + u1a_part + u1b_part
# Module-level formatter singletons (the formatter classes hold no state).
TEXT_FORMATTER = BranchingFormatter()
LATEX_FORMATTER = LaTeXBranchingFormatter()


# The Spin(8) structure constants f_{abc}, with adjoint indices a,b,c = 0..27.
_spin8_fabc = 2 * numpy.einsum('cik,abik->abc',
                               algebra.su8.m_28_8_8,
                               # We do not need to antisymmetrize [ik] here,
                               # as the above factor already does this.
                               numpy.einsum('aij,bjk->abik',
                                            algebra.su8.m_28_8_8,
                                            algebra.su8.m_28_8_8))


# Per-generator action matrices on a 56-dimensional index pair,
# contracted from algebra.su8.m_action_56_56_8_8.
_spin8_action56 = numpy.einsum('aik,ABik->aAB',
                               algebra.su8.m_28_8_8,
                               algebra.su8.m_action_56_56_8_8)
# Branching-rules task specification, as used for the `decomposition_tasks`
# argument to spin3u1_decompose().
# One may generally want to pass an extended arg that adds tasks which also
# decompose e.g. degenerate mass-eigenstates w.r.t. symmetry.
# These are also used to find scaling for u(1) generators that makes all
# 8v, 8s, 8c charges integral.
# Action of the spin(8) generators on the 8v representation.
# (An identical duplicate assignment of this constant was removed.)
SPIN8_ACTION_8V = Spin8Action(einsum='aij,aN->jiN',
                              tensor=algebra.su8.m_28_8_8)
# Generator action on the 8s representation (built from gamma_vvss).
SPIN8_ACTION_8S = Spin8Action(
    einsum='aAB,aN->BAN',
    tensor=numpy.einsum('aij,ijAB->aAB',
                        0.25 * algebra.su8.m_28_8_8,
                        algebra.spin8.gamma_vvss))
# Generator action on the 8c representation (built from gamma_vvcc).
SPIN8_ACTION_8C = Spin8Action(
    einsum='aAB,aN->BAN',
    tensor=numpy.einsum('aij,ijAB->aAB',
                        0.25 * algebra.su8.m_28_8_8,
                        algebra.spin8.gamma_vvcc))
# Adjoint (28-dimensional) action, via the structure constants.
SPIN8_ACTION_AD = Spin8Action(einsum='aAB,aN->BAN', tensor=_spin8_fabc * 0.5)
# Action on the 56-dimensional representation (see _spin8_action56).
SPIN8_ACTION_FERMIONS = Spin8Action(einsum='aAB,aN->BAN',
                                    tensor=_spin8_action56)
# Action on the 70 scalars, via the e7 action tensor.
SPIN8_ACTION_SCALARS = Spin8Action(
    einsum='aAB,aN->BAN',
    tensor=0.5 * algebra.e7.spin8_action_on_v70o)
# Branching tasks for the three 8-dimensional representations; each task
# pairs a Spin8Action with (tag, subspace-to-decompose) entries.
SPIN8_BRANCHINGS_VSC = (
    (SPIN8_ACTION_8V,
     [((8, 'v'), numpy.eye(8))]),
    (SPIN8_ACTION_8S,
     [((8, 's'), numpy.eye(8))]),
    (SPIN8_ACTION_8C,
     [((8, 'c'), numpy.eye(8))]))
# Extended branching-rules task specification, adds 28->... branching.
SPIN8_BRANCHINGS = (
    SPIN8_BRANCHINGS_VSC +
    ((SPIN8_ACTION_AD, [((28, ''), numpy.eye(28))]),))
def round2(x):
  """Rounds `x` to two decimal digits, mapping a -0.0 result to 0.0."""
  rounded = numpy.round(x, 2)
  return rounded if rounded else 0.0
def allclose2(p, q):
  """Checks whether `p` and `q` agree to roughly two digits."""
  tolerance = 0.01
  return numpy.allclose(p, q, rtol=tolerance, atol=tolerance)
def aggregate_eigenvectors(eigvals, eigvecs, tolerance=1e-6):
  """Groups eigenvectors by (approximately equal) eigenvalue.

  The `eigvals` and `eigvecs` arguments must be as produced by
  scipy.linalg.eigh().

  Args:
    eigvals: Array of eigenvalues. Must be approximately-real.
    eigvecs: Array of eigenvectors (as columns).
    tolerance: Threshold below which two eigenvalues count as degenerate.

  Returns:
    List [(eigenvalue, eigenspace), ...], sorted by descending eigenvalue,
    where each `eigenspace` is a list of eigenvectors (as complex tuples)
    for the corresponding eigenvalue.

  Raises:
    ValueError: If the eigenvalues are not approximately real.
  """
  if not numpy.allclose(eigvals, eigvals.real):
    raise ValueError('Non-real eigenvalues.')
  complex_eigvecs = [tuple(v.astype(numpy.complex128)) for v in eigvecs.T]
  # Sort by descending eigenvalue only -- eigenvectors of degenerate
  # eigenvalues must never be compared for ordering.
  pairs = sorted(zip(eigvals.real, complex_eigvecs), key=lambda p: -p[0])
  aggregated = []
  for value, vector in pairs:
    matching_group = None
    for known_value, group in aggregated:
      if abs(value - known_value) <= tolerance:
        matching_group = group
        break
    if matching_group is None:
      aggregated.append((value, [vector]))
    else:
      matching_group.append(vector)
  return aggregated
def get_residual_gauge_symmetry(v70, threshold=0.05):
  """Maps scalar 70-vector to [a, n]-tensor of unbroken symmetry generators.

  Index `a` is a Spin(8)-adjoint index, `n` counts (orthonormal) basis vectors.

  Args:
    v70: The e7/su8 70-vector describing a point on the scalar manifold.
    threshold: Threshold on the generalized SVD-eigenvalue for considering
      a direction as belonging to the residual symmetry.
  """
  gauge_variation = numpy.einsum('avw,v->aw',
                                 algebra.e7.spin8_action_on_v70,
                                 v70)
  svd_u, svd_s, _ = scipy.linalg.svd(gauge_variation)
  # Keep the directions whose singular value is essentially zero: these
  # generators leave the scalar point invariant.
  return svd_u.T[svd_s <= threshold].T
def get_simultaneous_eigenbasis(commuting_gens,
                                gen_action_einsum='abc,aN->cbN',
                                gen_action_tensor=_spin8_fabc,
                                initial_space=None,
                                checks=True,
                                tolerance=1e-6):
  """Finds a simultaneous eigenbasis for a collection of commuting generators.

  Args:
    commuting_gens: [28, N]-array of real and mutually orthogonal generators.
    gen_action_einsum: numpy.einsum() contraction specification that maps
      `gen_action_tensor` and `commuting_gens` to a set of N matrices given as
      [D, D, N]-array that represent the generators on the desired space.
    gen_action_tensor: Tensor contracted with `commuting_gens` via
      `gen_action_einsum`; defaults to the spin(8) structure constants.
    initial_space: [D, K]-dimensional initial space to decompose into
      eigenspaces, or `None`. If `None`, uses numpy.eye(D).
    checks: If True, perform internal consistency checks.
    tolerance: Tolerance difference-threshold for considering
      two eigenvalues as identical.

  Returns:
    Pair of (simultaneous_eigenbasis, charges), where `simultaneous_eigenbasis`
    is a [28, K]-dimensional array of eigenvectors, and `charges` is a list
    of corresponding charge-tuples.
  """
  # Map generators to endomorphisms. Our conventions are such that
  # the result of contracting with `gen_action_tensor` also gets multiplied
  # with 1j. For spin(8) action on 8v, 8s, 8c, 28, etc., this ensures that
  # with all-real generators and all-real action-tensor, we get hermitean
  # endomorphisms with all-real spectrum.
  gens_action = numpy.einsum(gen_action_einsum,
                             gen_action_tensor,
                             commuting_gens) * 1j
  if initial_space is None:
    initial_space = numpy.eye(gens_action.shape[0])
  #
  def recursively_split_eigenspaces(num_generator, charge_tagged_eigenspaces):
    """Recursively splits an eigenspace.

    Args:
      num_generator: The number of the commuting generator to use for the next
        splitting-step.
      charge_tagged_eigenspaces: List [(partial_charges, subspace), ...]
        where `partial_charges` is a tuple of charges w.r.t. the first
        `num_generator` generators (so, () for num_generator == 0),
        and `subspace` is a [D, K]-array of subspace directions.

    Returns:
      (Ultimately), fully split charge_tagged_eigenspaces, where the
      `partial_charges` tags list as many charges as there are generators.
    """
    # Base case: all generators processed; every tag is complete.
    if num_generator == gens_action.shape[-1]:
      return charge_tagged_eigenspaces
    gen_action = gens_action[:, :, num_generator]
    split_eigenspaces = []
    for charges, espace in charge_tagged_eigenspaces:
      if checks:
        # Incoming subspaces must be orthonormal for the projection below.
        eigenspace_sprod = numpy.einsum('aj,ak->jk', espace.conj(), espace)
        assert allclose2(
            eigenspace_sprod,
            numpy.eye(espace.shape[1])), (
                'Weird Eigenspace normalization: ' + repr(
                    numpy.round(eigenspace_sprod, 3)))
      # Project the generator's action onto the current subspace.
      gen_on_eigenspace = numpy.einsum(
          'aj,ak->jk',
          espace.conj(),
          numpy.einsum('ab,bj->aj', gen_action, espace))
      sub_eigvals, sub_eigvecs_T = scipy.linalg.eigh(gen_on_eigenspace)
      list_approx_eigval_and_eigvecs = []
      for sub_eigval, sub_eigvec in zip(sub_eigvals, sub_eigvecs_T.T):
        # Lift back to original space.
        eigvec = numpy.einsum('gs,s->g', espace, sub_eigvec)  # |v> <v| G |v>
        if checks:
          gv = numpy.dot(gen_action, eigvec)
          ev = sub_eigval * eigvec
          assert allclose2(gv, ev), (
              'Sub-Eigval is bad: g*v=%r, e*v=%r' % (
                  numpy.round(gv, 3), numpy.round(ev, 3)))
          assert allclose2(
              numpy.dot(eigvec.conj(), eigvec), 1.0), (
                  'Eigenvector is not normalized.')
        # Aggregate eigenvectors of (approximately) equal sub-eigenvalue.
        for seen_eigval, seen_eigvecs in list_approx_eigval_and_eigvecs:
          if abs(sub_eigval - seen_eigval) <= tolerance:
            assert all(allclose2(0, numpy.dot(s.conj(), eigvec))
                       for s in seen_eigvecs), 'Non-Orthogonality'
            seen_eigvecs.append(eigvec)
            break
        else:  # Reached end of loop.
          list_approx_eigval_and_eigvecs.append(
              (sub_eigval,  # This is also the actual eigenvalue.
               [eigvec]))
      # Extend each charge-tag with this generator's eigenvalue.
      for eigval, eigvecs in list_approx_eigval_and_eigvecs:
        eigenspace = numpy.stack(eigvecs, axis=-1)
        assert allclose2(
            numpy.einsum('aj,ak->jk', eigenspace.conj(), eigenspace),
            numpy.eye(eigenspace.shape[-1])), 'Bad Eigenspace'
        split_eigenspaces.append((charges + (eigval,), eigenspace))
    return recursively_split_eigenspaces(num_generator + 1, split_eigenspaces)
  #
  charge_tagged_eigenspaces = recursively_split_eigenspaces(
      0, [((), initial_space)])
  # Flatten eigenspaces into one eigenvector matrix plus parallel charge list.
  simultaneous_eigenbasis = numpy.stack(
      [evec for _, espace in charge_tagged_eigenspaces for evec in espace.T],
      axis=-1)
  charges = [evec_charges
             for evec_charges, espace in charge_tagged_eigenspaces
             for evec in espace.T]
  return simultaneous_eigenbasis, charges
def scale_u1_generator_to_8vsc_integral_charges(u1_gen, round_to_digits=3):
  """Rescales `u1_gen` so its 8v, 8s, and 8c charges all become integers."""
  charges = []
  for spin8action, _ in SPIN8_BRANCHINGS_VSC:
    action_matrix = numpy.einsum(spin8action.einsum,
                                 spin8action.tensor,
                                 1j * u1_gen.reshape((28, 1)))[:, :, 0]
    eigvals, _ = scipy.linalg.eigh(action_matrix)
    assert numpy.allclose(eigvals, eigvals.real)
    charges.extend(eigvals)
  approx_charges = sorted(set(abs(numpy.round(c, 6)) for c in charges) - {0.0})
  factor = 1.0 / approx_charges[0]
  # Try small integer multiples of the base scaling until every charge
  # rounds to an integer.
  for multiple in range(1, 25):
    scaling = factor * multiple
    rounded = [numpy.round(scaling * c, round_to_digits)
               for c in approx_charges]
    if all(x == int(x) for x in rounded):
      return scaling * u1_gen
  raise ValueError('Could not re-scale U(1)-generator.')
def canonicalize_u1s(u1s, tolerance=1e-3):
  """Canonicalizes a collection of up to two u(1) generators."""
  num_u1s = u1s.shape[1]
  if num_u1s == 0:
    return numpy.zeros([28, 0])
  if u1s.shape[0] != 28:
    raise ValueError(
        'Each U(1) generator should be given as a 28-vector.')
  if num_u1s > 2:
    raise ValueError('Cannot handle more than two U(1)s')
  if num_u1s == 1:
    return scale_u1_generator_to_8vsc_integral_charges(u1s[:, 0]).reshape(28, 1)
  # Exactly two U(1)s: reduce the two-dimensional charge lattice.
  _, evec_charges = get_simultaneous_eigenbasis(u1s)
  # View each (charge_a, charge_b) pair as a complex number.
  zs = numpy.array([x + 1j * y for x, y in evec_charges])
  nonzero_zs = sorted((z for z in zs if abs(z) >= tolerance), key=abs)
  z_nearest = nonzero_zs[0]
  # Rotate the generator pair so the shortest lattice vector aligns with
  # the first axis.
  angle = math.atan2(z_nearest.imag, z_nearest.real)
  cos_angle = math.cos(angle)
  sin_angle = math.sin(angle)
  u1a = u1s[:, 0] * cos_angle + u1s[:, 1] * sin_angle
  u1b = u1s[:, 0] * sin_angle - u1s[:, 1] * cos_angle
  return numpy.stack([
      scale_u1_generator_to_8vsc_integral_charges(u1a),
      scale_u1_generator_to_8vsc_integral_charges(u1b)], axis=1)
def decompose_reductive_lie_algebra(residual_symmetry,
                                    threshold=0.05):
  """Decomposes a residual symmetry into semisimple and u(1) parts.

  Args:
    residual_symmetry: Residual symmetry as produced by
      `get_residual_gauge_symmetry()`.
    threshold: Threshold for SVD generalized commutator-eigenvalue to consider
      a generator as being part of the non-semisimple subalgebra.

  Returns:
    Pair (derivative_symmetry, u1s) of [28, k]-arrays holding the generators
    of the semisimple ("derivative") part and the u(1) factors, respectively.
  """
  no_symmetry = numpy.zeros([28, 0])
  if residual_symmetry.shape[1] == 0:
    return no_symmetry, no_symmetry
  # All pairwise commutators [g_v, g_w] of the input generators.
  commutators = numpy.einsum(
      'avc,cw->avw',
      numpy.einsum('abc,bv->avc', _spin8_fabc, residual_symmetry),
      residual_symmetry)
  su, ss, svh = scipy.linalg.svd(commutators.reshape(commutators.shape[0], -1))
  del svh  # Unused.
  # We want those commutators that do not go to zero.
  derivative_symmetry = su.T[:len(ss)][ss >= threshold].T
  # By construction (via SVD), and using orthogonality of our spin(8) basis,
  # `derivative_symmetry` already consists of orthogonal spin(8) generators,
  # i.e. tr(AB) = 0 for basis vectors A != B.
  # The 'complement' consists of u(1) factors that have zero inner product with
  # `derivative_symmetry`.
  if derivative_symmetry.size:
    inner_products_with_input = numpy.einsum('av,aw->vw',
                                             residual_symmetry,
                                             derivative_symmetry)
    su, ss, svh = scipy.linalg.svd(inner_products_with_input)
    # Zero-pad the vector of 'generalized eigenvalues' to su's size.
    ss_ext = numpy.concatenate(
        [ss, numpy.zeros([max(0, su.shape[0] - len(ss))])])
    u1s = numpy.einsum('av,vn->an',
                       residual_symmetry,
                       su.T[ss_ext <= threshold].T)
  else:  # All residual symmetry is in u(1)-factors.
    return no_symmetry, residual_symmetry
  # Assert that our U1s are orthogonal.
  if u1s.size:
    # Check generator orthonormality.
    assert numpy.allclose(numpy.einsum('av,aw->vw', u1s, u1s),
                          numpy.eye(u1s.shape[1]), atol=1e-6)
  else:
    u1s = no_symmetry
  return derivative_symmetry, u1s
def find_raw_cartan_subalgebra(spin8_subalgebra_generators, threshold=1e-3):
  """Finds a Cartan subalgebra for an algebra of the form A*so(3) + B*u(1).

  Args:
    spin8_subalgebra_generators: [28, k]-array of orthonormal generators
      spanning the subalgebra.
    threshold: Magnitude below which an eigenvalue of a generator's adjoint
      action counts as zero (i.e. the direction commutes with the generator).

  Returns:
    [28, r]-array of mutually commuting Cartan generators.
  """
  if spin8_subalgebra_generators.shape[1] == 0:
    return numpy.zeros([28, 0])
  subalgebra_sprods = numpy.einsum(
      'aj,ak->jk', spin8_subalgebra_generators, spin8_subalgebra_generators)
  # Check that incoming subalgebra-generators really are reasonably orthonormal
  # (up to overall scaling) w.r.t. Cartan-Killing metric.
  assert numpy.allclose(subalgebra_sprods,
                        numpy.eye(spin8_subalgebra_generators.shape[1]))
  cartan_generators_found = []
  residual_charge_zero_subspace = spin8_subalgebra_generators
  # Repeatedly pick a generator, then restrict to the subspace commuting
  # with it, until no commuting directions remain.
  while True:
    gen = residual_charge_zero_subspace[:, 0]
    cartan_generators_found.append(gen)
    assert numpy.allclose(gen, gen.real), 'Generator is not real!'
    orthogonal_subalgebra = residual_charge_zero_subspace[:, 1:]
    if not orthogonal_subalgebra.shape[1]:
      return numpy.stack(cartan_generators_found, axis=-1)
    gen_ad_action_on_spin8 = numpy.einsum('abc,a->cb', _spin8_fabc, gen)
    # Project the (1j-scaled, hence hermitean) adjoint action onto the
    # remaining subalgebra directions.
    gen_action_on_orthogonal_subalgebra = numpy.einsum(
        'ai,aj->ij',
        orthogonal_subalgebra,
        numpy.einsum('bc,cj->bj',
                     gen_ad_action_on_spin8 * 1j,
                     orthogonal_subalgebra))
    assert numpy.allclose(gen_action_on_orthogonal_subalgebra +
                          gen_action_on_orthogonal_subalgebra.T,
                          numpy.zeros_like(gen_action_on_orthogonal_subalgebra))
    eigvals, eigvecs_T = scipy.linalg.eigh(gen_action_on_orthogonal_subalgebra)
    nullspace_gens = []
    for eigval, eigvec in zip(eigvals, eigvecs_T.T):
      if abs(eigval) <= threshold:
        assert numpy.allclose(eigvec, eigvec.real)
        nullspace_gens.append(
            numpy.einsum('ai,i->a', orthogonal_subalgebra, eigvec.real))
    # Idiom fix: truthiness check instead of `if not len(...)`.
    if not nullspace_gens:
      return numpy.stack(cartan_generators_found, axis=-1)
    nullspace = numpy.stack(nullspace_gens, axis=1)
    assert numpy.allclose(nullspace, nullspace.real), 'Non-real nullspace'
    assert numpy.allclose(numpy.einsum('ai,aj->ij', nullspace, nullspace),
                          numpy.eye(nullspace.shape[1])), 'Non-Ortho Nullspace'
    residual_charge_zero_subspace = nullspace
def weightspace_decompose(generator_action,
                          cartan_subalgebra_generators,
                          space,
                          tolerance=1e-6):
  """Decomposes `space` into subspaces tagged by weight-vectors.

  Args:
    generator_action: [num_gens, D, D]-array giving the generators' action
      on the D-dimensional representation space (contracted as 'aIJ' below).
    cartan_subalgebra_generators: Array whose columns are the commuting
      Cartan generators.
    space: [D, K]-array of orthonormal basis vectors to decompose.
    tolerance: Eigenvalue-degeneracy threshold forwarded to
      `aggregate_eigenvectors()`.

  Returns:
    List of (weight_vector, subspace) pairs, where `weight_vector` is a tuple
    with one eigenvalue per Cartan generator and `subspace` is a [D, k]-array.
  """
  seq_cartan_generators = list(cartan_subalgebra_generators.T)
  def cartan_split(subspace_tagged_by_weight_prefix, num_cartan_generator):
    # Hermitean action of the current Cartan generator (note the 1j factor).
    cartan_action = numpy.einsum(
        'aIJ,a->IJ',
        generator_action,
        seq_cartan_generators[num_cartan_generator] * 1j)
    result = []
    for weight_prefix, subspace in subspace_tagged_by_weight_prefix:
      assert numpy.allclose(
          numpy.einsum('aJ,aK->JK', subspace.conj(), subspace),
          numpy.eye(subspace.shape[1])), (
              'Non-orthonormalized subspace:\n' +
              repr(numpy.round(numpy.einsum('aJ,aK->JK',
                                            subspace.conj(),
                                            subspace), 3)))
      cartan_action_on_subspace = numpy.einsum(
          'Jm,Jn->mn', subspace.conj(),
          numpy.einsum('JK,Kn->Jn', cartan_action, subspace))
      eigvals, eigvecs_T = scipy.linalg.eigh(cartan_action_on_subspace)
      # Bug fix: `tolerance` was accepted but silently ignored; it is now
      # forwarded (the default matches aggregate_eigenvectors' default, so
      # existing callers see identical behavior).
      eigval_and_rel_eigenspace = aggregate_eigenvectors(eigvals, eigvecs_T,
                                                         tolerance=tolerance)
      for eigval, rel_eigenspace in eigval_and_rel_eigenspace:
        ext_weight_prefix = (weight_prefix + (eigval,))
        result.append((ext_weight_prefix,
                       numpy.einsum('In,nj->Ij',
                                    subspace,
                                    numpy.stack(rel_eigenspace, axis=-1))))
    if num_cartan_generator == len(seq_cartan_generators) - 1:
      return result
    return cartan_split(result, num_cartan_generator + 1)
  return cartan_split([((), space)], 0)
def get_simple_roots_info(rootspaces, threshold=0.01):
  """Extracts simple roots from weightspace-decomposition of a Lie algebra.

  Args:
    rootspaces: List of (root, subspace) pairs as produced by
      `weightspace_decompose()`.
    threshold: Tolerance used for componentwise root comparisons.

  Returns:
    Dict with keys `simple_root_sprods`, `canonicalized_cartan_subalgebra`,
    `scaling_factor_squared`, `pos_simple_rootspaces`, `scaled_root_sprods`,
    and `scaled_roots`.
  """
  # Finite-dimensional simple Lie algebras have one-dimensional root spaces.
  # We use this to eliminate the Cartan subalgebra at the zero-root.
  rank = len(rootspaces[0][0])
  null_root = (0.0,) * rank
  positive_roots = [root for root, subspace in rootspaces
                    if subspace.shape[1] == 1 and root > null_root]
  def root_length_squared(root):
    return sum(x * x for x in root)
  def root_distance(root1, root2):
    return max(abs(r1 - r2) for r1, r2 in zip(root1, root2))
  # If the root is 'clearly too long', drop it rightaway.
  # It does not hurt if we allow a large amount of slack,
  # as this is just for increased performance.
  threshold_root_length_squared = max(
      map(root_length_squared, positive_roots)) * (1 + threshold)
  # Collect all pairwise sums of positive roots; a simple root is one that
  # is NOT such a sum.
  sum_roots = []
  for root1 in positive_roots:
    for root2 in positive_roots:
      root12 = tuple(r1 + r2 for r1, r2 in zip(root1, root2))
      if root_length_squared(root12) > threshold_root_length_squared:
        continue
      for sum_root in sum_roots:
        if root_distance(sum_root, root12) <= threshold:
          break  # We already know this sum-root.
      else:  # Reached end of loop.
        sum_roots.append(root12)
  simple_roots = [root for root in positive_roots
                  if not any(root_distance(sum_root, root) < threshold
                             for sum_root in sum_roots)]
  a_simple_roots = numpy.array(simple_roots)
  simple_root_sprods = numpy.einsum('rj,rk->jk', a_simple_roots, a_simple_roots)
  # We always normalize the length-squared of the longest root to 2.
  scaling_factor_squared = 2.0 / max(
      simple_root_sprods[n, n] for n in range(simple_root_sprods.shape[0]))
  scaling_factor = math.sqrt(scaling_factor_squared)
  scaled_root_sprods = simple_root_sprods * scaling_factor_squared
  # For spin(3)^N, the roots have to be mutually orthogonal
  # with length-squared 2.
  assert numpy.allclose(scaled_root_sprods,
                        2 * numpy.eye(simple_root_sprods.shape[0]) )
  pos_simple_rootspaces = [(pos_root, scaling_factor * pos_rootspace)
                           for (pos_root, pos_rootspace) in rootspaces
                           for simple_root in simple_roots
                           if tuple(simple_root) == tuple(pos_root)]
  canonicalized_cartan_subalgebra_generators = []
  for pos_root, pos_rootspace in pos_simple_rootspaces:
    # For finite-dimensional Lie algebras, root spaces are one-dimensional.
    assert pos_rootspace.shape[1] == 1
    # Cartan generator as the commutator [L+, L-] of ladder operators.
    l_plus = pos_rootspace[:, 0]
    l_minus = l_plus.conj()
    cartan_h = -1j * numpy.einsum('abc,a,b->c', _spin8_fabc, l_plus, l_minus)
    canonicalized_cartan_subalgebra_generators.append(cartan_h)
  # TODO(tfish): Only return what we need, and *not* in a dict.
  return dict(simple_root_sprods=simple_root_sprods,
              canonicalized_cartan_subalgebra=numpy.stack(
                  canonicalized_cartan_subalgebra_generators, axis=-1),
              scaling_factor_squared=scaling_factor_squared,
              pos_simple_rootspaces=pos_simple_rootspaces,
              scaled_root_sprods=scaled_root_sprods,
              scaled_roots=a_simple_roots * math.sqrt(scaling_factor_squared))
def canonicalize_residual_spin3u1_symmetry(residual_symmetry):
  """Puts a residual so(3)^M x u(1)^N symmetry into canonical form."""
  semisimple_part, raw_u1s = decompose_reductive_lie_algebra(residual_symmetry)
  return CanonicalizedSymmetry(
      u1s=canonicalize_u1s(raw_u1s),
      semisimple_part=semisimple_part,
      spin3_cartan_gens=find_raw_cartan_subalgebra(semisimple_part))
def group_charges_into_spin3u1_irreps(num_spin3s, charge_vecs):
  """Groups observed charges into irreducible representations.

  Args:
    num_spin3s: Length of the prefix of each charge-vector that belongs to
      spin(3) angular momentum operators.
    charge_vecs: List of charge-tuple vectors.

  Returns:
    List [((tuple(highest_spin3_weights) + tuple(u1_charges)), multiplicity),
    ...] of irreducible-representation descriptions, sorted by descending
    combined-charge-vector.
  """
  def weights_for_highest(highest_weight):
    # Spin(3) irrep with highest weight j has weights j, j-1, ..., -j.
    doubled = int(round(2 * highest_weight))
    return [highest_weight - step for step in range(1 + doubled)]

  def weight_vectors(cvec):
    # Yields every weight-vector of the irrep whose highest weight-vector
    # is `cvec`; the u(1) charges are carried along unchanged.
    spin3_weight_lists = [weights_for_highest(w) for w in cvec[:num_spin3s]]
    for spin3_combo in itertools.product(*spin3_weight_lists):
      yield spin3_combo + cvec[num_spin3s:]

  remaining = collections.Counter(charge_vecs)
  irreps = collections.defaultdict(int)
  # Repeatedly peel off the irrep led by the highest remaining charge-vector.
  while remaining:
    highest_cvec, multiplicity = max(remaining.items())
    for member in weight_vectors(highest_cvec):
      remaining[member] -= multiplicity
      if not remaining[member]:
        del remaining[member]
    irreps[highest_cvec] += multiplicity
  return sorted(irreps.items(), reverse=True)  # Highest charges first.
def spin3u1_decompose(canonicalized_symmetry,
                      decomposition_tasks=SPIN8_BRANCHINGS,
                      simplify=round2):
  """Computes decompositions into so(3)^M x u(1)^N irreducible representations.

  Args:
    canonicalized_symmetry: A `CanonicalizedSymmetry` object.
    decomposition_tasks: Sequence of pairs (spin8action, tasks),
      where `tasks` is a sequence of pairs (tag, orthogonalized_subspace).
    simplify: The rounding function used to map approximately-integer charges
      to integers.

  Returns:
    Per entry of `decomposition_tasks`, a list of (task_tag, grouped_irreps).
  """
  spin3_cartan = canonicalized_symmetry.spin3_cartan_gens
  u1_part = canonicalized_symmetry.u1s
  spin3_gens = (spin3_cartan.T
                if (spin3_cartan is not None and len(spin3_cartan)) else [])
  u1_gens = u1_part.T if (u1_part is not None and len(u1_part)) else []
  num_spin3s = len(spin3_gens)

  def grouped(charges):
    # Spin(3) angular momentum charges need to be half-integral; no such
    # restriction is imposed on the u(1) charges.
    assert all(round2(2 * c) == int(round2(2 * c))
               for charge_vec in charges
               for c in charge_vec[:num_spin3s])
    simplified = [tuple(map(simplify, charge_vec)) for charge_vec in charges]
    return group_charges_into_spin3u1_irreps(num_spin3s, simplified)

  angular_momentum_u1s = []
  if num_spin3s:
    rootspaces = weightspace_decompose(
        _spin8_fabc,
        spin3_gens.T,
        canonicalized_symmetry.semisimple_part)
    sroot_info = get_simple_roots_info(rootspaces)
    angular_momentum_u1s = list(sroot_info['canonicalized_cartan_subalgebra'].T)
  nonempty_gens = [g for g in [angular_momentum_u1s, u1_gens] if len(g)]
  if nonempty_gens:
    commuting_gens = numpy.concatenate(nonempty_gens).T
  else:
    commuting_gens = numpy.zeros([28, 0])
  ret = []
  for spin8action, tasks in decomposition_tasks:
    task_results = []
    for task_tag, space_to_decompose in tasks:
      _, charges = get_simultaneous_eigenbasis(
          commuting_gens,
          gen_action_einsum=spin8action.einsum,
          gen_action_tensor=spin8action.tensor,
          initial_space=space_to_decompose)
      task_results.append((task_tag, grouped(charges)))
    ret.append(task_results)
  return ret
def spin3u1_branching_and_spectra(canonicalized_symmetry,
                                  decomposition_tasks=()):
  """Computes so(3)^M x u(1)^N branching and spectra.

  Args:
    canonicalized_symmetry: A `CanonicalizedSymmetry` object.
    decomposition_tasks: Decomposition tasks for the spectra computation.

  Returns:
    A pair (vsc_ad_branching, spectra).
  """
  branching = spin3u1_decompose(canonicalized_symmetry)
  spectra = spin3u1_decompose(canonicalized_symmetry, decomposition_tasks)
  return branching, spectra
def _mass_eigenspace_tasks(tag, mass_tagged_eigenspaces):
  """Builds decomposition tasks for a list of mass-tagged eigenspaces.

  Args:
    tag: Tag naming the particle species (e.g. 'gravitinos', 'fermions').
    mass_tagged_eigenspaces: Sequence of (mass, basis) pairs.

  Returns:
    List of ((tag, subspace_shape, mass), subspace) task pairs.
  """
  tasks = []
  for mass, basis in mass_tagged_eigenspaces:
    subspace = numpy.array(basis).T
    tasks.append(((tag, subspace.shape, mass), subspace))
  return tasks


def _scalar_decomposition_tasks(mass_tagged_eigenspaces_scalars,
                                parity_tolerance):
  """Builds scalar decomposition tasks, splitting off 35s-only/35c-only parts.

  For scalars, we try to split off mass-eigenstates that are 35s-only or
  35c-only, using the parity operator that is +1 on the first 35 scalar
  directions and -1 on the last 35.

  Args:
    mass_tagged_eigenspaces_scalars: Sequence of (mass, basis) pairs.
    parity_tolerance: Tolerance for classifying parity eigenvalues as +/-1.

  Returns:
    List of ((tag, shape, mass, parity_tag), subspace) task pairs.
  """
  scalar_tasks = []
  p_op = numpy.eye(70)
  p_op[35:, 35:] *= -1
  for scalar_mass, basis in mass_tagged_eigenspaces_scalars:
    a_basis = numpy.array(basis)
    p_op_on_basis = numpy.einsum('jn,nm,km->jk', a_basis.conj(), p_op, a_basis)
    assert numpy.allclose(p_op_on_basis, p_op_on_basis.real)
    assert numpy.allclose(p_op_on_basis, p_op_on_basis.T)
    p_op_eigvals, p_op_eigvecs_T = numpy.linalg.eigh(p_op_on_basis)
    p_op_eigvals_re = p_op_eigvals.real
    assert numpy.allclose(p_op_eigvals, p_op_eigvals_re)
    # We have to lift the p_op_eigvecs_T to a_basis.
    subspace_eigvecs = numpy.einsum('vn,vV->Vn', p_op_eigvecs_T, a_basis)
    eigval_eigvecs = aggregate_eigenvectors(p_op_eigvals_re, subspace_eigvecs,
                                           tolerance=1e-4)
    # subspaces_35s and subspaces_35c each have <=1 entries.
    subspaces_35s = [eigvecs for eigval, eigvecs in eigval_eigvecs
                     if eigval > 1 - parity_tolerance]
    subspaces_35c = [eigvecs for eigval, eigvecs in eigval_eigvecs
                     if eigval < -1 + parity_tolerance]
    merged_subspaces_other = [
        eigvec for eigval, eigvecs in eigval_eigvecs
        for eigvec in eigvecs
        if -1 + parity_tolerance <= eigval <= 1 - parity_tolerance]
    for parity_tag, subspaces in (('s', subspaces_35s), ('c', subspaces_35c)):
      for subspace in subspaces:
        a_subspace = numpy.array(subspace).T
        scalar_tasks.append(
            (('scalars', a_subspace.shape, scalar_mass, parity_tag),
             a_subspace))
    # "Mixture" states. While we do get them in terms of parity-eigenstates,
    # for 'weird' eigenvalues such as -1/3. Here, we just merge them all back
    # together into one space, i.e. forget about resolving the spectrum.
    # Why? Otherwise, we may see in the report
    # "0.000m{1}, 0.000m{1}, 0.000m{1}, ...", which is not overly informative.
    a_subspace = numpy.array(merged_subspaces_other).T
    if len(merged_subspaces_other):
      scalar_tasks.append(
          (('scalars', a_subspace.shape, scalar_mass, 'm'), a_subspace))
  return scalar_tasks


def spin3u1_physics(
    canonicalized_symmetry,
    mass_tagged_eigenspaces_gravitinos=(),
    mass_tagged_eigenspaces_fermions=(),
    mass_tagged_eigenspaces_scalars=(),
    # Note that we see cases where we have very uneven parity-mixtures.
    parity_tolerance=1e-7):
  """Computes so(3)^M x u(1)^N spectra.

  Args:
    canonicalized_symmetry: A `CanonicalizedSymmetry` object.
    mass_tagged_eigenspaces_gravitinos: Sequence of (mass, basis) pairs.
    mass_tagged_eigenspaces_fermions: Sequence of (mass, basis) pairs.
    mass_tagged_eigenspaces_scalars: Sequence of (mass, basis) pairs.
    parity_tolerance: Tolerance for classifying scalar parity eigenvalues
      as exactly +/-1.

  Returns:
    A pair (vsc_ad_branching, spectra).
  """
  vsc_ad_branching = spin3u1_decompose(canonicalized_symmetry)
  decomposition_tasks = [
      (SPIN8_ACTION_8V,
       _mass_eigenspace_tasks('gravitinos',
                              mass_tagged_eigenspaces_gravitinos)),
      (SPIN8_ACTION_FERMIONS,
       _mass_eigenspace_tasks('fermions', mass_tagged_eigenspaces_fermions)),
      (SPIN8_ACTION_SCALARS,
       _scalar_decomposition_tasks(mass_tagged_eigenspaces_scalars,
                                   parity_tolerance))]
  spectra = spin3u1_decompose(canonicalized_symmetry, decomposition_tasks)
  return vsc_ad_branching, spectra
| [
"numpy.eye",
"collections.namedtuple",
"numpy.allclose",
"math.sqrt",
"math.cos",
"numpy.stack",
"numpy.array",
"numpy.zeros",
"numpy.einsum",
"math.atan2",
"collections.Counter",
"collections.defaultdict",
"numpy.linalg.eigh",
"numpy.concatenate",
"numpy.dot",
"math.sin",
"numpy.zer... | [((1503, 1603), 'collections.namedtuple', 'collections.namedtuple', (['"""CanonicalizedSymmetry"""', "['u1s', 'semisimple_part', 'spin3_cartan_gens']"], {}), "('CanonicalizedSymmetry', ['u1s', 'semisimple_part',\n 'spin3_cartan_gens'])\n", (1525, 1603), False, 'import collections\n'), ((1982, 2041), 'collections.namedtuple', 'collections.namedtuple', (['"""Spin8Action"""', "['einsum', 'tensor']"], {}), "('Spin8Action', ['einsum', 'tensor'])\n", (2004, 2041), False, 'import collections\n'), ((5628, 5716), 'numpy.einsum', 'numpy.einsum', (['"""aik,ABik->aAB"""', 'algebra.su8.m_28_8_8', 'algebra.su8.m_action_56_56_8_8'], {}), "('aik,ABik->aAB', algebra.su8.m_28_8_8, algebra.su8.\n m_action_56_56_8_8)\n", (5640, 5716), False, 'import numpy\n'), ((7633, 7675), 'numpy.allclose', 'numpy.allclose', (['p', 'q'], {'rtol': '(0.01)', 'atol': '(0.01)'}), '(p, q, rtol=0.01, atol=0.01)\n', (7647, 7675), False, 'import numpy\n'), ((14723, 14819), 'numpy.stack', 'numpy.stack', (['[evec for _, espace in charge_tagged_eigenspaces for evec in espace.T]'], {'axis': '(-1)'}), '([evec for _, espace in charge_tagged_eigenspaces for evec in\n espace.T], axis=-1)\n', (14734, 14819), False, 'import numpy\n'), ((16561, 16619), 'numpy.array', 'numpy.array', (['[(x + 1.0j * y) for x, y in a_vecs_eigvals.T]'], {}), '([(x + 1.0j * y) for x, y in a_vecs_eigvals.T])\n', (16572, 16619), False, 'import numpy\n'), ((16773, 16801), 'math.atan2', 'math.atan2', (['z1.imag', 'z1.real'], {}), '(z1.imag, z1.real)\n', (16783, 16801), False, 'import math\n'), ((16816, 16831), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (16824, 16831), False, 'import math\n'), ((16846, 16861), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (16854, 16861), False, 'import math\n'), ((17589, 17609), 'numpy.zeros', 'numpy.zeros', (['[28, 0]'], {}), '([28, 0])\n', (17600, 17609), False, 'import numpy\n'), ((19557, 19644), 'numpy.einsum', 'numpy.einsum', (['"""aj,ak->jk"""', 
'spin8_subalgebra_generators', 'spin8_subalgebra_generators'], {}), "('aj,ak->jk', spin8_subalgebra_generators,\n spin8_subalgebra_generators)\n", (19569, 19644), False, 'import numpy\n'), ((24936, 24961), 'numpy.array', 'numpy.array', (['simple_roots'], {}), '(simple_roots)\n', (24947, 24961), False, 'import numpy\n'), ((24985, 25042), 'numpy.einsum', 'numpy.einsum', (['"""rj,rk->jk"""', 'a_simple_roots', 'a_simple_roots'], {}), "('rj,rk->jk', a_simple_roots, a_simple_roots)\n", (24997, 25042), False, 'import numpy\n'), ((25245, 25278), 'math.sqrt', 'math.sqrt', (['scaling_factor_squared'], {}), '(scaling_factor_squared)\n', (25254, 25278), False, 'import math\n'), ((28328, 28360), 'collections.Counter', 'collections.Counter', (['charge_vecs'], {}), '(charge_vecs)\n', (28347, 28360), False, 'import collections\n'), ((28372, 28400), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (28395, 28400), False, 'import collections\n'), ((32643, 32656), 'numpy.eye', 'numpy.eye', (['(70)'], {}), '(70)\n', (32652, 32656), False, 'import numpy\n'), ((5446, 5519), 'numpy.einsum', 'numpy.einsum', (['"""aij,bjk->abik"""', 'algebra.su8.m_28_8_8', 'algebra.su8.m_28_8_8'], {}), "('aij,bjk->abik', algebra.su8.m_28_8_8, algebra.su8.m_28_8_8)\n", (5458, 5519), False, 'import numpy\n'), ((6418, 6507), 'numpy.einsum', 'numpy.einsum', (['"""aij,ijAB->aAB"""', '(0.25 * algebra.su8.m_28_8_8)', 'algebra.spin8.gamma_vvss'], {}), "('aij,ijAB->aAB', 0.25 * algebra.su8.m_28_8_8, algebra.spin8.\n gamma_vvss)\n", (6430, 6507), False, 'import numpy\n'), ((6620, 6709), 'numpy.einsum', 'numpy.einsum', (['"""aij,ijAB->aAB"""', '(0.25 * algebra.su8.m_28_8_8)', 'algebra.spin8.gamma_vvcc'], {}), "('aij,ijAB->aAB', 0.25 * algebra.su8.m_28_8_8, algebra.spin8.\n gamma_vvcc)\n", (6632, 6709), False, 'import numpy\n'), ((7521, 7538), 'numpy.round', 'numpy.round', (['x', '(2)'], {}), '(x, 2)\n', (7532, 7538), False, 'import numpy\n'), ((8323, 8360), 'numpy.allclose', 
'numpy.allclose', (['eigvals', 'eigvals.real'], {}), '(eigvals, eigvals.real)\n', (8337, 8360), False, 'import numpy\n'), ((9644, 9706), 'numpy.einsum', 'numpy.einsum', (['"""avw,v->aw"""', 'algebra.e7.spin8_action_on_v70', 'v70'], {}), "('avw,v->aw', algebra.e7.spin8_action_on_v70, v70)\n", (9656, 9706), False, 'import numpy\n'), ((11438, 11504), 'numpy.einsum', 'numpy.einsum', (['gen_action_einsum', 'gen_action_tensor', 'commuting_gens'], {}), '(gen_action_einsum, gen_action_tensor, commuting_gens)\n', (11450, 11504), False, 'import numpy\n'), ((11616, 11647), 'numpy.eye', 'numpy.eye', (['gens_action.shape[0]'], {}), '(gens_action.shape[0])\n', (11625, 11647), False, 'import numpy\n'), ((15402, 15439), 'numpy.allclose', 'numpy.allclose', (['eigvals', 'eigvals.real'], {}), '(eigvals, eigvals.real)\n', (15416, 15439), False, 'import numpy\n'), ((16034, 16054), 'numpy.zeros', 'numpy.zeros', (['[28, 0]'], {}), '([28, 0])\n', (16045, 16054), False, 'import numpy\n'), ((16446, 16471), 'numpy.array', 'numpy.array', (['evec_charges'], {}), '(evec_charges)\n', (16457, 16471), False, 'import numpy\n'), ((17741, 17800), 'numpy.einsum', 'numpy.einsum', (['"""abc,bv->avc"""', '_spin8_fabc', 'residual_symmetry'], {}), "('abc,bv->avc', _spin8_fabc, residual_symmetry)\n", (17753, 17800), False, 'import numpy\n'), ((18411, 18476), 'numpy.einsum', 'numpy.einsum', (['"""av,aw->vw"""', 'residual_symmetry', 'derivative_symmetry'], {}), "('av,aw->vw', residual_symmetry, derivative_symmetry)\n", (18423, 18476), False, 'import numpy\n'), ((18800, 18873), 'numpy.einsum', 'numpy.einsum', (['"""av,vn->an"""', 'residual_symmetry', 'su.T[ss_ext <= threshold].T'], {}), "('av,vn->an', residual_symmetry, su.T[ss_ext <= threshold].T)\n", (18812, 18873), False, 'import numpy\n'), ((19514, 19534), 'numpy.zeros', 'numpy.zeros', (['[28, 0]'], {}), '([28, 0])\n', (19525, 19534), False, 'import numpy\n'), ((19853, 19900), 'numpy.eye', 'numpy.eye', (['spin8_subalgebra_generators.shape[1]'], {}), 
'(spin8_subalgebra_generators.shape[1])\n', (19862, 19900), False, 'import numpy\n'), ((20106, 20135), 'numpy.allclose', 'numpy.allclose', (['gen', 'gen.real'], {}), '(gen, gen.real)\n', (20120, 20135), False, 'import numpy\n'), ((20358, 20401), 'numpy.einsum', 'numpy.einsum', (['"""abc,a->cb"""', '_spin8_fabc', 'gen'], {}), "('abc,a->cb', _spin8_fabc, gen)\n", (20370, 20401), False, 'import numpy\n'), ((21303, 21338), 'numpy.stack', 'numpy.stack', (['nullspace_gens'], {'axis': '(1)'}), '(nullspace_gens, axis=1)\n', (21314, 21338), False, 'import numpy\n'), ((21350, 21391), 'numpy.allclose', 'numpy.allclose', (['nullspace', 'nullspace.real'], {}), '(nullspace, nullspace.real)\n', (21364, 21391), False, 'import numpy\n'), ((22020, 22120), 'numpy.einsum', 'numpy.einsum', (['"""aIJ,a->IJ"""', 'generator_action', '(seq_cartan_generators[num_cartan_generator] * 1.0j)'], {}), "('aIJ,a->IJ', generator_action, seq_cartan_generators[\n num_cartan_generator] * 1.0j)\n", (22032, 22120), False, 'import numpy\n'), ((30658, 30678), 'numpy.zeros', 'numpy.zeros', (['[28, 0]'], {}), '([28, 0])\n', (30669, 30678), False, 'import numpy\n'), ((32755, 32773), 'numpy.array', 'numpy.array', (['basis'], {}), '(basis)\n', (32766, 32773), False, 'import numpy\n'), ((32865, 32914), 'numpy.allclose', 'numpy.allclose', (['p_op_on_basis', 'p_op_on_basis.real'], {}), '(p_op_on_basis, p_op_on_basis.real)\n', (32879, 32914), False, 'import numpy\n'), ((32926, 32972), 'numpy.allclose', 'numpy.allclose', (['p_op_on_basis', 'p_op_on_basis.T'], {}), '(p_op_on_basis, p_op_on_basis.T)\n', (32940, 32972), False, 'import numpy\n'), ((33008, 33040), 'numpy.linalg.eigh', 'numpy.linalg.eigh', (['p_op_on_basis'], {}), '(p_op_on_basis)\n', (33025, 33040), False, 'import numpy\n'), ((33092, 33137), 'numpy.allclose', 'numpy.allclose', (['p_op_eigvals', 'p_op_eigvals_re'], {}), '(p_op_eigvals, p_op_eigvals_re)\n', (33106, 33137), False, 'import numpy\n'), ((33214, 33264), 'numpy.einsum', 'numpy.einsum', 
(['"""vn,vV->Vn"""', 'p_op_eigvecs_T', 'a_basis'], {}), "('vn,vV->Vn', p_op_eigvecs_T, a_basis)\n", (33226, 33264), False, 'import numpy\n'), ((15658, 15702), 'numpy.round', 'numpy.round', (['(factor * n * c)', 'round_to_digits'], {}), '(factor * n * c, round_to_digits)\n', (15669, 15702), False, 'import numpy\n'), ((19134, 19169), 'numpy.einsum', 'numpy.einsum', (['"""av,aw->vw"""', 'u1s', 'u1s'], {}), "('av,aw->vw', u1s, u1s)\n", (19146, 19169), False, 'import numpy\n'), ((19197, 19220), 'numpy.eye', 'numpy.eye', (['u1s.shape[1]'], {}), '(u1s.shape[1])\n', (19206, 19220), False, 'import numpy\n'), ((20283, 20328), 'numpy.stack', 'numpy.stack', (['cartan_generators_found'], {'axis': '(-1)'}), '(cartan_generators_found, axis=-1)\n', (20294, 20328), False, 'import numpy\n'), ((20518, 20597), 'numpy.einsum', 'numpy.einsum', (['"""bc,cj->bj"""', '(gen_ad_action_on_spin8 * 1.0j)', 'orthogonal_subalgebra'], {}), "('bc,cj->bj', gen_ad_action_on_spin8 * 1.0j, orthogonal_subalgebra)\n", (20530, 20597), False, 'import numpy\n'), ((20794, 20847), 'numpy.zeros_like', 'numpy.zeros_like', (['gen_action_on_orthogonal_subalgebra'], {}), '(gen_action_on_orthogonal_subalgebra)\n', (20810, 20847), False, 'import numpy\n'), ((21241, 21286), 'numpy.stack', 'numpy.stack', (['cartan_generators_found'], {'axis': '(-1)'}), '(cartan_generators_found, axis=-1)\n', (21252, 21286), False, 'import numpy\n'), ((21440, 21487), 'numpy.einsum', 'numpy.einsum', (['"""ai,aj->ij"""', 'nullspace', 'nullspace'], {}), "('ai,aj->ij', nullspace, nullspace)\n", (21452, 21487), False, 'import numpy\n'), ((21515, 21544), 'numpy.eye', 'numpy.eye', (['nullspace.shape[1]'], {}), '(nullspace.shape[1])\n', (21524, 21544), False, 'import numpy\n'), ((25505, 25543), 'numpy.eye', 'numpy.eye', (['simple_root_sprods.shape[0]'], {}), '(simple_root_sprods.shape[0])\n', (25514, 25543), False, 'import numpy\n'), ((26118, 26174), 'numpy.einsum', 'numpy.einsum', (['"""abc,a,b->c"""', '_spin8_fabc', 'l_plus', 'l_minus'], 
{}), "('abc,a,b->c', _spin8_fabc, l_plus, l_minus)\n", (26130, 26174), False, 'import numpy\n'), ((26402, 26466), 'numpy.stack', 'numpy.stack', (['canonicalized_cartan_subalgebra_generators'], {'axis': '(-1)'}), '(canonicalized_cartan_subalgebra_generators, axis=-1)\n', (26413, 26466), False, 'import numpy\n'), ((30569, 30607), 'numpy.concatenate', 'numpy.concatenate', (['list_commuting_gens'], {}), '(list_commuting_gens)\n', (30586, 30607), False, 'import numpy\n'), ((31983, 32001), 'numpy.array', 'numpy.array', (['basis'], {}), '(basis)\n', (31994, 32001), False, 'import numpy\n'), ((32305, 32323), 'numpy.array', 'numpy.array', (['basis'], {}), '(basis)\n', (32316, 32323), False, 'import numpy\n'), ((34649, 34684), 'numpy.array', 'numpy.array', (['merged_subspaces_other'], {}), '(merged_subspaces_other)\n', (34660, 34684), False, 'import numpy\n'), ((7129, 7141), 'numpy.eye', 'numpy.eye', (['(8)'], {}), '(8)\n', (7138, 7141), False, 'import numpy\n'), ((7185, 7197), 'numpy.eye', 'numpy.eye', (['(8)'], {}), '(8)\n', (7194, 7197), False, 'import numpy\n'), ((7241, 7253), 'numpy.eye', 'numpy.eye', (['(8)'], {}), '(8)\n', (7250, 7253), False, 'import numpy\n'), ((12950, 12995), 'numpy.einsum', 'numpy.einsum', (['"""ab,bj->aj"""', 'gen_action', 'espace'], {}), "('ab,bj->aj', gen_action, espace)\n", (12962, 12995), False, 'import numpy\n'), ((13238, 13281), 'numpy.einsum', 'numpy.einsum', (['"""gs,s->g"""', 'espace', 'sub_eigvec'], {}), "('gs,s->g', espace, sub_eigvec)\n", (13250, 13281), False, 'import numpy\n'), ((14262, 14291), 'numpy.stack', 'numpy.stack', (['eigvecs'], {'axis': '(-1)'}), '(eigvecs, axis=-1)\n', (14273, 14291), False, 'import numpy\n'), ((21056, 21091), 'numpy.allclose', 'numpy.allclose', (['eigvec', 'eigvec.real'], {}), '(eigvec, eigvec.real)\n', (21070, 21091), False, 'import numpy\n'), ((22327, 22355), 'numpy.eye', 'numpy.eye', (['subspace.shape[1]'], {}), '(subspace.shape[1])\n', (22336, 22355), False, 'import numpy\n'), ((22686, 22736), 
'numpy.einsum', 'numpy.einsum', (['"""JK,Kn->Jn"""', 'cartan_action', 'subspace'], {}), "('JK,Kn->Jn', cartan_action, subspace)\n", (22698, 22736), False, 'import numpy\n'), ((26704, 26737), 'math.sqrt', 'math.sqrt', (['scaling_factor_squared'], {}), '(scaling_factor_squared)\n', (26713, 26737), False, 'import math\n'), ((33946, 33967), 'numpy.array', 'numpy.array', (['subspace'], {}), '(subspace)\n', (33957, 33967), False, 'import numpy\n'), ((34139, 34160), 'numpy.array', 'numpy.array', (['subspace'], {}), '(subspace)\n', (34150, 34160), False, 'import numpy\n'), ((7412, 7425), 'numpy.eye', 'numpy.eye', (['(28)'], {}), '(28)\n', (7421, 7425), False, 'import numpy\n'), ((12707, 12733), 'numpy.eye', 'numpy.eye', (['espace.shape[1]'], {}), '(espace.shape[1])\n', (12716, 12733), False, 'import numpy\n'), ((13333, 13362), 'numpy.dot', 'numpy.dot', (['gen_action', 'eigvec'], {}), '(gen_action, eigvec)\n', (13342, 13362), False, 'import numpy\n'), ((14400, 14431), 'numpy.eye', 'numpy.eye', (['eigenspace.shape[-1]'], {}), '(eigenspace.shape[-1])\n', (14409, 14431), False, 'import numpy\n'), ((21135, 21194), 'numpy.einsum', 'numpy.einsum', (['"""ai,i->a"""', 'orthogonal_subalgebra', 'eigvec.real'], {}), "('ai,i->a', orthogonal_subalgebra, eigvec.real)\n", (21147, 21194), False, 'import numpy\n'), ((12817, 12849), 'numpy.round', 'numpy.round', (['eigenspace_sprod', '(3)'], {}), '(eigenspace_sprod, 3)\n', (12828, 12849), False, 'import numpy\n'), ((15530, 15547), 'numpy.round', 'numpy.round', (['c', '(6)'], {}), '(c, 6)\n', (15541, 15547), False, 'import numpy\n'), ((13508, 13526), 'numpy.round', 'numpy.round', (['gv', '(3)'], {}), '(gv, 3)\n', (13519, 13526), False, 'import numpy\n'), ((13528, 13546), 'numpy.round', 'numpy.round', (['ev', '(3)'], {}), '(ev, 3)\n', (13539, 13546), False, 'import numpy\n'), ((23179, 23215), 'numpy.stack', 'numpy.stack', (['rel_eigenspace'], {'axis': '(-1)'}), '(rel_eigenspace, axis=-1)\n', (23190, 23215), False, 'import numpy\n')] |
#!/usr/bin/env python3
import argparse
import png
import numpy as np
import csv
from matplotlib import pyplot as plt
# map a scalar value to a color from a colormap
def map_to_color(scalar, colormap):
    """Interpolate a color for `scalar` from `colormap`.

    `colormap` is an (N, 4) array whose first column holds ascending scalar
    positions and whose remaining columns hold the color at each position.
    Returns None when `scalar` is None.
    """
    if scalar is None:
        return None
    # binary search for the colormap segment bracketing the scalar
    lo, hi = 0, colormap.shape[0] - 1
    while hi - lo > 1:
        mid = (hi + lo) // 2
        if colormap[mid, 0] <= scalar:
            lo = mid
        else:
            hi = mid
    # linearly interpolate between the bracketing colormap entries
    slo, clo = colormap[lo, 0], colormap[lo, 1:]
    shi, chi = colormap[hi, 0], colormap[hi, 1:]
    t = (scalar - slo) / (shi - slo)
    return (1 - t) * clo + t * chi
# map a color from a colormap to a scalar
# if no color within tolerance tol is found, return none
def map_to_scalar(color, colormap, tol):
    """Map a color back to its scalar position in `colormap`.

    Projects `color` onto each colormap segment; if the projection falls
    inside the segment and the perpendicular distance is within `tol`,
    the interpolated scalar is returned.

    Args:
        color: color vector to look up.
        colormap: (N, 4) array [scalar, r, g, b].
        tol: maximum perpendicular distance to a segment.

    Returns:
        The interpolated scalar, or None if no segment is within tol.
    """
    last_segment = colormap.shape[0] - 2
    for i in range(colormap.shape[0] - 1):
        clo = colormap[i, 1:]
        chi = colormap[i + 1, 1:]
        cd = chi - clo
        cc = color - clo
        seg_sq = np.dot(cd, cd)
        if seg_sq == 0:
            # degenerate segment (repeated color): cannot project onto it
            continue
        t = np.dot(cd, cc) / seg_sq
        d = np.linalg.norm(cc - t * cd)
        # Include t == 1 on the final segment so that the exact last
        # colormap color is found (t < 1 alone would reject it).
        if t >= 0 and (t < 1 or (i == last_segment and t <= 1)) and d <= tol:
            slo = colormap[i, 0]
            shi = colormap[i + 1, 0]
            return (1 - t) * slo + t * shi
    return None
def normalize_cmap(cmap, dtype=None, invert=False):
    """Normalize the color values to 0...1 depending on data type and add a
    scalar column with equidistant values if none is present."""
    rows, cols = cmap.shape
    # prepend an equidistant scalar column when only r, g, b are given
    if cols == 3:
        scalars = [[i] for i in range(rows)]
        cmap = np.concatenate((scalars, cmap), 1)
    # guess the color data type when not specified
    if dtype is None:
        dtype = 'int' if np.max(cmap[:, 1:]) > 1 else 'float'
    if dtype == 'int':
        # byte colors: scale 0...255 down to 0...1
        cmap[:, 1:] = cmap[:, 1:] / 255.0
    if invert:
        # negate the scalars and flip the rows to reverse the colormap
        cmap[:, 0] = -cmap[:, 0]
        cmap = np.flip(cmap, 0)
    # rescale the scalar column to the range 0...1
    lo = np.min(cmap[:, 0])
    hi = np.max(cmap[:, 0])
    cmap[:, 0] = (cmap[:, 0] - lo) / (hi - lo)
    return cmap
def read_cmap(filename, invert=False, delimiter=None, dtype=None):
    """Read a colormap from a csv file and normalize it.

    Returns an (N, 4) array [scalar, r, g, b] with values in 0...1.
    """
    with open(filename) as csvfile:
        # let the csv module guess the file's dialect from a sample
        sample = csvfile.read(1024)
        dialect = csv.Sniffer().sniff(sample, delimiters=delimiter)
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect)
        dcmap = np.vstack(map(np.double, reader))
    # Check for correct number of rows and columns
    (rows, cols) = dcmap.shape
    if cols < 3 or cols > 4:
        print("Error reading csv file \"{}\": detected {} columns. ".format(filename, cols) +
              "Valid csv files need to have 3 columns (r, g, b) or 4 columns (scalar, r, g, b).")
    if rows < 2:
        print("Error reading csv file \"{}\": detected {} rows. ".format(filename, rows) +
              "I need at least 2 rows to construct a colormap.")
    # normalize colors and ensure scalar column
    return normalize_cmap(dcmap, dtype=dtype, invert=invert)
def read_img(filename):
    """Read a png image into a float array with channel values in 0...1."""
    reader = png.Reader(filename)
    # NOTE(review): pypng's asDirect() returns (width, height, rows, meta),
    # so the (rows, cols) names here are swapped; the reshape below
    # compensates, yielding a (height, width, planes) array — verify.
    (rows, cols, pngdata, meta) = reader.asDirect()
    image_2d = np.vstack(map(np.uint16, pngdata))
    image_3d = np.reshape(image_2d, (cols, rows, meta['planes']))
    return np.double(image_3d) / 255
def write_img(img, filename):
    """Write a float image (values 0...1) as an 8-bit-per-channel RGB png."""
    img_bytes = (np.floor(img * 255)).astype(np.uint16)
    png.from_array(img_bytes.tolist(), mode='RGB').save(filename)
def remap_img(img, cmap_in, cmap_out, tol, spread=False):
    """Remap a color-mapped image from `cmap_in` to `cmap_out`.

    Args:
        img: (rows, cols, 3) float image with values in 0...1.
        cmap_in: colormap the image was originally rendered with.
        cmap_out: colormap to remap the image to.
        tol: tolerance for the reverse color lookup in cmap_in.
        spread: if True, rescale the recovered scalars to span [0, 1].

    Returns:
        A new image array; pixels whose color could not be matched in
        cmap_in (within tol) keep their original color.
    """
    scalar_field = np.apply_along_axis(lambda c: map_to_scalar(c, cmap_in, tol),
                                        2,
                                        img)
    if spread:
        # NOTE(review): if any pixel failed the reverse lookup,
        # scalar_field contains None entries and min/max raise TypeError.
        smin = np.min(scalar_field)
        smax = np.max(scalar_field)
        scalar_field = (scalar_field - smin) / (smax - smin)
    # Work on a copy so the caller's input image is not mutated in place
    # (the original aliased `img_r = img`, silently modifying the input).
    img_r = img.copy()
    for i in range(img_r.shape[0]):
        for j in range(img_r.shape[1]):
            color = map_to_color(scalar_field[i, j], cmap_out)
            if color is not None:
                img_r[i, j, :] = color
    return img_r
def main():
    """Command-line entry point: parse arguments, read the input image and
    both colormaps, remap the image colors and write the result."""
    # todo: read (and write) different image file formats?
    # todo: what to do with alpha channel?
    # - in input image
    # - in input colormap
    # - in output colormap
    # todo: how to make it faster?
    # Parameters:
    # - Input file
    # - Output file
    # - Input colormap
    # - Output colormap
    # - Tolerance for reverse color lookup
    # - color format (float or byte)
    # - colormap reading options
    # - separator
    # - skip first line
    # - with or without scalar in column
    # - with or without alpha channel
    # - which column for scalar
    # - colormap transformation options
    # - leave scalars untouched (if any) or normalize to [0 1]
    parser = argparse.ArgumentParser(description="Remap colors of a " +
                                     "color-mapped image to another color map. \n"+
                                     "Input and output color maps are specified as csv files with " +
                                     "three columns for r, g, b and an optional first column specifying " +
                                     "the position of the color in the color map.")
    parser.add_argument("input", help="Input image")
    parser.add_argument("cmap_in", help="Input colormap (as csv file)")
    parser.add_argument("cmap_out", help="Output colormap (as csv file)")
    parser.add_argument("output", help="Output image", nargs='?',
                        default="out.png")
    parser.add_argument("-t", "--tolerance", type=float,
                        help="Tolerance for reverse color lookup",
                        default=0.01)
    parser.add_argument("-d", "--color-dtype",
                        help="Data type for color values in the csv files"+
                        " (float 0...1 or int 0...255)." +
                        " Estimated automatically by default.",
                        choices=['float', 'int'])
    parser.add_argument("-s", "--separator",
                        help="Separator for elements in the csv file",
                        default=',')
    parser.add_argument("-i", "--invert", help="Invert the output color map",
                        action='store_true')
    parser.add_argument("--spread",
                        help="Normalize the scalars to spread the whole "+
                        "output colormap range. Default: use same range "+
                        "as input colormap",
                        action='store_true')
    args = parser.parse_args()
    img = read_img(args.input)
    # remove alpha channel
    if img.shape[2] > 3:
        img = img[:, :, 0:3]
    # read both colormaps with the same csv parsing options
    cmap_in = read_cmap(args.cmap_in,
                        delimiter=args.separator,
                        dtype=args.color_dtype)
    cmap_out = read_cmap(args.cmap_out,
                         invert=args.invert,
                         delimiter=args.separator,
                         dtype=args.color_dtype)
    # remap pixel colors and write the result image
    img_r = remap_img(img, cmap_in, cmap_out, args.tolerance, args.spread)
    write_img(img_r, args.output)
# Run the command-line interface only when executed as a script.
if __name__ == "__main__":
    main()
| [
"numpy.flip",
"numpy.reshape",
"png.Reader",
"argparse.ArgumentParser",
"numpy.double",
"numpy.linalg.norm",
"numpy.floor",
"numpy.max",
"numpy.dot",
"csv.Sniffer",
"numpy.min",
"csv.reader"
] | [((2140, 2158), 'numpy.min', 'np.min', (['cmap[:, 0]'], {}), '(cmap[:, 0])\n', (2146, 2158), True, 'import numpy as np\n'), ((2174, 2192), 'numpy.max', 'np.max', (['cmap[:, 0]'], {}), '(cmap[:, 0])\n', (2180, 2192), True, 'import numpy as np\n'), ((3308, 3328), 'png.Reader', 'png.Reader', (['filename'], {}), '(filename)\n', (3318, 3328), False, 'import png\n'), ((3441, 3491), 'numpy.reshape', 'np.reshape', (['image_2d', "(cols, rows, meta['planes'])"], {}), "(image_2d, (cols, rows, meta['planes']))\n", (3451, 3491), True, 'import numpy as np\n'), ((5070, 5378), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': "('Remap colors of a ' + 'color-mapped image to another color map. \\n' +\n 'Input and output color maps are specified as csv files with ' +\n 'three columns for r, g, b and an optional first column specifying ' +\n 'the position of the color in the color map.')"}), '(description=\'Remap colors of a \' +\n """color-mapped image to another color map. \n""" +\n \'Input and output color maps are specified as csv files with \' +\n \'three columns for r, g, b and an optional first column specifying \' +\n \'the position of the color in the color map.\')\n', (5093, 5378), False, 'import argparse\n'), ((1197, 1224), 'numpy.linalg.norm', 'np.linalg.norm', (['(cc - t * cd)'], {}), '(cc - t * cd)\n', (1211, 1224), True, 'import numpy as np\n'), ((2055, 2071), 'numpy.flip', 'np.flip', (['cmap', '(0)'], {}), '(cmap, 0)\n', (2062, 2071), True, 'import numpy as np\n'), ((2532, 2560), 'csv.reader', 'csv.reader', (['csvfile', 'dialect'], {}), '(csvfile, dialect)\n', (2542, 2560), False, 'import csv\n'), ((3529, 3548), 'numpy.double', 'np.double', (['image_3d'], {}), '(image_3d)\n', (3538, 3548), True, 'import numpy as np\n'), ((3960, 3980), 'numpy.min', 'np.min', (['scalar_field'], {}), '(scalar_field)\n', (3966, 3980), True, 'import numpy as np\n'), ((3996, 4016), 'numpy.max', 'np.max', (['scalar_field'], {}), '(scalar_field)\n', (4002, 4016), 
True, 'import numpy as np\n'), ((1144, 1158), 'numpy.dot', 'np.dot', (['cd', 'cc'], {}), '(cd, cc)\n', (1150, 1158), True, 'import numpy as np\n'), ((3601, 3620), 'numpy.floor', 'np.floor', (['(img * 255)'], {}), '(img * 255)\n', (3609, 3620), True, 'import numpy as np\n'), ((1162, 1180), 'numpy.linalg.norm', 'np.linalg.norm', (['cd'], {}), '(cd)\n', (1176, 1180), True, 'import numpy as np\n'), ((1818, 1837), 'numpy.max', 'np.max', (['cmap[:, 1:]'], {}), '(cmap[:, 1:])\n', (1824, 1837), True, 'import numpy as np\n'), ((2434, 2447), 'csv.Sniffer', 'csv.Sniffer', ([], {}), '()\n', (2445, 2447), False, 'import csv\n')] |
import math

import numpy as np

from sdia_python.lab2.utils import get_random_number_generator
class BallWindow:
    """Represents a ball in any dimension, defined by a center and a radius"""

    def __init__(self, center, radius):
        """Initializes a ball with a center and a radius. The radius must be positive.

        Args:
            center (numpy.array): coordinates of the center point
            radius (float): float representing the radius
        """
        center = np.array(center)
        assert len(center)
        assert radius >= 0
        self.center = center
        self.radius = float(radius)

    def __str__(self):
        return f"BallWindow: ({str(self.center)}, {str(self.radius)})"

    def __len__(self):
        """Returns the dimension of the ball

        Returns:
            int : dimension of the ball
        """
        return len(self.center)

    def __contains__(self, point):
        """Tells whether a point is contained in the (closed) ball

        Args:
            point (numpy.array): the point to test

        Returns:
            boolean: if it is contained in the ball
        """
        point = np.array(point)
        assert self.dimension() == point.size
        return np.linalg.norm(point - self.center) <= self.radius

    def dimension(self):
        """Returns the dimension of the ball

        Returns:
            int : dimension of the ball
        """
        return len(self)

    def volume(self):
        """Returns the volume of the ball. The formula for the volume of an n-ball is used : https://fr.wikipedia.org/wiki/N-sph%C3%A8re

        Returns:
            float : volume of the ball
        """
        n = self.dimension()
        R = self.radius
        if n % 2 == 0:
            # even dimension: pi^(n/2) R^n / (n/2)!
            # math.factorial needs an int (np.math was removed in NumPy 2.0,
            # and factorial rejects float arguments on Python >= 3.12)
            return np.pi ** (n / 2) * R ** n / math.factorial(n // 2)
        else:
            # odd dimension: 2^((n+1)/2) pi^((n-1)/2) R^n / n!!
            # (np.prod replaces np.product, removed in NumPy 2.0)
            double_factorial = np.prod(np.arange(1, n + 1, 2))
            return (
                2 ** ((n + 1) / 2) * np.pi ** ((n - 1) / 2) * R ** n
                / double_factorial
            )

    def indicator_function(self, points):
        """Returns true if all points are in the ball.

        Args:
            points (np.array): Array of points to test

        Returns:
            bool: True if all points are in the ball.
        """
        return np.all([point in self for point in points])

    def rand(self, n=1, rng=None):
        """Generates n points uniformly at random inside the ball.

        Args:
            n (int, optional): Number of points generated. Defaults to 1.
            rng (optional): Seed or generator forwarded to
                ``get_random_number_generator``. Defaults to None.

        Returns:
            numpy.array: (n, dimension) array containing the n generated points
        """
        rng = get_random_number_generator(rng)
        d = self.dimension()
        # Directions from normalized Gaussian samples are isotropic; the
        # previous uniform-[0,1) coordinates only ever pointed into the
        # positive orthant, so most of the ball was never sampled.
        # (assumes rng exposes the numpy Generator API: normal/uniform)
        gauss = rng.normal(size=(n, d))
        directions = gauss / np.linalg.norm(gauss, axis=1, keepdims=True)
        # Radii distributed as R * U^(1/d) yield uniform density in the ball
        # (plain uniform radii over-sample the region near the center).
        radii = self.radius * rng.uniform(size=(n, 1)) ** (1 / d)
        return self.center + directions * radii
| [
"numpy.product",
"sdia_python.lab2.utils.get_random_number_generator",
"numpy.array",
"numpy.linalg.norm",
"numpy.all",
"numpy.math.factorial"
] | [((478, 494), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (486, 494), True, 'import numpy as np\n'), ((1139, 1154), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (1147, 1154), True, 'import numpy as np\n'), ((2342, 2387), 'numpy.all', 'np.all', (['[(point in self) for point in points]'], {}), '([(point in self) for point in points])\n', (2348, 2387), True, 'import numpy as np\n'), ((2727, 2759), 'sdia_python.lab2.utils.get_random_number_generator', 'get_random_number_generator', (['rng'], {}), '(rng)\n', (2754, 2759), False, 'from sdia_python.lab2.utils import get_random_number_generator\n'), ((3184, 3240), 'numpy.array', 'np.array', (['[(vector + self.center) for vector in vectors]'], {}), '([(vector + self.center) for vector in vectors])\n', (3192, 3240), True, 'import numpy as np\n'), ((1217, 1252), 'numpy.linalg.norm', 'np.linalg.norm', (['(point - self.center)'], {}), '(point - self.center)\n', (1231, 1252), True, 'import numpy as np\n'), ((1977, 1993), 'numpy.product', 'np.product', (['odds'], {}), '(odds)\n', (1987, 1993), True, 'import numpy as np\n'), ((1829, 1853), 'numpy.math.factorial', 'np.math.factorial', (['(n / 2)'], {}), '(n / 2)\n', (1846, 1853), True, 'import numpy as np\n'), ((2878, 2903), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (2892, 2903), True, 'import numpy as np\n')] |
from soccerdepth.data.dataset_loader import get_set
import numpy as np
import utils.files as file_utils
from os.path import join
import argparse
from soccerdepth.models.hourglass import hg8
from soccerdepth.models.utils import weights_init
from soccerdepth.data.data_utils import image_logger_converter_visdom
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from soccerdepth.data.transforms import *
from torchvision import transforms
import warnings
from visdom import Visdom
# Suppress library warnings (e.g. deprecation noise from torch/visdom).
warnings.filterwarnings("ignore")
# Command-line configuration for the depth auto-encoder training run.
parser = argparse.ArgumentParser(description='Depth AutoEncoder')
parser.add_argument('--dataset', default='', help='Dataset to train on')
parser.add_argument('--batchSize', type=int, default=6, help='training batch size')
parser.add_argument('--testBatchSize', type=int, default=2, help='testing batch size')
parser.add_argument('--nEpochs', type=int, default=1000, help='number of epochs to train for')
parser.add_argument('--input_nc', type=int, default=4, help='input image channels')
parser.add_argument('--output_nc', type=int, default=51, help='output image channels')
parser.add_argument('--nf', type=int, default=64, help='number of filters')
parser.add_argument('--lr', type=float, default=0.00003, help='Learning Rate. Default=0.002')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='use cuda?')
parser.add_argument('--threads', type=int, default=4, help='number of threads for data loader to use')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
parser.add_argument('--lamb', type=int, default=100, help='weight on L1 term in objective')
parser.add_argument('--run', type=int, default=44, help='Run number for tensorboard')
parser.add_argument('--output_path', default='/home/krematas/Mountpoints/grail/CNN', help='Where the files WILL be stored')
parser.add_argument('--modeldir', default='/home/krematas/Mountpoints/grail/CNN', help='Where the files ARE being stored')
parser.add_argument('--additional_input', default='mask', help='filepostfix')
parser.add_argument('--postfix', default='hg_estmask', help='filepostfix')
parser.add_argument('--resume', type=int, default=0, help='Resume training')
parser.add_argument('--port', type=int, default=9876, help='Run number for tensorboard')
parser.add_argument('--additional_input_type', default='estmask', help='The type of addtional type to load [estmap, trimap]')
# parse_known_args: unknown flags are ignored instead of raising.
opt, _ = parser.parse_known_args()
# NOTE(review): this unconditionally overrides the --cuda flag, so the
# script always runs in CUDA mode regardless of what the user passed.
opt.cuda = True
print(opt)
# Initiate 5 windows
# Connect to a running Visdom server and create the plotting windows:
# four image panes (win0..win3) seeded with dummy data, plus two line
# plots (per-iteration loss and per-epoch loss).  Requires a Visdom
# server listening on opt.port.
viz = Visdom(env='FIFA CNN training', port=opt.port)
viz.close()
# Placeholder images so the window handles exist before training starts.
win0 = viz.images(np.ones((4, 3, 128, 128)))
win1 = viz.images(np.ones((4, 3, 128, 128)))
win2 = viz.images(np.ones((4, 3, 128, 128)))
win3 = viz.images(np.ones((4, 3, 128, 128)))
# Line plot updated once per epoch with the average training loss.
epoch_lot = viz.line(X=torch.zeros((1,)).cpu(), Y=torch.zeros((1,)).cpu(),
                     opts=dict(
                         xlabel='Epoch',
                         ylabel='Loss',
                         title='Epoch Training Loss',
                         legend=['Loss'])
                     )
# Line plot updated every iteration with the current batch loss.
lot = viz.line(
    X=torch.zeros((1,)).cpu(),
    Y=torch.zeros((1,)).cpu(),
    opts=dict(
        xlabel='Iteration',
        ylabel='Loss',
        title='Current Training Loss',
        legend=['Loss']
    )
)
# writer = SummaryWriter("runs/run%d" % opt.run)
# NOTE(review): 'writer' is never created (line above is commented out),
# so any later writer.* calls would raise NameError.
if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")
cudnn.benchmark = True
torch.manual_seed(opt.seed)
if opt.cuda:
    torch.cuda.manual_seed(opt.seed)
# Dataset location: <platform data dir>/play_for_data/cnn2
dataset = 'play_for_data'
path_to_data = join(file_utils.get_platform_datadir(dataset), 'cnn2')
print('===> Loading datasets')
# Training pipeline includes augmentation (rotation, crop, color offset);
# the test pipeline below only rescales and normalizes.
composed = transforms.Compose([RandomRotation(), RandomCrop(), Rescale(256, 64), ColorOffset(), ToTensor(), NormalizeImage()])
train_set = get_set(join(path_to_data, 'train'), nbins=opt.output_nc, transform=composed, additional_input_type=opt.additional_input_type)
composed = transforms.Compose([Rescale(256, 64), ToTensor(), NormalizeImage()])
test_set = get_set(join(path_to_data, 'test'), nbins=opt.output_nc, transform=composed, additional_input_type=opt.additional_input_type)
training_data_loader = DataLoader(dataset=train_set, num_workers=8, batch_size=opt.batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=8, batch_size=opt.testBatchSize, shuffle=False)
print('===> Building model')
# 8-stack hourglass network predicting opt.output_nc depth bins per pixel.
model = hg8(input_nc=opt.input_nc, output_nc=opt.output_nc)
model.apply(weights_init)
print('===> The loss function is cross entropy loss')
# Down-weight class 0 (presumably the background bin — TODO confirm).
class_weights = np.ones((opt.output_nc, ))
class_weights[0] = 0.1
weights = torch.from_numpy(class_weights)
# Convert the float64 numpy weights to a FloatTensor on the GPU.
weights = torch.FloatTensor(weights.size()).copy_(weights).cuda()
criterion = nn.NLLLoss2d(weight=weights)
logsoftmax = nn.LogSoftmax()
# Setup the Adam optimizer
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=0.003)
# Resume if available
# NOTE(review): the local name 'checkpoint' is later shadowed by the
# checkpoint() function definition below; harmless here but confusing.
if opt.resume > 0:
    checkpoint = torch.load(join(opt.modeldir, 'model_epoch_%d_%s_%s.pth' % (opt.resume, opt.additional_input, opt.postfix)))
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
if opt.cuda:
    model = model.cuda()
    criterion = criterion.cuda()
def train(epoch):
    """Run one training epoch over training_data_loader.

    Uses the module-level model, criterion, logsoftmax, optimizer, viz
    windows and opt configuration.  Plots the batch loss every iteration
    and sample images every 50 iterations via Visdom.

    Args:
        epoch (int): 1-based epoch index (used for plotting x-coordinates).

    Returns:
        float: average training loss over the epoch.
    """
    epoch_loss = 0
    for iteration, batch in enumerate(training_data_loader, 1):
        # NOTE: 'input' shadows the builtin; kept as-is from the original.
        input, target, mask = Variable(batch['image']).float(), Variable(batch['target']).long(), Variable(batch['mask']).float()
        if opt.input_nc > 3:
            # Extra input channel(s): concatenate the mask onto the image.
            input = torch.cat((input, mask), 1)
        if opt.cuda:
            input = input.cuda()
            target = target.cuda()
        output = model(input)
        # The hourglass returns intermediate predictions; sum the NLL loss
        # over every stack output (deep supervision).
        loss = criterion(logsoftmax(output[0]), target.squeeze())
        for j in range(1, len(output)):
            loss += criterion(logsoftmax(output[j]), target.squeeze())
        epoch_loss += loss.data[0]
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(epoch, iteration, len(training_data_loader), loss.data[0]))
        if iteration % 50 == 0:
            # Periodically push input/mask/target/prediction images to Visdom.
            prediction = logsoftmax(output[-1])
            x, y, z, w = image_logger_converter_visdom(input, mask, target, prediction)
            viz.images(
                x,
                win=win0,
            )
            viz.images(
                y,
                win=win1,
            )
            viz.images(
                w,
                win=win2,
            )
            viz.images(
                z,
                win=win3,
            )
            # writer.add_image('Train_image', x, epoch)
            # writer.add_image('Train_prediction', y, epoch)
            # writer.add_image('Train_label', z, epoch)
        # print(torch.ones((1,)).cpu().size())
        # print(torch.Tensor([loss.data[0]]).unsqueeze(0).cpu().size())
        # Append the current batch loss to the per-iteration line plot.
        viz.line(
            X=torch.ones((1, 1)).cpu() * ((epoch-1)*len(training_data_loader) + iteration),
            Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(),
            win=lot,
            update='append'
        )
        # hacky fencepost solution for 0th epoch plot
        if epoch == 1 and iteration == len(training_data_loader)-1:
            viz.line(
                X=torch.zeros((1, 1)).cpu(),
                Y=torch.Tensor([loss.data[0]]).unsqueeze(0).cpu(),
                win=epoch_lot,
                update=True
            )
    # Append this epoch's average loss to the per-epoch line plot.
    viz.line(
        X=torch.ones((1, 1)).cpu()*epoch,
        Y=torch.Tensor([epoch_loss/len(training_data_loader)]).unsqueeze(0).cpu(),
        win=epoch_lot,
        update='append'
    )
    print("===> Epoch {} Complete: Avg. Loss: {:.6f}".format(epoch, epoch_loss / len(training_data_loader)))
    return epoch_loss / len(training_data_loader)
def test():
    """Evaluate the model over testing_data_loader without optimizing.

    Same forward pass and summed multi-stack loss as train(), but no
    backward/optimizer step; the second batch's images are pushed to the
    Visdom windows for visual inspection.

    Returns:
        float: average validation loss over the test set.
    """
    epoch_loss = 0
    for iteration, batch in enumerate(testing_data_loader):
        input, target, mask = Variable(batch['image']).float(), Variable(batch['target']).long(), Variable(batch['mask']).float()
        if opt.input_nc > 3:
            # Extra input channel(s): concatenate the mask onto the image.
            input = torch.cat((input, mask), 1)
        if opt.cuda:
            input = input.cuda()
            target = target.cuda()
        output = model(input)
        loss = 0
        # Sum the NLL loss over every intermediate hourglass output.
        for o in output:
            loss += criterion(logsoftmax(o), target.squeeze())
        epoch_loss += loss.data[0]
        if iteration == 1:
            # Visualize one validation batch (enumerate starts at 0 here,
            # so this is the second batch).
            prediction = logsoftmax(output[-1])
            x, y, z, w = image_logger_converter_visdom(input, mask, target, prediction)
            viz.images(
                x,
                win=win0,
            )
            viz.images(
                y,
                win=win1,
            )
            viz.images(
                w,
                win=win2,
            )
            viz.images(
                z,
                win=win3,
            )
    print("===> Validation Complete: Avg. Loss: {:.6f}".format(epoch_loss / len(testing_data_loader)))
    return epoch_loss / len(testing_data_loader)
def checkpoint(epoch):
    """Serialize the current training state for *epoch* to disk.

    Saves the epoch number plus the model and optimizer state dicts into
    opt.output_path, with a file name that encodes the run configuration.

    Args:
        epoch (int): epoch index embedded in the checkpoint file name.
    """
    model_out_path = "{0}/model_epoch_{1}_{2}_{3}.pth".format(opt.output_path, epoch, opt.additional_input, opt.postfix)
    # Everything needed to resume training later.
    state = dict(
        epoch=epoch,
        state_dict=model.state_dict(),
        optimizer=optimizer.state_dict(),
    )
    torch.save(state, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))
# Main loop: train one epoch, validate, and checkpoint, starting after any
# resumed epoch.
# FIX: the original called writer.add_scalar(...) / writer.close() here, but
# the SummaryWriter creation above is commented out, so 'writer' was undefined
# and the script crashed with NameError at the end of the first epoch.  The
# tensorboard calls are disabled to match; losses are still plotted via Visdom
# inside train()/test().
for epoch in range(opt.resume+1, opt.nEpochs + 1):
    v1 = train(epoch)
    v2 = test()
    # writer.add_scalar('train_loss', v1, epoch)
    # writer.add_scalar('val_loss', v2, epoch)
    checkpoint(epoch)
# writer.close()
| [
"torch.from_numpy",
"utils.files.get_platform_datadir",
"torch.cuda.is_available",
"soccerdepth.data.data_utils.image_logger_converter_visdom",
"soccerdepth.models.hourglass.hg8",
"visdom.Visdom",
"argparse.ArgumentParser",
"torch.autograd.Variable",
"numpy.ones",
"torch.Tensor",
"torch.nn.NLLLo... | [((607, 640), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (630, 640), False, 'import warnings\n'), ((652, 708), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Depth AutoEncoder"""'}), "(description='Depth AutoEncoder')\n", (675, 708), False, 'import argparse\n'), ((2708, 2754), 'visdom.Visdom', 'Visdom', ([], {'env': '"""FIFA CNN training"""', 'port': 'opt.port'}), "(env='FIFA CNN training', port=opt.port)\n", (2714, 2754), False, 'from visdom import Visdom\n'), ((3740, 3767), 'torch.manual_seed', 'torch.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (3757, 3767), False, 'import torch\n'), ((4456, 4544), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'num_workers': '(8)', 'batch_size': 'opt.batchSize', 'shuffle': '(True)'}), '(dataset=train_set, num_workers=8, batch_size=opt.batchSize,\n shuffle=True)\n', (4466, 4544), False, 'from torch.utils.data import DataLoader\n'), ((4563, 4655), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'num_workers': '(8)', 'batch_size': 'opt.testBatchSize', 'shuffle': '(False)'}), '(dataset=test_set, num_workers=8, batch_size=opt.testBatchSize,\n shuffle=False)\n', (4573, 4655), False, 'from torch.utils.data import DataLoader\n'), ((4690, 4741), 'soccerdepth.models.hourglass.hg8', 'hg8', ([], {'input_nc': 'opt.input_nc', 'output_nc': 'opt.output_nc'}), '(input_nc=opt.input_nc, output_nc=opt.output_nc)\n', (4693, 4741), False, 'from soccerdepth.models.hourglass import hg8\n'), ((4839, 4864), 'numpy.ones', 'np.ones', (['(opt.output_nc,)'], {}), '((opt.output_nc,))\n', (4846, 4864), True, 'import numpy as np\n'), ((4899, 4930), 'torch.from_numpy', 'torch.from_numpy', (['class_weights'], {}), '(class_weights)\n', (4915, 4930), False, 'import torch\n'), ((5010, 5038), 'torch.nn.NLLLoss2d', 'nn.NLLLoss2d', ([], {'weight': 'weights'}), '(weight=weights)\n', (5022, 5038), True, 
'import torch.nn as nn\n'), ((5052, 5067), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {}), '()\n', (5065, 5067), True, 'import torch.nn as nn\n'), ((2785, 2810), 'numpy.ones', 'np.ones', (['(4, 3, 128, 128)'], {}), '((4, 3, 128, 128))\n', (2792, 2810), True, 'import numpy as np\n'), ((2830, 2855), 'numpy.ones', 'np.ones', (['(4, 3, 128, 128)'], {}), '((4, 3, 128, 128))\n', (2837, 2855), True, 'import numpy as np\n'), ((2875, 2900), 'numpy.ones', 'np.ones', (['(4, 3, 128, 128)'], {}), '((4, 3, 128, 128))\n', (2882, 2900), True, 'import numpy as np\n'), ((2920, 2945), 'numpy.ones', 'np.ones', (['(4, 3, 128, 128)'], {}), '((4, 3, 128, 128))\n', (2927, 2945), True, 'import numpy as np\n'), ((3785, 3817), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['opt.seed'], {}), '(opt.seed)\n', (3807, 3817), False, 'import torch\n'), ((3866, 3906), 'utils.files.get_platform_datadir', 'file_utils.get_platform_datadir', (['dataset'], {}), '(dataset)\n', (3897, 3906), True, 'import utils.files as file_utils\n'), ((4095, 4122), 'os.path.join', 'join', (['path_to_data', '"""train"""'], {}), "(path_to_data, 'train')\n", (4099, 4122), False, 'from os.path import join\n'), ((4314, 4340), 'os.path.join', 'join', (['path_to_data', '"""test"""'], {}), "(path_to_data, 'test')\n", (4318, 4340), False, 'from os.path import join\n'), ((9605, 9645), 'torch.save', 'torch.save', (['dict_to_save', 'model_out_path'], {}), '(dict_to_save, model_out_path)\n', (9615, 9645), False, 'import torch\n'), ((3625, 3650), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3648, 3650), False, 'import torch\n'), ((5268, 5369), 'os.path.join', 'join', (['opt.modeldir', "('model_epoch_%d_%s_%s.pth' % (opt.resume, opt.additional_input, opt.postfix))"], {}), "(opt.modeldir, 'model_epoch_%d_%s_%s.pth' % (opt.resume, opt.\n additional_input, opt.postfix))\n", (5272, 5369), False, 'from os.path import join\n'), ((5829, 5856), 'torch.cat', 'torch.cat', (['(input, mask)', '(1)'], {}), 
'((input, mask), 1)\n', (5838, 5856), False, 'import torch\n'), ((6497, 6559), 'soccerdepth.data.data_utils.image_logger_converter_visdom', 'image_logger_converter_visdom', (['input', 'mask', 'target', 'prediction'], {}), '(input, mask, target, prediction)\n', (6526, 6559), False, 'from soccerdepth.data.data_utils import image_logger_converter_visdom\n'), ((8375, 8402), 'torch.cat', 'torch.cat', (['(input, mask)', '(1)'], {}), '((input, mask), 1)\n', (8384, 8402), False, 'import torch\n'), ((8768, 8830), 'soccerdepth.data.data_utils.image_logger_converter_visdom', 'image_logger_converter_visdom', (['input', 'mask', 'target', 'prediction'], {}), '(input, mask, target, prediction)\n', (8797, 8830), False, 'from soccerdepth.data.data_utils import image_logger_converter_visdom\n'), ((2970, 2987), 'torch.zeros', 'torch.zeros', (['(1,)'], {}), '((1,))\n', (2981, 2987), False, 'import torch\n'), ((2997, 3014), 'torch.zeros', 'torch.zeros', (['(1,)'], {}), '((1,))\n', (3008, 3014), False, 'import torch\n'), ((3295, 3312), 'torch.zeros', 'torch.zeros', (['(1,)'], {}), '((1,))\n', (3306, 3312), False, 'import torch\n'), ((3334, 3351), 'torch.zeros', 'torch.zeros', (['(1,)'], {}), '((1,))\n', (3345, 3351), False, 'import torch\n'), ((5680, 5704), 'torch.autograd.Variable', 'Variable', (["batch['image']"], {}), "(batch['image'])\n", (5688, 5704), False, 'from torch.autograd import Variable\n'), ((5714, 5739), 'torch.autograd.Variable', 'Variable', (["batch['target']"], {}), "(batch['target'])\n", (5722, 5739), False, 'from torch.autograd import Variable\n'), ((5748, 5771), 'torch.autograd.Variable', 'Variable', (["batch['mask']"], {}), "(batch['mask'])\n", (5756, 5771), False, 'from torch.autograd import Variable\n'), ((8225, 8249), 'torch.autograd.Variable', 'Variable', (["batch['image']"], {}), "(batch['image'])\n", (8233, 8249), False, 'from torch.autograd import Variable\n'), ((8259, 8284), 'torch.autograd.Variable', 'Variable', (["batch['target']"], {}), 
"(batch['target'])\n", (8267, 8284), False, 'from torch.autograd import Variable\n'), ((8293, 8316), 'torch.autograd.Variable', 'Variable', (["batch['mask']"], {}), "(batch['mask'])\n", (8301, 8316), False, 'from torch.autograd import Variable\n'), ((7774, 7792), 'torch.ones', 'torch.ones', (['(1, 1)'], {}), '((1, 1))\n', (7784, 7792), False, 'import torch\n'), ((7219, 7237), 'torch.ones', 'torch.ones', (['(1, 1)'], {}), '((1, 1))\n', (7229, 7237), False, 'import torch\n'), ((7582, 7601), 'torch.zeros', 'torch.zeros', (['(1, 1)'], {}), '((1, 1))\n', (7593, 7601), False, 'import torch\n'), ((7311, 7339), 'torch.Tensor', 'torch.Tensor', (['[loss.data[0]]'], {}), '([loss.data[0]])\n', (7323, 7339), False, 'import torch\n'), ((7627, 7655), 'torch.Tensor', 'torch.Tensor', (['[loss.data[0]]'], {}), '([loss.data[0]])\n', (7639, 7655), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for novice submodule.
:license: modified BSD
"""
import os, tempfile
import numpy as np
from image_novice import novice
from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose
def _array_2d_to_RGB(array):
return np.tile(array[:, :, np.newaxis], (1, 1, 3))
class TestNovice(TestCase):
    """Tests for the novice image API (Picture, pixel access, slicing).

    Fixture paths are relative, so the test images must exist in the
    current working directory when the suite runs.
    """
    # Test image fixtures (relative to the working directory).
    sample_path = "sample.png"
    small_sample_path = "block.png"
    def test_pic_info(self):
        """Basic metadata of a freshly opened picture."""
        pic = novice.open(self.sample_path)
        assert_equal(pic.format, "png")
        assert_equal(pic.path, os.path.abspath(self.sample_path))
        assert_equal(pic.size, (665, 500))
        assert_equal(pic.width, 665)
        assert_equal(pic.height, 500)
        assert_equal(pic.modified, False)
        assert_equal(pic.inflation, 1)
        num_pixels = sum(1 for p in pic)
        assert_equal(num_pixels, pic.width * pic.height)
    def test_pixel_iteration(self):
        """Iterating a picture yields exactly width*height pixels."""
        pic = novice.open(self.small_sample_path)
        num_pixels = sum(1 for p in pic)
        assert_equal(num_pixels, pic.width * pic.height)
    def test_modify(self):
        """In-place pixel edits and resizing mark the picture modified."""
        pic = novice.open(self.small_sample_path)
        assert_equal(pic.modified, False)
        # Halve the color of every pixel in the left half of the image.
        for p in pic:
            if p.x < (pic.width / 2):
                p.red /= 2
                p.green /= 2
                p.blue /= 2
        for p in pic:
            if p.x < (pic.width / 2):
                assert_equal(p.red <= 128, True)
                assert_equal(p.green <= 128, True)
                assert_equal(p.blue <= 128, True)
        s = pic.size
        pic.size = (pic.width / 2, pic.height / 2)
        assert_equal(pic.size, (int(s[0] / 2), int(s[1] / 2)))
        assert_equal(pic.modified, True)
        assert_equal(pic.path, None)
        # Saving resets the modified flag and updates path/format.
        with tempfile.NamedTemporaryFile(suffix=".jpg") as tmp:
            pic.save(tmp.name)
            assert_equal(pic.modified, False)
            assert_equal(pic.path, os.path.abspath(tmp.name))
            assert_equal(pic.format, "jpeg")
    def test_pixel_rgb(self):
        """rgb tuple assignment and per-channel assignment stay consistent."""
        pic = novice.Picture.from_size((3, 3), color=(10, 10, 10))
        pixel = pic[0, 0]
        pixel.rgb = tuple(np.arange(3))
        assert_equal(pixel.rgb, np.arange(3))
        for i, channel in enumerate((pixel.red, pixel.green, pixel.blue)):
            assert_equal(channel, i)
        pixel.red = 3
        pixel.green = 4
        pixel.blue = 5
        assert_equal(pixel.rgb, np.arange(3) + 3)
        for i, channel in enumerate((pixel.red, pixel.green, pixel.blue)):
            assert_equal(channel, i + 3)
    def test_pixel_rgb_float(self):
        """Float channel values are truncated to integers on assignment."""
        pixel = novice.Picture.from_size((1, 1))[0, 0]
        pixel.rgb = (1.1, 1.1, 1.1)
        assert_equal(pixel.rgb, (1, 1, 1))
    def test_modified_on_set(self):
        """Setting a pixel via indexing marks the picture modified."""
        pic = novice.Picture(self.small_sample_path)
        pic[0, 0] = (1, 1, 1)
        assert pic.modified
        assert pic.path is None
    def test_modified_on_set_pixel(self):
        """Setting a channel on a pixel object marks the picture modified."""
        data = np.zeros(shape=(10, 5, 3), dtype=np.uint8)
        pic = novice.Picture(array=data)
        pixel = pic[0, 0]
        pixel.green = 1
        assert pic.modified
    def test_update_on_save(self):
        """Saving a modified picture clears the flag and sets path/format."""
        pic = novice.Picture(array=np.zeros((3, 3, 3)))
        pic.size = (6, 6)
        assert pic.modified
        assert pic.path is None
        with tempfile.NamedTemporaryFile(suffix=".jpg") as tmp:
            pic.save(tmp.name)
            assert not pic.modified
            assert_equal(pic.path, os.path.abspath(tmp.name))
            assert_equal(pic.format, "jpeg")
    def test_indexing(self):
        """Region assignment via slices, and bounds/negative-index errors."""
        pic = novice.open(self.small_sample_path)
        # Slicing
        pic[0:5, 0:5] = (0, 0, 0)
        for p in pic:
            if (p.x < 5) and (p.y < 5):
                assert_equal(p.rgb, (0, 0, 0))
                assert_equal(p.red, 0)
                assert_equal(p.green, 0)
                assert_equal(p.blue, 0)
        pic[:5, :5] = (255, 255, 255)
        for p in pic:
            if (p.x < 5) and (p.y < 5):
                assert_equal(p.rgb, (255, 255, 255))
                assert_equal(p.red, 255)
                assert_equal(p.green, 255)
                assert_equal(p.blue, 255)
        pic[5:pic.width, 5:pic.height] = (255, 0, 255)
        for p in pic:
            if (p.x >= 5) and (p.y >= 5):
                assert_equal(p.rgb, (255, 0, 255))
                assert_equal(p.red, 255)
                assert_equal(p.green, 0)
                assert_equal(p.blue, 255)
        pic[5:, 5:] = (0, 0, 255)
        for p in pic:
            if (p.x >= 5) and (p.y >= 5):
                assert_equal(p.rgb, (0, 0, 255))
                assert_equal(p.red, 0)
                assert_equal(p.green, 0)
                assert_equal(p.blue, 255)
        # Outside bounds
        assert_raises(IndexError, lambda: pic[pic.width, pic.height])
        # Negative indexing not supported
        assert_raises(IndexError, lambda: pic[-1, -1])
        assert_raises(IndexError, lambda: pic[-1:, -1:])
    def test_picture_slice(self):
        """A slice of a picture views the matching rows of the backing array."""
        array = _array_2d_to_RGB(np.arange(0, 10)[np.newaxis, :])
        pic = novice.Picture(array=array)
        x_slice = slice(3, 8)
        subpic = pic[:, x_slice]
        assert_allclose(subpic._image, array[x_slice, :])
    def test_move_slice(self):
        """Swapping image regions through slice assignment round-trips."""
        h, w = 3, 12
        array = _array_2d_to_RGB(np.linspace(0, 255, h * w).reshape(h, w))
        array = array.astype(np.uint8)
        pic = novice.Picture(array=array)
        pic_orig = novice.Picture(array=array.copy())
        # Move left cut of image to the right side.
        cut = 5
        rest = pic.width - cut
        temp = pic[:cut, :]
        temp._image = temp._image.copy()
        pic[:rest, :] = pic[cut:, :]
        pic[rest:, :] = temp
        assert_equal(pic[rest:, :]._image, pic_orig[:cut, :]._image)
        assert_equal(pic[:rest, :]._image, pic_orig[cut:, :]._image)
    #def test_negative_index(self):
        #n = 10
        #array = _array_2d_to_RGB(np.arange(0, n)[np.newaxis, :])
        ## Test both x and y indices.
        #pic = novice.Picture(array=array)
        #assert_equal(pic[-1, 0]._image, pic[n - 1, 0]._image)
        #pic = novice.Picture(array=rgb_transpose(array))
        #assert_equal(pic[0, -1]._image, pic[0, n - 1]._image)
    #def test_negative_slice(self):
        #n = 10
        #array = _array_2d_to_RGB(np.arange(0, n)[np.newaxis, :])
        ## Test both x and y slices.
        #pic = novice.Picture(array=array)
        #assert_equal(pic[-3:, 0]._image, pic[n - 3:, 0]._image)
        #pic = novice.Picture(array=rgb_transpose(array))
        #assert_equal(pic[0, -3:]._image, pic[0, n - 3:]._image)
    def test_getitem_with_step(self):
        """Stepped slicing matches slicing the raw array the same way."""
        h, w = 5, 5
        array = _array_2d_to_RGB(np.linspace(0, 255, h * w).reshape(h, w))
        pic = novice.Picture(array=array)
        sliced_pic = pic[::2, ::2]
        assert_equal(sliced_pic._image, novice.Picture(array=array[::2, ::2])._image)
    def test_slicing(self):
        """Slice assignment on a real image runs without raising."""
        cut = 40
        pic = novice.open(self.sample_path)
        rest = pic.width - cut
        temp = pic[:cut, :].copy()
        pic[:rest, :] = pic[cut:, :]
        pic[rest:, :] = temp
    def test_color_names(self):
        """novice.new accepts color names, hex strings, and RGB tuples."""
        # Color name
        pic = novice.new((10, 10), color="black")
        for p in pic:
            assert_equal(p.rgb, (0, 0, 0))
        # Hex string
        pic = novice.new((10, 10), color="#AABBCC")
        for p in pic:
            assert_equal(p.rgb, (170, 187, 204))
        # Tuple
        pic = novice.new((10, 10), color=(23, 47, 99))
        for p in pic:
            assert_equal(p.rgb, (23, 47, 99))
| [
"numpy.tile",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"numpy.testing.assert_raises",
"os.path.abspath",
"numpy.zeros",
"image_novice.novice.new",
"numpy.linspace",
"tempfile.NamedTemporaryFile",
"image_novice.novice.open",
"image_novice.novice.Picture",
"image_novice.nov... | [((299, 342), 'numpy.tile', 'np.tile', (['array[:, :, np.newaxis]', '(1, 1, 3)'], {}), '(array[:, :, np.newaxis], (1, 1, 3))\n', (306, 342), True, 'import numpy as np\n'), ((483, 512), 'image_novice.novice.open', 'novice.open', (['self.sample_path'], {}), '(self.sample_path)\n', (494, 512), False, 'from image_novice import novice\n'), ((521, 552), 'numpy.testing.assert_equal', 'assert_equal', (['pic.format', '"""png"""'], {}), "(pic.format, 'png')\n", (533, 552), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((627, 661), 'numpy.testing.assert_equal', 'assert_equal', (['pic.size', '(665, 500)'], {}), '(pic.size, (665, 500))\n', (639, 661), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((670, 698), 'numpy.testing.assert_equal', 'assert_equal', (['pic.width', '(665)'], {}), '(pic.width, 665)\n', (682, 698), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((707, 736), 'numpy.testing.assert_equal', 'assert_equal', (['pic.height', '(500)'], {}), '(pic.height, 500)\n', (719, 736), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((745, 778), 'numpy.testing.assert_equal', 'assert_equal', (['pic.modified', '(False)'], {}), '(pic.modified, False)\n', (757, 778), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((787, 817), 'numpy.testing.assert_equal', 'assert_equal', (['pic.inflation', '(1)'], {}), '(pic.inflation, 1)\n', (799, 817), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((868, 916), 'numpy.testing.assert_equal', 'assert_equal', (['num_pixels', '(pic.width * pic.height)'], {}), '(num_pixels, pic.width * pic.height)\n', (880, 916), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((968, 1003), 'image_novice.novice.open', 
'novice.open', (['self.small_sample_path'], {}), '(self.small_sample_path)\n', (979, 1003), False, 'from image_novice import novice\n'), ((1053, 1101), 'numpy.testing.assert_equal', 'assert_equal', (['num_pixels', '(pic.width * pic.height)'], {}), '(num_pixels, pic.width * pic.height)\n', (1065, 1101), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((1144, 1179), 'image_novice.novice.open', 'novice.open', (['self.small_sample_path'], {}), '(self.small_sample_path)\n', (1155, 1179), False, 'from image_novice import novice\n'), ((1188, 1221), 'numpy.testing.assert_equal', 'assert_equal', (['pic.modified', '(False)'], {}), '(pic.modified, False)\n', (1200, 1221), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((1723, 1755), 'numpy.testing.assert_equal', 'assert_equal', (['pic.modified', '(True)'], {}), '(pic.modified, True)\n', (1735, 1755), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((1764, 1792), 'numpy.testing.assert_equal', 'assert_equal', (['pic.path', 'None'], {}), '(pic.path, None)\n', (1776, 1792), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((2088, 2140), 'image_novice.novice.Picture.from_size', 'novice.Picture.from_size', (['(3, 3)'], {'color': '(10, 10, 10)'}), '((3, 3), color=(10, 10, 10))\n', (2112, 2140), False, 'from image_novice import novice\n'), ((2739, 2773), 'numpy.testing.assert_equal', 'assert_equal', (['pixel.rgb', '(1, 1, 1)'], {}), '(pixel.rgb, (1, 1, 1))\n', (2751, 2773), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((2825, 2863), 'image_novice.novice.Picture', 'novice.Picture', (['self.small_sample_path'], {}), '(self.small_sample_path)\n', (2839, 2863), False, 'from image_novice import novice\n'), ((3012, 3054), 'numpy.zeros', 'np.zeros', ([], {'shape': '(10, 5, 3)', 'dtype': 'np.uint8'}), 
'(shape=(10, 5, 3), dtype=np.uint8)\n', (3020, 3054), True, 'import numpy as np\n'), ((3069, 3095), 'image_novice.novice.Picture', 'novice.Picture', ([], {'array': 'data'}), '(array=data)\n', (3083, 3095), False, 'from image_novice import novice\n'), ((3637, 3672), 'image_novice.novice.open', 'novice.open', (['self.small_sample_path'], {}), '(self.small_sample_path)\n', (3648, 3672), False, 'from image_novice import novice\n'), ((4834, 4896), 'numpy.testing.assert_raises', 'assert_raises', (['IndexError', '(lambda : pic[pic.width, pic.height])'], {}), '(IndexError, lambda : pic[pic.width, pic.height])\n', (4847, 4896), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4947, 4994), 'numpy.testing.assert_raises', 'assert_raises', (['IndexError', '(lambda : pic[-1, -1])'], {}), '(IndexError, lambda : pic[-1, -1])\n', (4960, 4994), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((5002, 5051), 'numpy.testing.assert_raises', 'assert_raises', (['IndexError', '(lambda : pic[-1:, -1:])'], {}), '(IndexError, lambda : pic[-1:, -1:])\n', (5015, 5051), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((5166, 5193), 'image_novice.novice.Picture', 'novice.Picture', ([], {'array': 'array'}), '(array=array)\n', (5180, 5193), False, 'from image_novice import novice\n'), ((5266, 5315), 'numpy.testing.assert_allclose', 'assert_allclose', (['subpic._image', 'array[x_slice, :]'], {}), '(subpic._image, array[x_slice, :])\n', (5281, 5315), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((5498, 5525), 'image_novice.novice.Picture', 'novice.Picture', ([], {'array': 'array'}), '(array=array)\n', (5512, 5525), False, 'from image_novice import novice\n'), ((5824, 5884), 'numpy.testing.assert_equal', 'assert_equal', (['pic[rest:, :]._image', 'pic_orig[:cut, :]._image'], {}), '(pic[rest:, :]._image, pic_orig[:cut, 
:]._image)\n', (5836, 5884), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((5893, 5953), 'numpy.testing.assert_equal', 'assert_equal', (['pic[:rest, :]._image', 'pic_orig[cut:, :]._image'], {}), '(pic[:rest, :]._image, pic_orig[cut:, :]._image)\n', (5905, 5953), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((6873, 6900), 'image_novice.novice.Picture', 'novice.Picture', ([], {'array': 'array'}), '(array=array)\n', (6887, 6900), False, 'from image_novice import novice\n'), ((7082, 7111), 'image_novice.novice.open', 'novice.open', (['self.sample_path'], {}), '(self.sample_path)\n', (7093, 7111), False, 'from image_novice import novice\n'), ((7312, 7347), 'image_novice.novice.new', 'novice.new', (['(10, 10)'], {'color': '"""black"""'}), "((10, 10), color='black')\n", (7322, 7347), False, 'from image_novice import novice\n'), ((7449, 7486), 'image_novice.novice.new', 'novice.new', (['(10, 10)'], {'color': '"""#AABBCC"""'}), "((10, 10), color='#AABBCC')\n", (7459, 7486), False, 'from image_novice import novice\n'), ((7589, 7629), 'image_novice.novice.new', 'novice.new', (['(10, 10)'], {'color': '(23, 47, 99)'}), '((10, 10), color=(23, 47, 99))\n', (7599, 7629), False, 'from image_novice import novice\n'), ((584, 617), 'os.path.abspath', 'os.path.abspath', (['self.sample_path'], {}), '(self.sample_path)\n', (599, 617), False, 'import os, tempfile\n'), ((1807, 1849), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".jpg"""'}), "(suffix='.jpg')\n", (1834, 1849), False, 'import os, tempfile\n'), ((1902, 1935), 'numpy.testing.assert_equal', 'assert_equal', (['pic.modified', '(False)'], {}), '(pic.modified, False)\n', (1914, 1935), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((2010, 2042), 'numpy.testing.assert_equal', 'assert_equal', (['pic.format', '"""jpeg"""'], {}), "(pic.format, 'jpeg')\n", 
(2022, 2042), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((2193, 2205), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (2202, 2205), True, 'import numpy as np\n'), ((2240, 2252), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (2249, 2252), True, 'import numpy as np\n'), ((2341, 2365), 'numpy.testing.assert_equal', 'assert_equal', (['channel', 'i'], {}), '(channel, i)\n', (2353, 2365), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((2574, 2602), 'numpy.testing.assert_equal', 'assert_equal', (['channel', '(i + 3)'], {}), '(channel, i + 3)\n', (2586, 2602), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((2656, 2688), 'image_novice.novice.Picture.from_size', 'novice.Picture.from_size', (['(1, 1)'], {}), '((1, 1))\n', (2680, 2688), False, 'from image_novice import novice\n'), ((3367, 3409), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".jpg"""'}), "(suffix='.jpg')\n", (3394, 3409), False, 'import os, tempfile\n'), ((3560, 3592), 'numpy.testing.assert_equal', 'assert_equal', (['pic.format', '"""jpeg"""'], {}), "(pic.format, 'jpeg')\n", (3572, 3592), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((7382, 7412), 'numpy.testing.assert_equal', 'assert_equal', (['p.rgb', '(0, 0, 0)'], {}), '(p.rgb, (0, 0, 0))\n', (7394, 7412), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((7521, 7557), 'numpy.testing.assert_equal', 'assert_equal', (['p.rgb', '(170, 187, 204)'], {}), '(p.rgb, (170, 187, 204))\n', (7533, 7557), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((7664, 7697), 'numpy.testing.assert_equal', 'assert_equal', (['p.rgb', '(23, 47, 99)'], {}), '(p.rgb, (23, 47, 99))\n', (7676, 7697), False, 'from numpy.testing import TestCase, assert_equal, 
assert_raises, assert_allclose\n'), ((1444, 1476), 'numpy.testing.assert_equal', 'assert_equal', (['(p.red <= 128)', '(True)'], {}), '(p.red <= 128, True)\n', (1456, 1476), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((1493, 1527), 'numpy.testing.assert_equal', 'assert_equal', (['(p.green <= 128)', '(True)'], {}), '(p.green <= 128, True)\n', (1505, 1527), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((1544, 1577), 'numpy.testing.assert_equal', 'assert_equal', (['(p.blue <= 128)', '(True)'], {}), '(p.blue <= 128, True)\n', (1556, 1577), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((1971, 1996), 'os.path.abspath', 'os.path.abspath', (['tmp.name'], {}), '(tmp.name)\n', (1986, 1996), False, 'import os, tempfile\n'), ((2468, 2480), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (2477, 2480), True, 'import numpy as np\n'), ((3246, 3265), 'numpy.zeros', 'np.zeros', (['(3, 3, 3)'], {}), '((3, 3, 3))\n', (3254, 3265), True, 'import numpy as np\n'), ((3521, 3546), 'os.path.abspath', 'os.path.abspath', (['tmp.name'], {}), '(tmp.name)\n', (3536, 3546), False, 'import os, tempfile\n'), ((3804, 3834), 'numpy.testing.assert_equal', 'assert_equal', (['p.rgb', '(0, 0, 0)'], {}), '(p.rgb, (0, 0, 0))\n', (3816, 3834), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((3851, 3873), 'numpy.testing.assert_equal', 'assert_equal', (['p.red', '(0)'], {}), '(p.red, 0)\n', (3863, 3873), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((3890, 3914), 'numpy.testing.assert_equal', 'assert_equal', (['p.green', '(0)'], {}), '(p.green, 0)\n', (3902, 3914), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((3931, 3954), 'numpy.testing.assert_equal', 'assert_equal', (['p.blue', '(0)'], {}), '(p.blue, 0)\n', (3943, 
3954), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4072, 4108), 'numpy.testing.assert_equal', 'assert_equal', (['p.rgb', '(255, 255, 255)'], {}), '(p.rgb, (255, 255, 255))\n', (4084, 4108), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4125, 4149), 'numpy.testing.assert_equal', 'assert_equal', (['p.red', '(255)'], {}), '(p.red, 255)\n', (4137, 4149), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4166, 4192), 'numpy.testing.assert_equal', 'assert_equal', (['p.green', '(255)'], {}), '(p.green, 255)\n', (4178, 4192), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4209, 4234), 'numpy.testing.assert_equal', 'assert_equal', (['p.blue', '(255)'], {}), '(p.blue, 255)\n', (4221, 4234), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4371, 4405), 'numpy.testing.assert_equal', 'assert_equal', (['p.rgb', '(255, 0, 255)'], {}), '(p.rgb, (255, 0, 255))\n', (4383, 4405), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4422, 4446), 'numpy.testing.assert_equal', 'assert_equal', (['p.red', '(255)'], {}), '(p.red, 255)\n', (4434, 4446), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4463, 4487), 'numpy.testing.assert_equal', 'assert_equal', (['p.green', '(0)'], {}), '(p.green, 0)\n', (4475, 4487), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4504, 4529), 'numpy.testing.assert_equal', 'assert_equal', (['p.blue', '(255)'], {}), '(p.blue, 255)\n', (4516, 4529), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4645, 4677), 'numpy.testing.assert_equal', 'assert_equal', (['p.rgb', '(0, 0, 255)'], {}), '(p.rgb, (0, 0, 255))\n', (4657, 4677), False, 'from 
numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4694, 4716), 'numpy.testing.assert_equal', 'assert_equal', (['p.red', '(0)'], {}), '(p.red, 0)\n', (4706, 4716), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4733, 4757), 'numpy.testing.assert_equal', 'assert_equal', (['p.green', '(0)'], {}), '(p.green, 0)\n', (4745, 4757), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((4774, 4799), 'numpy.testing.assert_equal', 'assert_equal', (['p.blue', '(255)'], {}), '(p.blue, 255)\n', (4786, 4799), False, 'from numpy.testing import TestCase, assert_equal, assert_raises, assert_allclose\n'), ((5119, 5135), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (5128, 5135), True, 'import numpy as np\n'), ((6976, 7013), 'image_novice.novice.Picture', 'novice.Picture', ([], {'array': 'array[::2, ::2]'}), '(array=array[::2, ::2])\n', (6990, 7013), False, 'from image_novice import novice\n'), ((5402, 5428), 'numpy.linspace', 'np.linspace', (['(0)', '(255)', '(h * w)'], {}), '(0, 255, h * w)\n', (5413, 5428), True, 'import numpy as np\n'), ((6817, 6843), 'numpy.linspace', 'np.linspace', (['(0)', '(255)', '(h * w)'], {}), '(0, 255, h * w)\n', (6828, 6843), True, 'import numpy as np\n')] |
import sys
import os
import argparse
import unittest
import warnings
import contextlib
import numpy as np
import numpy.testing as nptest
import unittest
import ants
import superiq
def run_tests():
    """Discover and run every unittest TestCase defined in this module."""
    unittest.main()
class TestModule_super_resolution_segmentation_per_label(unittest.TestCase):
    def test_super_resolution_segmentation_per_label(self):
        """Build a synthetic diamond (L1-ball) volume, dilate and pad it,
        corrupt it with additive Gaussian noise, run per-label
        super-resolution segmentation, and verify that the segmented volume
        matches the reference geometry and the upsampled shape is 84."""
        grid_size = 40
        radius = 16
        volume = np.zeros((grid_size, grid_size, grid_size))
        cx = int(np.floor(volume.shape[0] / 2))
        cy = int(np.floor(volume.shape[1] / 2))
        cz = int(np.floor(volume.shape[2] / 2))
        # Mark every voxel whose L1 distance from the centre is <= radius.
        for i in range(cx - radius, cx + radius + 1):
            for j in range(cy - radius, cy + radius + 1):
                for k in range(cz - radius, cz + radius + 1):
                    if abs(cx - i) + abs(cy - j) + abs(cz - k) <= radius:
                        volume[i, j, k] = 1
        img = ants.from_numpy(volume)
        img = ants.iMath(img, "MD", 1) + img
        img = img.pad_image(pad_width=[2, 2, 2])
        noisy = ants.add_noise_to_image(img, "additivegaussian", (0, 0.1))
        result = superiq.super_resolution_segmentation_per_label(
            noisy, img, [2, 2, 2], 'bilinear', [1, 2], dilation_amount=0)
        ref_vol = ants.label_geometry_measures(img)['VolumeInMillimeters'][0]
        got_vol = result['segmentation_geometry'][0]['VolumeInMillimeters'][0]
        self.assertTrue(ref_vol == got_vol and
                        result['super_resolution'].shape[0] == 84)
class TestModule_ljlf_parcellation_one_template(unittest.TestCase):
    def test_ljlf_parcellation_one_template_segmentation_isin(self):
        """Single-template LJLF parcellation: the unique labels produced
        (past the background entry) must all belong to the requested
        target-label set."""
        target = ants.image_read(ants.get_ants_data('r16'))
        template = ants.image_read(ants.get_ants_data('r27'))
        template_seg = ants.kmeans_segmentation(template, k=3, kmask=None, mrf=0)['segmentation']
        xfrm = ants.registration(target, template, 'SyN')['fwdtransforms']
        target_labels = [2, 3]
        parc = superiq.ljlf_parcellation_one_template(
            target, target_labels, xfrm, template, template_seg,
            templateRepeats=2, submask_dilation=2, verbose=False)
        labels = parc['segmentation'].unique()
        self.assertTrue((int(labels[1]) in target_labels) &
                        (int(labels[2]) in target_labels))
class TestModule_ljlf_parcellation(unittest.TestCase):
    def test_ljlf_parcellation_segmentation_isin(self):
        """Multi-template LJLF parcellation: segment the target with two
        reference templates and check that the unique output labels (past
        the background entry) all come from the requested label set."""
        # Target plus two reference images from the bundled ANTs demo data.
        tar = ants.image_read( ants.get_ants_data('r16'))
        ref1 = ants.image_read( ants.get_ants_data('r27'))
        ref2 = ants.image_read( ants.get_ants_data('r64'))
        # K-means segmentations act as the template label maps.
        refseg1 = ants.kmeans_segmentation( ref1, k=4, kmask=None, mrf=0 )['segmentation']
        refseg2 = ants.kmeans_segmentation( ref2, k=4, kmask=None, mrf=0 )['segmentation']
        # Forward transforms mapping ref1 onto the target.
        fwd = ants.registration( tar, ref1, 'SyN' )['fwdtransforms']
        tarlab = [4,3]
        temp = superiq.ljlf_parcellation(
            tar, tarlab,
            fwd, ref1, refseg1,
            [ref1,ref2], [refseg1,refseg2], submask_dilation=4, verbose=False)
        ulab = temp['segmentation'].unique()
        # ulab[0] is background; positions 1 and 2 must be target labels.
        testitjlf1 = (int(ulab[1]) in tarlab) & (int(ulab[2]) in tarlab)
        self.assertTrue( testitjlf1 )
class TestModule_sort_library_by_similarity(unittest.TestCase):
    def test_sort_library_by_similarity_order(self):
        """Rank a small library of images by similarity to a target: the
        mildly noised copy of the target itself must end up at ordering
        position 1 mapping back to library index 0."""
        target = ants.image_read(ants.get_data("r16"))
        img0 = ants.add_noise_to_image(target, "additivegaussian", (0, 2))
        img1 = ants.image_read(ants.get_data("r27"))
        img2 = ants.image_read(ants.get_data("r16")).add_noise_to_image("additivegaussian", (0, 6))
        target_seg = ants.threshold_image(target, "Otsu", 3)
        library = [img2, img1, img0]
        # Otsu segmentations in the same order as the image library.
        library_segs = [ants.threshold_image(im, "Otsu", 3) for im in library]
        ranked = superiq.sort_library_by_similarity(target, target_seg, [3],
                                                    library, library_segs)
        self.assertTrue(ranked['ordering'][1] == 0)
if __name__ == "__main__":
    # Run the full test suite when this module is executed as a script.
    run_tests()
| [
"ants.threshold_image",
"superiq.super_resolution_segmentation_per_label",
"superiq.sort_library_by_similarity",
"ants.kmeans_segmentation",
"ants.get_ants_data",
"ants.get_data",
"ants.add_noise_to_image",
"ants.iMath",
"ants.registration",
"numpy.floor",
"numpy.zeros",
"unittest.main",
"an... | [((203, 218), 'unittest.main', 'unittest.main', ([], {}), '()\n', (216, 218), False, 'import unittest\n'), ((399, 427), 'numpy.zeros', 'np.zeros', (['(size, size, size)'], {}), '((size, size, size))\n', (407, 427), True, 'import numpy as np\n'), ((858, 877), 'ants.from_numpy', 'ants.from_numpy', (['AA'], {}), '(AA)\n', (873, 877), False, 'import ants\n'), ((896, 919), 'ants.iMath', 'ants.iMath', (['AA', '"""MD"""', '(1)'], {}), "(AA, 'MD', 1)\n", (906, 919), False, 'import ants\n'), ((1011, 1068), 'ants.add_noise_to_image', 'ants.add_noise_to_image', (['AA', '"""additivegaussian"""', '(0, 0.1)'], {}), "(AA, 'additivegaussian', (0, 0.1))\n", (1034, 1068), False, 'import ants\n'), ((1089, 1203), 'superiq.super_resolution_segmentation_per_label', 'superiq.super_resolution_segmentation_per_label', (['AAnoize', 'AA', '[2, 2, 2]', '"""bilinear"""', '[1, 2]'], {'dilation_amount': '(0)'}), "(AAnoize, AA, [2, 2, 2],\n 'bilinear', [1, 2], dilation_amount=0)\n", (1136, 1203), False, 'import superiq\n'), ((1941, 2068), 'superiq.ljlf_parcellation_one_template', 'superiq.ljlf_parcellation_one_template', (['tar', 'tarlab', 'fwd', 'ref', 'refseg'], {'templateRepeats': '(2)', 'submask_dilation': '(2)', 'verbose': '(False)'}), '(tar, tarlab, fwd, ref, refseg,\n templateRepeats=2, submask_dilation=2, verbose=False)\n', (1979, 2068), False, 'import superiq\n'), ((2819, 2951), 'superiq.ljlf_parcellation', 'superiq.ljlf_parcellation', (['tar', 'tarlab', 'fwd', 'ref1', 'refseg1', '[ref1, ref2]', '[refseg1, refseg2]'], {'submask_dilation': '(4)', 'verbose': '(False)'}), '(tar, tarlab, fwd, ref1, refseg1, [ref1, ref2], [\n refseg1, refseg2], submask_dilation=4, verbose=False)\n', (2844, 2951), False, 'import superiq\n'), ((3327, 3391), 'ants.add_noise_to_image', 'ants.add_noise_to_image', (['targetimage', '"""additivegaussian"""', '(0, 2)'], {}), "(targetimage, 'additivegaussian', (0, 2))\n", (3350, 3391), False, 'import ants\n'), ((3570, 3614), 'ants.threshold_image', 
'ants.threshold_image', (['targetimage', '"""Otsu"""', '(3)'], {}), "(targetimage, 'Otsu', 3)\n", (3590, 3614), False, 'import ants\n'), ((3634, 3671), 'ants.threshold_image', 'ants.threshold_image', (['img0', '"""Otsu"""', '(3)'], {}), "(img0, 'Otsu', 3)\n", (3654, 3671), False, 'import ants\n'), ((3691, 3728), 'ants.threshold_image', 'ants.threshold_image', (['img1', '"""Otsu"""', '(3)'], {}), "(img1, 'Otsu', 3)\n", (3711, 3728), False, 'import ants\n'), ((3748, 3785), 'ants.threshold_image', 'ants.threshold_image', (['img2', '"""Otsu"""', '(3)'], {}), "(img2, 'Otsu', 3)\n", (3768, 3785), False, 'import ants\n'), ((3867, 3939), 'superiq.sort_library_by_similarity', 'superiq.sort_library_by_similarity', (['targetimage', 'tseg', '[3]', 'ilist', 'slist'], {}), '(targetimage, tseg, [3], ilist, slist)\n', (3901, 3939), False, 'import superiq\n'), ((1661, 1686), 'ants.get_ants_data', 'ants.get_ants_data', (['"""r16"""'], {}), "('r16')\n", (1679, 1686), False, 'import ants\n'), ((1719, 1744), 'ants.get_ants_data', 'ants.get_ants_data', (['"""r27"""'], {}), "('r27')\n", (1737, 1744), False, 'import ants\n'), ((1763, 1816), 'ants.kmeans_segmentation', 'ants.kmeans_segmentation', (['ref'], {'k': '(3)', 'kmask': 'None', 'mrf': '(0)'}), '(ref, k=3, kmask=None, mrf=0)\n', (1787, 1816), False, 'import ants\n'), ((1849, 1883), 'ants.registration', 'ants.registration', (['tar', 'ref', '"""SyN"""'], {}), "(tar, ref, 'SyN')\n", (1866, 1883), False, 'import ants\n'), ((2385, 2410), 'ants.get_ants_data', 'ants.get_ants_data', (['"""r16"""'], {}), "('r16')\n", (2403, 2410), False, 'import ants\n'), ((2444, 2469), 'ants.get_ants_data', 'ants.get_ants_data', (['"""r27"""'], {}), "('r27')\n", (2462, 2469), False, 'import ants\n'), ((2503, 2528), 'ants.get_ants_data', 'ants.get_ants_data', (['"""r64"""'], {}), "('r64')\n", (2521, 2528), False, 'import ants\n'), ((2548, 2602), 'ants.kmeans_segmentation', 'ants.kmeans_segmentation', (['ref1'], {'k': '(4)', 'kmask': 'None', 'mrf': '(0)'}), 
'(ref1, k=4, kmask=None, mrf=0)\n', (2572, 2602), False, 'import ants\n'), ((2639, 2693), 'ants.kmeans_segmentation', 'ants.kmeans_segmentation', (['ref2'], {'k': '(4)', 'kmask': 'None', 'mrf': '(0)'}), '(ref2, k=4, kmask=None, mrf=0)\n', (2663, 2693), False, 'import ants\n'), ((2726, 2761), 'ants.registration', 'ants.registration', (['tar', 'ref1', '"""SyN"""'], {}), "(tar, ref1, 'SyN')\n", (2743, 2761), False, 'import ants\n'), ((3289, 3309), 'ants.get_data', 'ants.get_data', (['"""r16"""'], {}), "('r16')\n", (3302, 3309), False, 'import ants\n'), ((3428, 3448), 'ants.get_data', 'ants.get_data', (['"""r27"""'], {}), "('r27')\n", (3441, 3448), False, 'import ants\n'), ((473, 497), 'numpy.floor', 'np.floor', (['(A.shape[0] / 2)'], {}), '(A.shape[0] / 2)\n', (481, 497), True, 'import numpy as np\n'), ((520, 544), 'numpy.floor', 'np.floor', (['(A.shape[1] / 2)'], {}), '(A.shape[1] / 2)\n', (528, 544), True, 'import numpy as np\n'), ((549, 573), 'numpy.floor', 'np.floor', (['(A.shape[2] / 2)'], {}), '(A.shape[2] / 2)\n', (557, 573), True, 'import numpy as np\n'), ((1228, 1260), 'ants.label_geometry_measures', 'ants.label_geometry_measures', (['AA'], {}), '(AA)\n', (1256, 1260), False, 'import ants\n'), ((3483, 3503), 'ants.get_data', 'ants.get_data', (['"""r16"""'], {}), "('r16')\n", (3496, 3503), False, 'import ants\n')] |
import numpy as np
import threading
import queue
import time
from common import gtec
import matplotlib.pyplot as plt
from common.config import *
class Recorder:
    """Records EOG/EMG signal windows from a g.USBamp amplifier and can save
    labelled training windows to disk from a background thread."""

    def __init__(
        self,
        sample_duration=SAMPLE_DURATION,
        num_channels=2,
        channel_offset=0,
        signal_type="emg",
    ):
        """
        Create a Recorder for EOG/EMG signals
        :param sample_duration: duration of sampling windows [s]
        :param num_channels: number of channels to record
        :param channel_offset: number of channels to skip
        :param signal_type: emg/eog
        """
        self._stop = False
        self._sample_duration = sample_duration
        self._num_channels = num_channels
        self._channel_offset = channel_offset
        self._labels = queue.Queue()
        self._signal_type = signal_type
        self._amp = gtec.GUSBamp()
        # FS and the 48-52 Hz notch settings come from common.config.
        self._amp.set_sampling_frequency(
            FS, [True for i in range(16)], None, (48, 52, FS, 4)
        )
        self._amp.start()

    def start_offline_recording(self, live=True):
        """
        Start a thread for recording.
        :param live: unused; kept for interface compatibility
        """
        threading.Thread(target=self._record).start()

    def stop_offline_recording(self):
        """
        Terminate the recording thread.
        """
        self._stop = True

    def get_data(self):
        """
        Get whatever samples the amplifier currently has available.
        :return: raw signal chunk as returned by the amplifier
        """
        signals, _ = self._amp.get_data()
        return signals

    def read_sample_win(self, duration=None):
        """
        Read in a sample window.
        :param duration: duration to sample [s]; if omitted, the duration
            passed to the constructor is used
        :return: sampled signals, shape (num_samples, num_channels)
        """
        if duration is None:
            num_samples = int(self._sample_duration * FS)
        else:
            num_samples = int(duration * FS)
        sample_win = np.zeros((num_samples, self._num_channels))
        # Pull chunks from the amplifier until the window is full.
        num_collected_samples = 0
        while num_collected_samples < num_samples:
            signals, _ = self._amp.get_data()
            for i_sample in range(signals.shape[0]):
                for channel in range(self._num_channels):
                    sample_win[num_collected_samples, channel] = signals[
                        i_sample, channel + self._channel_offset
                    ]
                num_collected_samples += 1
                if num_collected_samples == num_samples:
                    # BUG FIX: stop as soon as the window is full. The
                    # original only cleared a flag checked by the outer
                    # loop, so the remaining samples of this chunk were
                    # still written past the end of sample_win (IndexError).
                    break
        return sample_win

    def record_label(self, label):
        """
        Queue a label to be recorded
        :param label: label object whose .value is stored with the window
        """
        self._labels.put(label)

    def _record(self):
        # Worker loop: block on the label queue, capture one window, and save
        # window + label as a timestamped .npz under training_data/<type>/.
        while not self._stop:
            label = self._labels.get()
            signals = self.read_sample_win()
            np.savez(
                "training_data/{}/{}.npz".format(self._signal_type, time.time()),
                signals=signals,
                label=label.value,
            )
def main():
    """Record a 6-second window and plot two differential channel pairs,
    skipping the first two seconds (2 * 1200 samples) of data."""
    recorder = Recorder(sample_duration=6)
    raw_data = recorder.read_sample_win()
    fig = plt.figure(figsize=(12, 10))
    # (subplot position, title, positive channel, negative channel)
    panels = [
        (1, "Signal channel 2 - channel 1", 1, 0),
        (2, "Signal channel 4 - channel 3", 3, 2),
    ]
    for position, title, pos_ch, neg_ch in panels:
        axis = fig.add_subplot(2, 1, position)
        axis.set_title(title)
        axis.set_xlabel("samples")
        axis.set_ylabel("voltage")
        axis.plot(raw_data[2 * 1200 :, pos_ch] - raw_data[2 * 1200 :, neg_ch])
    plt.tight_layout()
    plt.show()
if __name__ == "__main__":
    # Record one window and show the diagnostic plots.
    main()
| [
"common.gtec.GUSBamp",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"threading.Thread",
"queue.Queue",
"time.time",
"matplotlib.pyplot.show"
] | [((3165, 3193), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (3175, 3193), True, 'import matplotlib.pyplot as plt\n'), ((3619, 3637), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3635, 3637), True, 'import matplotlib.pyplot as plt\n'), ((3642, 3652), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3650, 3652), True, 'import matplotlib.pyplot as plt\n'), ((794, 807), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (805, 807), False, 'import queue\n'), ((869, 883), 'common.gtec.GUSBamp', 'gtec.GUSBamp', ([], {}), '()\n', (881, 883), False, 'from common import gtec\n'), ((1933, 1976), 'numpy.zeros', 'np.zeros', (['(num_samples, self._num_channels)'], {}), '((num_samples, self._num_channels))\n', (1941, 1976), True, 'import numpy as np\n'), ((1148, 1185), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._record'}), '(target=self._record)\n', (1164, 1185), False, 'import threading\n'), ((2959, 2970), 'time.time', 'time.time', ([], {}), '()\n', (2968, 2970), False, 'import time\n')] |
"""
Licensed Materials - Property of IBM
Restricted Materials of IBM
20190891
© Copyright IBM Corp. 2021 All Rights Reserved.
"""
import logging
import joblib
import numpy as np
from sklearn.cluster import KMeans
from sklearn.exceptions import NotFittedError
from ibmfl.util import config
from ibmfl.model.sklearn_fl_model import SklearnFLModel
from ibmfl.model.model_update import ModelUpdate
from ibmfl.exceptions import LocalTrainingException, \
ModelInitializationException, ModelException
logger = logging.getLogger(__name__)
class SklearnKMeansFLModel(SklearnFLModel):
    """
    Wrapper class for sklearn.cluster.KMeans
    """
    def __init__(self, model_name, model_spec, sklearn_model=None, **kwargs):
        """
        Create a `SklearnKMeansFLModel` instance from a
        sklearn.cluster.KMeans model.
        If sklearn_model is provided, it will use it; otherwise it will take
        the model_spec to create the model.
        :param model_name: A name specifying the type of model, e.g., \
        clustering_KMeans
        :type model_name: `str`
        :param model_spec: A dictionary contains model specification
        :type model_spec: `dict`
        :param sklearn_model: Complied sklearn model
        :type sklearn_model: `sklearn.cluster.KMeans`
        :param kwargs: A dictionary contains other parameter settings on \
         to initialize a sklearn KMeans model.
        :type kwargs: `dict`
        """
        super().__init__(model_name, model_spec,
                         sklearn_model=sklearn_model,
                         **kwargs)
        self.model_type = 'Sklearn-KMeans'
        # A pre-built model, when given, overrides the one constructed from
        # model_spec by the parent class -- but it must really be a KMeans.
        if sklearn_model:
            if not issubclass(type(sklearn_model), KMeans):
                raise ValueError('Compiled sklearn model needs to be provided'
                                 '(sklearn.cluster.KMeans). '
                                 'Type provided: ' + str(type(sklearn_model)))
            self.model = sklearn_model
    def fit_model(self, train_data, fit_params=None, **kwargs):
        """
        Fits current model with provided training data.
        :param train_data: Tuple with first elements being the training data \
        (x_train,)
        :type train_data: `np.ndarray`
        :param fit_params: (optional) Dictionary with hyperparameters that \
        will be used to call sklearn.cluster fit function. \
        Provided hyperparameter should only contains parameters that \
        match sklearn expected values, e.g., `n_clusters`, which provides \
        the number of clusters to fit. \
        If no `fit_params` is provided, default values as defined in sklearn \
        definition are used.
        :return: None
        """
        # Extract x_train by default,
        # Only x_train is extracted since Clustering is unsupervised
        x_train = train_data[0]
        # Training hyperparameters live under fit_params['hyperparams']
        # ['local']['training']; each level may be missing or None.
        hyperparams = fit_params.get('hyperparams', {}) or {} if fit_params else {}
        local_hp = hyperparams.get('local', {}) or {}
        training_hp = local_hp.get('training', {}) or {}
        try:
            self.model.set_params(**training_hp)
        except Exception as err:
            logger.exception(str(err))
            raise LocalTrainingException(
                'Error occurred while setting up model parameters')
        try:
            self.model.fit(x_train)
        except Exception as err:
            logger.info(str(err))
            raise LocalTrainingException(
                'Error occurred while performing model.fit'
            )
    def update_model(self, model_update):
        """
        Update sklearn model with provided model_update, where model_update
        should contain `cluster_centers_` having the same
        dimension as expected by the sklearn.cluster model.
        `cluster_centers_` : np.ndarray, shape (n_clusters, n_features)
        :param model_update: `ModelUpdate` object that contains the \
        cluster_centers vectors that will be used to update the model.
        :type model_update: `ModelUpdate`
        :return: None
        """
        if isinstance(model_update, ModelUpdate):
            cluster_centers_ = model_update.get('weights')
            try:
                # Overwrite the fitted centers directly; an update carrying
                # no weights (None) is silently ignored.
                if cluster_centers_ is not None:
                    self.model.cluster_centers_ = np.array(cluster_centers_)
            except Exception as err:
                raise LocalTrainingException('Error occurred during '
                                             'updating the model weights. ' +
                                             str(err))
        else:
            raise LocalTrainingException('Provided model_update should be of '
                                         'type ModelUpdate. '
                                         'Instead they are: ' +
                                         str(type(model_update)))
    def get_model_update(self):
        """
        Generates a `ModelUpdate` object that will be sent to other entities.
        :return: ModelUpdate
        :rtype: `ModelUpdate`
        """
        try:
            cluster_centers_ = self.model.cluster_centers_
        except AttributeError:
            # Model has not been fitted yet; send an empty update.
            cluster_centers_ = None
        return ModelUpdate(weights=cluster_centers_)
    def evaluate(self, test_dataset, **kwargs):
        """
        Evaluates the model given testing data.
        :param test_dataset: Testing data, a tuple given in the form \
        (x_test, y_test)
        :type test_dataset: `np.ndarray`
        :param kwargs: Dictionary of metrics available for the model
        :type kwargs: `dict`
        """
        if type(test_dataset) is tuple:
            x_test = test_dataset[0]
            y_test = test_dataset[1]
            return self.evaluate_model(x_test, y_test)
        else:
            raise ModelException("Invalid test dataset!")
    def evaluate_model(self, x, y, **kwargs):
        """
        Evaluates the model given test data x and the corresponding labels y.
        :param x: Samples with shape as expected by the model.
        :type x: `np.ndarray`
        :param y: Not used for evaluation since this is an unsupervised model
        :type y: `None`
        :param kwargs: Dictionary of model-specific arguments \
        for evaluating models. For example, sample weights accepted \
        by model.score.
        :return: Dictionary with all evaluation metrics provided by \
        specific implementation.
        :rtype: `dict`
        """
        acc = {}
        try:
            acc['score'] = self.model.score(x, **kwargs)
        except NotFittedError:
            logger.info('Model evaluated before fitted. '
                        'Returning accuracy as 0')
            acc['score'] = 0
        return acc
    def save_model(self, filename=None, path=None):
        """
        Save a sklearn.cluster.KMeans model to file in the format specific
        to the framework requirement. Meanwhile, initialize the attribute
        `_n_threads` to 1 if the model does not have it.
        :param filename: Name of the file where to store the model.
        :type filename: `str`
        :param path: Path of the folder where to store the model. If no path \
        is specified, the model will be stored in the default data location of \
        the library `DATA_PATH`.
        :type path: `str`
        :return: filename
        """
        # Older sklearn pickles may lack _n_threads, which newer sklearn
        # versions expect at predict time; patch it in before serializing.
        if not hasattr(self.model, '_n_threads'):
            logger.info("Attribute _n_threads does not exist. "
                        "Setting it to default value.")
            self.model._n_threads = 1
        return super().save_model(filename, path)
    @staticmethod
    def load_model_from_spec(model_spec):
        """
        Loads model from provided model_spec, where model_spec is a `dict`
        that contains the following items: model_spec['model_definition']
        contains the model definition as type sklearn.cluster.KMeans.
        :param model_spec: Model specification contains \
        a compiled sklearn model.
        :type model_spec: `dict`
        :return: model
        :rtype: `sklearn.cluster`
        """
        model = None
        try:
            if 'model_definition' in model_spec:
                model_file = model_spec['model_definition']
                model_absolute_path = config.get_absolute_path(model_file)
                # NOTE(review): joblib.load unpickles arbitrary code; the
                # model file must come from a trusted location.
                with open(model_absolute_path, 'rb') as f:
                    model = joblib.load(f)
                if not issubclass(type(model), KMeans):
                    raise ValueError('Provided complied model in model_spec '
                                     'should be of type sklearn.cluster.'
                                     'Instead they are: ' + str(type(model)))
        except Exception as ex:
            raise ModelInitializationException('Model specification was '
                                               'badly formed. '+ str(ex))
        return model
| [
"logging.getLogger",
"ibmfl.util.config.get_absolute_path",
"ibmfl.exceptions.ModelException",
"ibmfl.exceptions.LocalTrainingException",
"numpy.array",
"joblib.load",
"ibmfl.model.model_update.ModelUpdate"
] | [((509, 536), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (526, 536), False, 'import logging\n'), ((5209, 5246), 'ibmfl.model.model_update.ModelUpdate', 'ModelUpdate', ([], {'weights': 'cluster_centers_'}), '(weights=cluster_centers_)\n', (5220, 5246), False, 'from ibmfl.model.model_update import ModelUpdate\n'), ((5916, 5955), 'ibmfl.exceptions.ModelException', 'ModelException', (['"""Invalid test dataset!"""'], {}), "('Invalid test dataset!')\n", (5930, 5955), False, 'from ibmfl.exceptions import LocalTrainingException, ModelInitializationException, ModelException\n'), ((3217, 3291), 'ibmfl.exceptions.LocalTrainingException', 'LocalTrainingException', (['"""Error occurred while setting up model parameters"""'], {}), "('Error occurred while setting up model parameters')\n", (3239, 3291), False, 'from ibmfl.exceptions import LocalTrainingException, ModelInitializationException, ModelException\n'), ((3444, 3511), 'ibmfl.exceptions.LocalTrainingException', 'LocalTrainingException', (['"""Error occurred while performing model.fit"""'], {}), "('Error occurred while performing model.fit')\n", (3466, 3511), False, 'from ibmfl.exceptions import LocalTrainingException, ModelInitializationException, ModelException\n'), ((8416, 8452), 'ibmfl.util.config.get_absolute_path', 'config.get_absolute_path', (['model_file'], {}), '(model_file)\n', (8440, 8452), False, 'from ibmfl.util import config\n'), ((4307, 4333), 'numpy.array', 'np.array', (['cluster_centers_'], {}), '(cluster_centers_)\n', (4315, 4333), True, 'import numpy as np\n'), ((8541, 8555), 'joblib.load', 'joblib.load', (['f'], {}), '(f)\n', (8552, 8555), False, 'import joblib\n')] |
import csv
import sys
import pandas as pd
import os
import glob
import time
import numpy as np
from datetime import datetime
import statistics
import error
from elaboratedata import *
from sklearn.metrics import confusion_matrix, matthews_corrcoef
from math import ceil
SLEEP=10
#check if attack file exists
def attackExists(SAVEDATTACKS, ATTACK,IMAGESET,LEARNER,x_test,y_test):
    """Return True when a saved adversarial set for this (attack, image set,
    learner) combination is already on disk, so generation can be skipped.

    Implements the check the original stub left as a TODO: both naming
    schemes used by this module are accepted -- the plain ``.npy`` dump of a
    full attack set and the ``_x.npy`` half of a synthetic attack set.

    :param SAVEDATTACKS: directory prefix where attack sets are stored
    :param ATTACK: attack name (e.g. 'FGSM')
    :param IMAGESET: dataset name
    :param LEARNER: classifier name
    :param x_test: unused; kept for interface compatibility
    :param y_test: unused; kept for interface compatibility
    """
    base = SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER
    return os.path.exists(base + ".npy") or os.path.exists(base + "_x.npy")
#compute detailed metrics, it is executed only when generating synthetic attack subset
#gives some info on the synthetic attack set
def computeMissingMetrics(CLASS, ATTACK, IMAGESET, LEARNER, predictions, y_test, x_test, classifier,LOGMETRICS,LOGMETRICSCSV):
    """Log detailed metrics contrasting adversarial and clean predictions.

    Runs the classifier on the clean images, compares adversarial
    predictions against clean predictions and ground truth, appends a
    human-readable report to LOGMETRICS and one summary row to the
    LOGMETRICSCSV csv, and returns the clean-image predictions so the
    caller can reuse them.

    :param predictions: classifier output on the adversarial images
    :param y_test: one-hot ground-truth labels
    :param x_test: clean (non-attacked) images
    :param classifier: fitted classifier passed through to executeTest
    :return: predictions of `classifier` on the clean x_test images
    """
    pred_normal=executeTest(IMAGESET, x_test, classifier, repeat=1) #predictions on the clean images
    f=open(LOGMETRICS, 'a+')
    if(os.path.isfile(LOGMETRICSCSV)==False): #if the csv does not exist yet, write its header first
        f1=open(LOGMETRICSCSV, 'a+')
        f1.write("IMAGESET, ALGORITHM, ATTACK, Accuracy, Accuracy normal, MCC, Dimension test set, adversarial == normal, adversarial == normal; normal ==true, adversarial == normal; normal !=true,"+
            "adversarial != normal; normal ==true, adversarial != normal; normal !=true, adversarial == true; normal !=true, adversarial != normal; adversaral !=true\n")
        f1.close()
    f1=open(LOGMETRICSCSV, 'a+')
    pred_normal_max=np.argmax(pred_normal, axis=1) #argmax over the clean predictions
    pred_adv_max= np.argmax(predictions, axis=1) # argmax over the adversarial predictions
    y_max= np.argmax(y_test, axis=1) #argmax over the ground truth
    accuracy_adv=np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test) #plain accuracy on the adversarial images
    accuracy_normal=np.sum(np.argmax(pred_normal, axis=1) == np.argmax(y_test, axis=1)) / len(y_test) #plain accuracy on the clean images
    mcc_adv= matthews_corrcoef(y_max, pred_adv_max) #MCC on the adversarial images
    #adversarial == normal (predictions)
    adv1=np.sum(pred_normal_max == pred_adv_max)
    #adversarial == normal; normal ==true
    normTrue=np.sum(pred_normal_max == y_max)
    adv2=((pred_adv_max==pred_normal_max) & (pred_normal_max==y_max))
    #adversarial == normal; normal !=true
    adv3=((pred_adv_max==pred_normal_max) & (pred_normal_max!=y_max))
    #adversarial != normal; normal ==true
    adv4=((pred_adv_max!=pred_normal_max) & (pred_normal_max==y_max))
    #adversarial != normal; normal !=true
    adv5=((pred_adv_max!=pred_normal_max) & (pred_normal_max!=y_max))
    #adversarial == true; normal !=true
    adv6=((pred_adv_max==y_max) & (pred_normal_max!=y_max))
    #adversarial != normal; adversaral !=true
    adv7=((pred_adv_max!=pred_normal_max) & (pred_adv_max!=y_max))
    #dump the human-readable report, then the csv summary row
    f.write(IMAGESET+ " under "+ATTACK+"\n")
    # f.write("Confusion matrix")
    # f.write("tp="+str(tp)+" fp="+str(fp)+"fn="+str(fn)+" tn="+str(tn)) #would print the confusion matrix on the attacks
    f.write("Accuracy= "+str(accuracy_adv)+"\n")
    f.write("Accuracy Normal= "+str(accuracy_normal)+"\n")
    f.write("MCC (ma non ha senso calcolarlo)= "+str(mcc_adv)+"\n")
    f.write("Dimensione test set = "+ str(len(y_test))+"\n")
    f.write("Prediction adversarial == normal: "+ str(np.sum(adv1))+"\n")
    #adversarial == normal; normal ==true
    f.write("Prediction adversarial == normal; normal ==true: "+ str(np.sum(adv2))+"\n")
    #adversarial == normal; normal !=true
    f.write("Prediction adversarial == normal; normal !=true: "+ str(np.sum(adv3))+"\n")
    #adversarial != normal; normal ==true
    f.write("Prediction adversarial != normal; normal ==true: "+ str(np.sum(adv4))+"\n")
    #adversarial != normal; normal !=true
    f.write("Prediction adversarial != normal; normal !=true: "+ str(np.sum(adv5))+"\n")
    #adversarial == true; normal !=true
    f.write("Prediction adversarial == true; normal !=true: "+ str(np.sum(adv6))+"\n")
    #adversarial != normal; adversaral !=true
    f.write("adversarial != normal; adversaral !=true: "+ str(np.sum(adv7))+"\n")
    f.close()
    f1.write(IMAGESET+ " , "+IMAGESET +","+ATTACK+", "+str(accuracy_adv)+","+str(accuracy_normal)+","+str(mcc_adv)+","+str(len(y_test))+","+str(np.sum(adv1))+","+str(np.sum(adv2))+","+str(np.sum(adv3))+","+
        str(np.sum(adv4))+","+str(np.sum(adv5))+","+str(np.sum(adv6))+","+str(np.sum(adv7))+"\n")
    f1.close()
    return pred_normal
#x_test è clean, no attack IMAGESET
#x_test_adv è avversario
def saveAccuracy(CLASS, ATTACK, IMAGESET, LEARNER, predictions, y_test, LOGACCURACY):
    """Compute top-1 accuracy of `predictions` against one-hot `y_test`,
    append a human-readable line to the LOGACCURACY log file, and echo the
    same line to stdout.

    :param CLASS: attack class label used in the log message
    :param ATTACK: attack name used in the log message
    :param IMAGESET: dataset name used in the log message
    :param LEARNER: classifier name used in the log message
    :param predictions: (n, k) array of class scores
    :param y_test: (n, k) one-hot ground-truth array
    :param LOGACCURACY: path of the log file (opened in append mode)
    """
    accuracy = np.sum(np.argmax(predictions, axis=1) == np.argmax(y_test, axis=1)) / len(y_test)
    # Build the message once instead of duplicating the format string
    # (the original also left pred_max/test_max/accuracy100 unused).
    message = ("Accuracy on test set of " + IMAGESET + " under " + CLASS +
               " data (" + ATTACK + " data point) using " + LEARNER +
               ": {}%".format(accuracy * 100))
    # `with` guarantees the handle is flushed and closed even on error.
    with open(LOGACCURACY, 'a+') as f:
        f.write(message)
        f.write("\n")
    print(message)
#create synthetic attack set, only the "dangerous images"
#dimension is the same of the original datasets
def createAttackSetSynthetic(CLASS, ATTACK, IMAGESET, LEARNER, predictions, y_test, x_test,x_test_adv, classifier, LOGMETRICS,LOGMETRICSCSV,SYNTETHICATTACKS):
    """Build and save a 'synthetic' adversarial set made only of the
    dangerous images -- those whose adversarial prediction differs from both
    the clean prediction and the ground truth -- tiled up to the size of the
    original test set and saved as <prefix>_x.npy / <prefix>_y.npy.

    :param predictions: classifier output on the adversarial images
    :param y_test: one-hot ground-truth labels
    :param x_test: clean images (only its shape[0] is used as target size)
    :param x_test_adv: the full adversarial image set
    :param SYNTETHICATTACKS: directory prefix for the output files
    """
    # Clean-image predictions (also appends detailed metrics to the logs).
    pred_normal=computeMissingMetrics(CLASS, ATTACK, IMAGESET, LEARNER, predictions, y_test, x_test, classifier,LOGMETRICS,LOGMETRICSCSV)
    pred_normal_max = np.argmax(pred_normal, axis=1)   # clean predictions
    pred_adv_max = np.argmax(predictions, axis=1)      # adversarial predictions
    y_max = np.argmax(y_test, axis=1)                  # ground truth
    # "Dangerous": adversarial != clean prediction AND adversarial != truth.
    dangerous = (pred_adv_max != pred_normal_max) & (pred_adv_max != y_max)
    x_fail = [x_test_adv[i] for i in range(len(dangerous)) if dangerous[i]]
    y_fail = [y_test[i] for i in range(len(dangerous)) if dangerous[i]]
    size_max = x_test.shape[0]
    # NOTE: raises ZeroDivisionError if no image qualifies (as the original
    # did); callers should only reach this path for an effective attack.
    repeat = ceil(size_max / len(x_fail))
    # Tile the dangerous images until they cover the original set size, then
    # truncate. (Two dead np.empty pre-allocations removed -- they were
    # discarded or overwritten immediately.)
    x_tiled = np.tile(x_fail, (repeat, 1, 1, 1))
    y_tiled = np.tile(y_fail, (repeat, 1))
    x_test_adv = x_tiled[0:size_max, :]
    y_test = y_tiled[0:size_max, :]
    print(x_test_adv.shape)
    print(y_test.shape)
    np.save(SYNTETHICATTACKS+ATTACK+'_'+IMAGESET+'_'+LEARNER+'_x', x_test_adv)
    np.save(SYNTETHICATTACKS+ATTACK+'_'+IMAGESET+'_'+LEARNER+'_y', y_test)
def createAttackImagesSIMBA(CLASS,SAVEDATTACKS,FULLATTACKS,SYNTETHICATTACKS, ATTACK,IMAGESET,LEARNER,x_test, y_test, attack,classifier,LOGMETRICS,LOGMETRICSCSV):
    """Create a SimBA attack dataset, either full or synthetic.

    SimBA's generate() only handles one image at a time, hence the per-sample
    loop in the full-generation branch.

    :param SAVEDATTACKS: target directory; selects full vs synthetic mode by
        comparing against FULLATTACKS / SYNTETHICATTACKS
    """
    if(SAVEDATTACKS==FULLATTACKS):
        # fixed: the status message was printed twice in the original
        print("generating test ...")
        print(x_test.shape)
        # allocate float32 directly instead of a float64 buffer + astype copy
        x_test_adv = np.empty(x_test.shape, dtype=np.float32)
        for i in range(y_test.shape[0]):
            print("now elaborating image "+str(i))
            z = attack.generate(x=[x_test[i]], y=[y_test[i]])
            x_test_adv[i] = np.array(z)[0]
        print("x_test_adv size is "+str(x_test_adv.shape))
        np.save(SAVEDATTACKS+ATTACK+'_'+IMAGESET+'_'+LEARNER, x_test_adv)  # save image set
    elif(SAVEDATTACKS==SYNTETHICATTACKS): # attacks only on images that make everything fail
        print("generating test ...")
        x_test_adv = np.load(FULLATTACKS+ATTACK+'_'+IMAGESET+'_'+LEARNER+".npy")  # load image set
        predictions = classifier.predict(x_test_adv)
        createAttackSetSynthetic(CLASS,ATTACK, IMAGESET, LEARNER, predictions, y_test, x_test, x_test_adv, classifier,LOGMETRICS,LOGMETRICSCSV, SYNTETHICATTACKS)
#create attack dataset, either full or synthetic
def createAttackImages(CLASS,SAVEDATTACKS,FULLATTACKS,SYNTETHICATTACKS, ATTACK,IMAGESET,LEARNER,x_test, y_test, attack, classifier,LOGMETRICS,LOGMETRICSCSV):
    """Create an attack dataset, either full or synthetic.

    Full mode (SAVEDATTACKS == FULLATTACKS): run the attack over the whole
    test set and persist the adversarial images.
    Synthetic mode (SAVEDATTACKS == SYNTETHICATTACKS): reload the previously
    generated full attack set and keep only the images that fool the model.
    """
    target_file = ATTACK + '_' + IMAGESET + '_' + LEARNER
    if SAVEDATTACKS == FULLATTACKS:
        print("generating test ...")
        adversarial = attack.generate(x=x_test)
        np.save(SAVEDATTACKS + target_file, adversarial)  # save image set
    elif SAVEDATTACKS == SYNTETHICATTACKS:  # attacks only on images that make everything fail
        print("generating test ...")
        adversarial = np.load(FULLATTACKS + target_file + ".npy")
        createAttackSetSynthetic(CLASS, ATTACK, IMAGESET, LEARNER,
                                 classifier.predict(adversarial), y_test, x_test,
                                 adversarial, classifier, LOGMETRICS, LOGMETRICSCSV,
                                 SYNTETHICATTACKS)
#create attack dataset, either full or synthetic (but for some attacks which have a different "generate" function)
def createAttackImages1(CLASS,SAVEDATTACKS,FULLATTACKS,SYNTETHICATTACKS, ATTACK,IMAGESET,LEARNER,x_test, y_test, attack, classifier,LOGMETRICS,LOGMETRICSCSV):
    """Create an attack dataset, either full or synthetic, for attacks whose
    generate() also requires the labels (y=...).

    See createAttackImages for the label-free variant.
    """
    target_file = ATTACK + '_' + IMAGESET + '_' + LEARNER
    if SAVEDATTACKS == FULLATTACKS:
        print("generating test ...")
        adversarial = attack.generate(x=x_test, y=y_test)
        np.save(SAVEDATTACKS + target_file, adversarial)  # save image set
    elif SAVEDATTACKS == SYNTETHICATTACKS:  # attacks only on images that make everything fail
        print("generating test ...")
        adversarial = np.load(FULLATTACKS + target_file + ".npy")
        createAttackSetSynthetic(CLASS, ATTACK, IMAGESET, LEARNER,
                                 classifier.predict(adversarial), y_test, x_test,
                                 adversarial, classifier, LOGMETRICS, LOGMETRICSCSV,
                                 SYNTETHICATTACKS)
#create the patches to be applied to images, and create the attack set with patched images
def createPatches(CLASS,SAVEDATTACKS,FULLATTACKS,SYNTETHICATTACKS, ATTACK,
                  IMAGESET,LEARNER,x_test, y_test, attack, scale, classifier,LOGMETRICS,LOGMETRICSCSV):
    """Create adversarial patches, apply them to the test images, and save
    the resulting attack set (full or synthetic).

    Full mode also persists the raw patch and its mask next to the images.
    """
    name = ATTACK + '_' + IMAGESET + '_' + LEARNER
    if SAVEDATTACKS == FULLATTACKS:
        print("generating test ...")
        print("generating a total of "+str(y_test.shape[0])+" patches")
        patch, patch_mask = attack.generate(x=x_test, y=y_test)
        np.save(SAVEDATTACKS + name + 'patch', patch)
        np.save(SAVEDATTACKS + name + 'patch_mask', patch_mask)
        patched_set = np.empty(x_test.shape)
        for idx in range(y_test.shape[0]):
            print("applying patch to image"+str(idx))
            result = np.array(attack.apply_patch(np.array([x_test[idx]]),
                                                 patch_external=patch,
                                                 mask=patch_mask, scale=scale))
            patched_set[idx] = result[0]  # save image set
            print("x_test_adv size is "+str(patched_set.shape))
            print("z[i] for cell "+str(idx)+" is "+str(result[0].shape))
        np.save(SAVEDATTACKS + name, patched_set.astype(np.float32))
    elif SAVEDATTACKS == SYNTETHICATTACKS:  # attacks only on images that make everything fail
        patched_set = np.load(FULLATTACKS + name + '.npy')
        predictions = classifier.predict(patched_set)
        createAttackSetSynthetic(CLASS, ATTACK, IMAGESET, LEARNER, predictions,
                                 y_test, x_test, patched_set, classifier,
                                 LOGMETRICS, LOGMETRICSCSV, SYNTETHICATTACKS)
def executeTest(IMAGESET, x_test_set, classifier, repeat):
    """Run the classifier `repeat` times over `x_test_set` while an external
    GPU monitor records data into ./datalog, and return the last predictions.

    NOTE(review): assumes `repeat` >= 1, otherwise `predictions` is unbound.
    """
    os.system('rm ./datalog/*')      # clear previous monitoring data
    os.system("./gpu_monitor.sh &")  # start GPU sampling in the background
    for _ in range(repeat):
        predictions = classifier.predict(x_test_set)
    os.system("killall nvidia-smi")  # stop the monitor
    time.sleep(SLEEP)  # a sleep just to allow killall to terminate
    return predictions
#prediction, accuracy computation, logging
def executeAll(IMAGESET, x_test_adv, classifier, CLASS, ATTACK, LEARNER, y_test, itemN, REPETITION, LOGACCURACY):
    """Full evaluation pipeline for one (dataset, attack, learner) combination:
    predict, log the accuracy, post-process the monitoring data, then clean up.
    """
    n_runs = REPETITION[IMAGESET][0]
    preds = executeTest(IMAGESET, x_test_adv, classifier, n_runs)
    saveAccuracy(CLASS, ATTACK, IMAGESET, LEARNER, preds, y_test, LOGACCURACY)
    elaborateData(CLASS, ATTACK, IMAGESET, LEARNER, itemN)
    os.system('rm ./datalog/*')
| [
"numpy.tile",
"math.ceil",
"numpy.argmax",
"time.sleep",
"os.path.isfile",
"numpy.sum",
"numpy.array",
"numpy.empty",
"os.system",
"numpy.save",
"sklearn.metrics.matthews_corrcoef",
"numpy.load"
] | [((1515, 1545), 'numpy.argmax', 'np.argmax', (['pred_normal'], {'axis': '(1)'}), '(pred_normal, axis=1)\n', (1524, 1545), True, 'import numpy as np\n'), ((1583, 1613), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (1592, 1613), True, 'import numpy as np\n'), ((1649, 1674), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (1658, 1674), True, 'import numpy as np\n'), ((1974, 2012), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['y_max', 'pred_adv_max'], {}), '(y_max, pred_adv_max)\n', (1991, 2012), False, 'from sklearn.metrics import confusion_matrix, matthews_corrcoef\n'), ((2083, 2122), 'numpy.sum', 'np.sum', (['(pred_normal_max == pred_adv_max)'], {}), '(pred_normal_max == pred_adv_max)\n', (2089, 2122), True, 'import numpy as np\n'), ((2178, 2210), 'numpy.sum', 'np.sum', (['(pred_normal_max == y_max)'], {}), '(pred_normal_max == y_max)\n', (2184, 2210), True, 'import numpy as np\n'), ((4765, 4795), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (4774, 4795), True, 'import numpy as np\n'), ((4809, 4834), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (4818, 4834), True, 'import numpy as np\n'), ((5653, 5683), 'numpy.argmax', 'np.argmax', (['pred_normal'], {'axis': '(1)'}), '(pred_normal, axis=1)\n', (5662, 5683), True, 'import numpy as np\n'), ((5721, 5751), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (5730, 5751), True, 'import numpy as np\n'), ((5787, 5812), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (5796, 5812), True, 'import numpy as np\n'), ((6249, 6278), 'math.ceil', 'ceil', (['(size_max / size_attacks)'], {}), '(size_max / size_attacks)\n', (6253, 6278), False, 'from math import ceil\n'), ((6293, 6315), 'numpy.empty', 'np.empty', (['x_test.shape'], {}), '(x_test.shape)\n', (6301, 6315), True, 
'import numpy as np\n'), ((6369, 6391), 'numpy.empty', 'np.empty', (['y_test.shape'], {}), '(y_test.shape)\n', (6377, 6391), True, 'import numpy as np\n'), ((6445, 6484), 'numpy.tile', 'np.tile', (['x_test_adv1', '(repeat, 1, 1, 1)'], {}), '(x_test_adv1, (repeat, 1, 1, 1))\n', (6452, 6484), True, 'import numpy as np\n'), ((6500, 6533), 'numpy.tile', 'np.tile', (['y_test_adv1', '(repeat, 1)'], {}), '(y_test_adv1, (repeat, 1))\n', (6507, 6533), True, 'import numpy as np\n'), ((6667, 6757), 'numpy.save', 'np.save', (["(SYNTETHICATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + '_x')", 'x_test_adv'], {}), "(SYNTETHICATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + '_x',\n x_test_adv)\n", (6674, 6757), True, 'import numpy as np\n'), ((6746, 6832), 'numpy.save', 'np.save', (["(SYNTETHICATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + '_y')", 'y_test'], {}), "(SYNTETHICATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + '_y',\n y_test)\n", (6753, 6832), True, 'import numpy as np\n'), ((11619, 11646), 'os.system', 'os.system', (['"""rm ./datalog/*"""'], {}), "('rm ./datalog/*')\n", (11628, 11646), False, 'import os\n'), ((11651, 11682), 'os.system', 'os.system', (['"""./gpu_monitor.sh &"""'], {}), "('./gpu_monitor.sh &')\n", (11660, 11682), False, 'import os\n'), ((11768, 11799), 'os.system', 'os.system', (['"""killall nvidia-smi"""'], {}), "('killall nvidia-smi')\n", (11777, 11799), False, 'import os\n'), ((11804, 11821), 'time.sleep', 'time.sleep', (['SLEEP'], {}), '(SLEEP)\n', (11814, 11821), False, 'import time\n'), ((12296, 12323), 'os.system', 'os.system', (['"""rm ./datalog/*"""'], {}), "('rm ./datalog/*')\n", (12305, 12323), False, 'import os\n'), ((936, 965), 'os.path.isfile', 'os.path.isfile', (['LOGMETRICSCSV'], {}), '(LOGMETRICSCSV)\n', (950, 965), False, 'import os\n'), ((7110, 7132), 'numpy.empty', 'np.empty', (['x_test.shape'], {}), '(x_test.shape)\n', (7118, 7132), True, 'import numpy as np\n'), ((7180, 7200), 'numpy.array', 'np.array', 
(['x_test_adv'], {}), '(x_test_adv)\n', (7188, 7200), True, 'import numpy as np\n'), ((7616, 7692), 'numpy.save', 'np.save', (["(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER)", 'x_test_adv1'], {}), "(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER, x_test_adv1)\n", (7623, 7692), True, 'import numpy as np\n'), ((8472, 8547), 'numpy.save', 'np.save', (["(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER)", 'x_test_adv'], {}), "(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER, x_test_adv)\n", (8479, 8547), True, 'import numpy as np\n'), ((9403, 9478), 'numpy.save', 'np.save', (["(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER)", 'x_test_adv'], {}), "(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER, x_test_adv)\n", (9410, 9478), True, 'import numpy as np\n'), ((10399, 10484), 'numpy.save', 'np.save', (["(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + 'patch')", 'patch'], {}), "(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + 'patch', patch\n )\n", (10406, 10484), True, 'import numpy as np\n'), ((10476, 10570), 'numpy.save', 'np.save', (["(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + 'patch_mask')", 'patch_mask'], {}), "(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER +\n 'patch_mask', patch_mask)\n", (10483, 10570), True, 'import numpy as np\n'), ((10576, 10598), 'numpy.empty', 'np.empty', (['x_test.shape'], {}), '(x_test.shape)\n', (10584, 10598), True, 'import numpy as np\n'), ((11089, 11165), 'numpy.save', 'np.save', (["(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER)", 'x_test_adv1'], {}), "(SAVEDATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER, x_test_adv1)\n", (11096, 11165), True, 'import numpy as np\n'), ((7368, 7379), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (7376, 7379), True, 'import numpy as np\n'), ((7831, 7902), 'numpy.load', 'np.load', (["(FULLATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + '.npy')"], {}), "(FULLATTACKS + ATTACK + '_' + IMAGESET + '_' + 
LEARNER + '.npy')\n", (7838, 7902), True, 'import numpy as np\n'), ((8686, 8757), 'numpy.load', 'np.load', (["(FULLATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + '.npy')"], {}), "(FULLATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + '.npy')\n", (8693, 8757), True, 'import numpy as np\n'), ((9617, 9688), 'numpy.load', 'np.load', (["(FULLATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + '.npy')"], {}), "(FULLATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + '.npy')\n", (9624, 9688), True, 'import numpy as np\n'), ((10829, 10852), 'numpy.array', 'np.array', (['patched_image'], {}), '(patched_image)\n', (10837, 10852), True, 'import numpy as np\n'), ((11267, 11338), 'numpy.load', 'np.load', (["(FULLATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + '.npy')"], {}), "(FULLATTACKS + ATTACK + '_' + IMAGESET + '_' + LEARNER + '.npy')\n", (11274, 11338), True, 'import numpy as np\n'), ((1726, 1756), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (1735, 1756), True, 'import numpy as np\n'), ((1760, 1785), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (1769, 1785), True, 'import numpy as np\n'), ((1857, 1887), 'numpy.argmax', 'np.argmax', (['pred_normal'], {'axis': '(1)'}), '(pred_normal, axis=1)\n', (1866, 1887), True, 'import numpy as np\n'), ((1891, 1916), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (1900, 1916), True, 'import numpy as np\n'), ((4677, 4707), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (4686, 4707), True, 'import numpy as np\n'), ((4711, 4736), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (4720, 4736), True, 'import numpy as np\n'), ((10739, 10760), 'numpy.array', 'np.array', (['[x_test[i]]'], {}), '([x_test[i]])\n', (10747, 10760), True, 'import numpy as np\n'), ((3350, 3362), 'numpy.sum', 'np.sum', (['adv1'], {}), '(adv1)\n', (3356, 3362), 
True, 'import numpy as np\n'), ((3481, 3493), 'numpy.sum', 'np.sum', (['adv2'], {}), '(adv2)\n', (3487, 3493), True, 'import numpy as np\n'), ((3612, 3624), 'numpy.sum', 'np.sum', (['adv3'], {}), '(adv3)\n', (3618, 3624), True, 'import numpy as np\n'), ((3743, 3755), 'numpy.sum', 'np.sum', (['adv4'], {}), '(adv4)\n', (3749, 3755), True, 'import numpy as np\n'), ((3874, 3886), 'numpy.sum', 'np.sum', (['adv5'], {}), '(adv5)\n', (3880, 3886), True, 'import numpy as np\n'), ((4001, 4013), 'numpy.sum', 'np.sum', (['adv6'], {}), '(adv6)\n', (4007, 4013), True, 'import numpy as np\n'), ((4129, 4141), 'numpy.sum', 'np.sum', (['adv7'], {}), '(adv7)\n', (4135, 4141), True, 'import numpy as np\n'), ((4448, 4460), 'numpy.sum', 'np.sum', (['adv7'], {}), '(adv7)\n', (4454, 4460), True, 'import numpy as np\n'), ((4426, 4438), 'numpy.sum', 'np.sum', (['adv6'], {}), '(adv6)\n', (4432, 4438), True, 'import numpy as np\n'), ((4404, 4416), 'numpy.sum', 'np.sum', (['adv5'], {}), '(adv5)\n', (4410, 4416), True, 'import numpy as np\n'), ((4382, 4394), 'numpy.sum', 'np.sum', (['adv4'], {}), '(adv4)\n', (4388, 4394), True, 'import numpy as np\n'), ((4351, 4363), 'numpy.sum', 'np.sum', (['adv3'], {}), '(adv3)\n', (4357, 4363), True, 'import numpy as np\n'), ((4329, 4341), 'numpy.sum', 'np.sum', (['adv2'], {}), '(adv2)\n', (4335, 4341), True, 'import numpy as np\n'), ((4307, 4319), 'numpy.sum', 'np.sum', (['adv1'], {}), '(adv1)\n', (4313, 4319), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn import clone
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from boxcox.optimization import Optimizer
from scipy.stats import boxcox
class Gridsearch2D(Optimizer):
    """Grid-search optimization of the Box-Cox lambda values for a 2D dataset.

    Every (lambda1, lambda2) pair on an evenly spaced grid is evaluated by
    transforming both features, fitting a fresh classifier pipeline, and
    measuring training accuracy; the best-scoring pair is returned.
    """
    def __init__(self, nr_points=11, lower_bound=-5, upper_bound=5):
        """
        :param nr_points: number of points evenly spaced on the grid (per axis)
        :param lower_bound: lower bound of the grid
        :param upper_bound: upper bound of the grid
        """
        self.nr_points = nr_points
        self.lower_bound = lower_bound
        self.upper_bound = upper_bound
        self.validation_performance = 0
    def run(self, features, labels, classifier):
        """Optimize the lambda values.

        :param features: samples to classify, shape (n_samples, 2); Box-Cox
            requires strictly positive feature values
        :param labels: class labels for the samples
        :param classifier: classifier to train and evaluate (cloned per grid point)
        :return: [lambda1, lambda2] maximizing training accuracy
        """
        # both axes use the same grid, so a single linspace suffices
        grid = np.linspace(start=self.lower_bound, stop=self.upper_bound, num=self.nr_points)
        best_lambdas = []
        # start below 0 so the very first grid point is always recorded,
        # even if every combination scores an accuracy of 0
        best_acc = -1.0
        for l1 in grid:
            for l2 in grid:
                features_temp = np.copy(features)
                # Transform data
                features_temp[:, 0] = boxcox(features_temp[:, 0], l1)
                features_temp[:, 1] = boxcox(features_temp[:, 1], l2)
                pipeline = Pipeline(steps=[('scaler', StandardScaler()),
                                        ('classifier', clone(classifier))])
                pipeline.fit(features_temp, labels)
                prediction = pipeline.predict(features_temp)
                # sklearn convention: accuracy_score(y_true, y_pred)
                acc = accuracy_score(labels, prediction)
                if acc > best_acc:
                    best_lambdas = [l1, l2]
                    best_acc = acc
        self.validation_performance = best_acc
        # print("Validation accuracy: " + str(self.validation_performance))
        return best_lambdas
    def get_validation_performance(self):
        """
        :return: validation performance of the hyperparameter tuning
        """
        return self.validation_performance
| [
"numpy.copy",
"scipy.stats.boxcox",
"sklearn.preprocessing.StandardScaler",
"numpy.linspace",
"sklearn.clone",
"sklearn.metrics.accuracy_score"
] | [((1136, 1214), 'numpy.linspace', 'np.linspace', ([], {'start': 'self.lower_bound', 'stop': 'self.upper_bound', 'num': 'self.nr_points'}), '(start=self.lower_bound, stop=self.upper_bound, num=self.nr_points)\n', (1147, 1214), True, 'import numpy as np\n'), ((1233, 1311), 'numpy.linspace', 'np.linspace', ([], {'start': 'self.lower_bound', 'stop': 'self.upper_bound', 'num': 'self.nr_points'}), '(start=self.lower_bound, stop=self.upper_bound, num=self.nr_points)\n', (1244, 1311), True, 'import numpy as np\n'), ((1494, 1511), 'sklearn.clone', 'clone', (['classifier'], {}), '(classifier)\n', (1499, 1511), False, 'from sklearn import clone\n'), ((1544, 1561), 'numpy.copy', 'np.copy', (['features'], {}), '(features)\n', (1551, 1561), True, 'import numpy as np\n'), ((1634, 1665), 'scipy.stats.boxcox', 'boxcox', (['features_temp[:, 0]', 'l1'], {}), '(features_temp[:, 0], l1)\n', (1640, 1665), False, 'from scipy.stats import boxcox\n'), ((1704, 1735), 'scipy.stats.boxcox', 'boxcox', (['features_temp[:, 1]', 'l2'], {}), '(features_temp[:, 1], l2)\n', (1710, 1735), False, 'from scipy.stats import boxcox\n'), ((1980, 2014), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['prediction', 'labels'], {}), '(prediction, labels)\n', (1994, 2014), False, 'from sklearn.metrics import accuracy_score\n'), ((1791, 1807), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1805, 1807), False, 'from sklearn.preprocessing import StandardScaler\n')] |
import numpy as np
import utils as ut
import detector as det
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
class GazeModel:
    """Linear regression model for gaze estimation.

    Maps detected pupil centers to on-screen gaze positions with an affine
    least-squares fit computed from calibration data.
    """
    def __init__(self, calibration_images, calibration_positions):
        """Fit the regression model from calibration images and the on-screen
        positions looked at while each image was captured.
        """
        self.images = calibration_images
        self.positions = calibration_positions
        # cache the fitted coefficients so estimate() does not refit per call
        # (previously the calibrate() result was discarded here and the whole
        # least-squares fit was redone on every estimate() call)
        self.theta_X, self.theta_Y = self.calibrate()
    def calibrate(self):
        """Fit (and cache) the least-squares mapping pupil center -> screen
        position.

        :return: coefficient vectors (theta_X, theta_Y) of the affine fit
        """
        pups = [det.find_pupil(img, debug=False) for img in self.images]
        centers = np.asarray([np.round(np.asarray(p[0])) for p in pups])
        # design matrix [cx, cy, 1] for an affine fit
        D = np.hstack((centers, np.ones((centers.shape[0], 1), dtype=centers.dtype)))
        theta_X, *_ = np.linalg.lstsq(D, self.positions[:, 0], rcond=None)
        theta_Y, *_ = np.linalg.lstsq(D, self.positions[:, 1], rcond=None)
        self.theta_X, self.theta_Y = theta_X, theta_Y
        return theta_X, theta_Y
    def estimate(self, image):
        """Given an input image, return the estimated gaze coordinates [y, x]."""
        pupil = det.find_pupil(image, debug=False)
        center = np.asarray([np.asarray(pupil[0])])
        D = np.hstack((center, np.ones((center.shape[0], 1), dtype=center.dtype)))
        # reuse the cached calibration instead of refitting on every call
        x, y = D @ self.theta_X, D @ self.theta_Y
        return [y[0], x[0]]
class PolynomialGaze():
"""Polynomial regression model for gaze estimation.
"""
def __init__(self, calibration_images, calibration_positions, order):
"""Uses calibration_images and calibratoin_positions to
create regression model.
"""
self.order = order
self.images = calibration_images
self.positions = calibration_positions
self.calibrate()
def calibrate(self):
"""Create the regression model here.
"""
my_pups = [det.find_pupil(self.images[i], debug=False) for i in range(len(self.images))]
my_pups_int = [ut.pupil_to_int(my_pups[i]) for i in range(len(self.images))]
centers = [my_pups_int[i][0] for i in range(len(self.images))]
centers_np = np.asarray(centers)
targets = self.positions
poly_reg = PolynomialFeatures(degree=self.order)
centers_np_transformed = poly_reg.fit_transform(centers_np)
regressor = LinearRegression()
model_poly = regressor.fit(centers_np_transformed, targets)
return model_poly, poly_reg
def estimate(self, image):
"""Given an input image, return the estimated gaze coordinates.
"""
my_pup = det.find_pupil(image, debug=False)
my_pup_int = ut.pupil_to_int(my_pup)
center = my_pup_int[0]
center_np = np.asarray([center])
model_poly, poly_reg = self.calibrate()
pred = model_poly.predict(poly_reg.fit_transform(center_np))
return np.asscalar(pred[0, 1]), np.asscalar(pred[0, 0]) | [
"sklearn.preprocessing.PolynomialFeatures",
"detector.find_pupil",
"numpy.ones",
"numpy.asarray",
"numpy.asscalar",
"numpy.linalg.lstsq",
"utils.pupil_to_int",
"sklearn.linear_model.LinearRegression",
"numpy.round"
] | [((1090, 1131), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['D', 'targets_X'], {'rcond': 'None'}), '(D, targets_X, rcond=None)\n', (1105, 1131), True, 'import numpy as np\n'), ((1154, 1195), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['D', 'targets_Y'], {'rcond': 'None'}), '(D, targets_Y, rcond=None)\n', (1169, 1195), True, 'import numpy as np\n'), ((1361, 1395), 'detector.find_pupil', 'det.find_pupil', (['image'], {'debug': '(False)'}), '(image, debug=False)\n', (1375, 1395), True, 'import detector as det\n'), ((2400, 2419), 'numpy.asarray', 'np.asarray', (['centers'], {}), '(centers)\n', (2410, 2419), True, 'import numpy as np\n'), ((2472, 2509), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': 'self.order'}), '(degree=self.order)\n', (2490, 2509), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((2598, 2616), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2614, 2616), False, 'from sklearn.linear_model import LinearRegression\n'), ((2855, 2889), 'detector.find_pupil', 'det.find_pupil', (['image'], {'debug': '(False)'}), '(image, debug=False)\n', (2869, 2889), True, 'import detector as det\n'), ((2911, 2934), 'utils.pupil_to_int', 'ut.pupil_to_int', (['my_pup'], {}), '(my_pup)\n', (2926, 2934), True, 'import utils as ut\n'), ((2986, 3006), 'numpy.asarray', 'np.asarray', (['[center]'], {}), '([center])\n', (2996, 3006), True, 'import numpy as np\n'), ((632, 675), 'detector.find_pupil', 'det.find_pupil', (['self.images[i]'], {'debug': '(False)'}), '(self.images[i], debug=False)\n', (646, 675), True, 'import detector as det\n'), ((2145, 2188), 'detector.find_pupil', 'det.find_pupil', (['self.images[i]'], {'debug': '(False)'}), '(self.images[i], debug=False)\n', (2159, 2188), True, 'import detector as det\n'), ((2246, 2273), 'utils.pupil_to_int', 'ut.pupil_to_int', (['my_pups[i]'], {}), '(my_pups[i])\n', (2261, 2273), True, 'import utils as ut\n'), ((3140, 3163), 'numpy.asscalar', 
'np.asscalar', (['pred[0, 1]'], {}), '(pred[0, 1])\n', (3151, 3163), True, 'import numpy as np\n'), ((3165, 3188), 'numpy.asscalar', 'np.asscalar', (['pred[0, 0]'], {}), '(pred[0, 0])\n', (3176, 3188), True, 'import numpy as np\n'), ((1004, 1065), 'numpy.ones', 'np.ones', (['(pups_centers.shape[0], 1)'], {'dtype': 'pups_centers.dtype'}), '((pups_centers.shape[0], 1), dtype=pups_centers.dtype)\n', (1011, 1065), True, 'import numpy as np\n'), ((1425, 1446), 'numpy.asarray', 'np.asarray', (['my_pup[0]'], {}), '(my_pup[0])\n', (1435, 1446), True, 'import numpy as np\n'), ((1480, 1529), 'numpy.ones', 'np.ones', (['(center.shape[0], 1)'], {'dtype': 'center.dtype'}), '((center.shape[0], 1), dtype=center.dtype)\n', (1487, 1529), True, 'import numpy as np\n'), ((827, 838), 'numpy.round', 'np.round', (['i'], {}), '(i)\n', (835, 838), True, 'import numpy as np\n')] |
import math
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
import moderngl as mgl
import numpy as np
import vpype as vp
from ._utils import ColorType, load_program, load_texture_array
if TYPE_CHECKING: # pragma: no cover
from .engine import Engine
ResourceType = Union[mgl.Buffer, mgl.Texture, mgl.TextureArray]
class Painter:
    """Base class for OpenGL painters; tracks GL resources for clean release."""
    def __init__(self, ctx: mgl.Context):
        self._ctx = ctx
        self._resources: List[ResourceType] = []
    def __del__(self):
        # release every GL object this painter registered
        for res in self._resources:
            res.release()
    def register_resource(self, resource: ResourceType) -> ResourceType:
        """Track `resource` so it is released when the painter is destroyed."""
        self._resources.append(resource)
        return resource
    def buffer(self, *args: Any, **kwargs: Any) -> mgl.Buffer:
        """Create a moderngl buffer and register it for automatic release."""
        return self.register_resource(self._ctx.buffer(*args, **kwargs))
    def render(self, engine: "Engine", projection: np.ndarray) -> None:
        raise NotImplementedError
class PaperBoundsPainter(Painter):
    """Draws the page: white background, drop shadow, and outline."""
    def __init__(
        self,
        ctx: mgl.Context,
        paper_size: Tuple[float, float],
        color: ColorType = (0, 0, 0, 0.45),
        shadow_size: float = 7.0,
    ):
        """
        Args:
            ctx: moderngl context used to create the GL resources
            paper_size: page dimensions (width, height)
            color: RGBA color of the page outline
            shadow_size: offset/width of the drop shadow, in page units
        """
        super().__init__(ctx)
        # 9 vertices: indices 0-3 are the page corners, 4-8 complete the
        # shadow band along the right and bottom edges
        data = np.array(
            [
                (0, 0),
                (paper_size[0], 0),
                (paper_size[0], paper_size[1]),
                (0, paper_size[1]),
                (paper_size[0], shadow_size),
                (paper_size[0] + shadow_size, shadow_size),
                (paper_size[0] + shadow_size, paper_size[1] + shadow_size),
                (shadow_size, paper_size[1] + shadow_size),
                (shadow_size, paper_size[1]),
            ],
            dtype="f4",
        )
        # page outline rendered as a LINE_LOOP over the 4 corners
        line_idx = np.array([0, 1, 2, 3], dtype="i4")
        # 2 triangles for the page fill + 4 triangles for the shadow band
        triangle_idx = np.array(
            [
                (0, 3, 1), # page background
                (1, 3, 2),
                (4, 2, 5), # shadow
                (2, 6, 5),
                (7, 6, 2),
                (8, 7, 2),
            ],
            dtype="i4",
        ).reshape(-1)
        self._color = color
        self._prog = load_program("fast_line_mono", ctx)
        # single vertex buffer shared by the outline and fill/shadow VAOs
        vbo = self.buffer(data.tobytes())
        self._bounds_vao = ctx.vertex_array(
            self._prog, [(vbo, "2f", "in_vert")], self.buffer(line_idx.tobytes())
        )
        self._shading_vao = ctx.vertex_array(
            self._prog, [(vbo, "2f", "in_vert")], self.buffer(triangle_idx.tobytes())
        )
    def render(self, engine: "Engine", projection: np.ndarray) -> None:
        """Render the shadow, then the page background, then the outline."""
        self._prog["projection"].write(projection)
        # shadow: triangles 2-5 (vertices 6..17), translucent black
        self._prog["color"].value = (0, 0, 0, 0.25)
        self._shading_vao.render(mgl.TRIANGLES, first=6, vertices=12)
        # page background: triangles 0-1 (vertices 0..5), opaque white
        self._prog["color"].value = (1, 1, 1, 1)
        self._shading_vao.render(mgl.TRIANGLES, first=0, vertices=6)
        # page outline in the configured color
        self._prog["color"].value = self._color
        self._bounds_vao.render(mgl.LINE_LOOP)
class LineCollectionFastPainter(Painter):
    """Renders a LineCollection as thin, single-color GL line strips."""
    def __init__(self, ctx: mgl.Context, lc: vp.LineCollection, color: ColorType):
        super().__init__(ctx)
        self._prog = load_program("fast_line_mono", ctx)
        self._color = color
        vertices, indices = self._build_buffers(lc)
        self._vao = ctx.vertex_array(
            self._prog,
            [(self.buffer(vertices.tobytes()), "2f4", "in_vert")],
            index_buffer=self.buffer(indices.tobytes()),
        )
    def render(self, engine: "Engine", projection: np.ndarray) -> None:
        self._prog["projection"].write(projection)
        self._prog["color"].value = self._color
        self._vao.render(mgl.LINE_STRIP)
    @staticmethod
    def _build_buffers(lc: vp.LineCollection) -> Tuple[np.ndarray, np.ndarray]:
        """Flatten all lines into one vertex buffer plus an index buffer where
        each line is terminated by -1 (primitive restart)."""
        vertex_count = sum(len(line) for line in lc)
        vertices = np.empty((vertex_count, 2), dtype="f4")
        # one extra slot per line for the -1 restart marker
        indices = np.full(vertex_count + len(lc), -1, dtype="i4")
        start = 0
        for line_no, line in enumerate(lc):
            end = start + len(line)
            # the line_no-th slot after each line's indices keeps its -1 value
            indices[line_no + start : line_no + end] = np.arange(start, end)
            vertices[start:end] = vp.as_vector(line)
            start = end
        return vertices, indices
class LineCollectionFastColorfulPainter(Painter):
    """Renders a line collection with thin GL lines, cycling through a fixed
    color palette so individual paths can be told apart."""
    # palette cycled through per line (line i uses COLORS[i % len(COLORS)])
    COLORS = [
        np.array((0.0, 0.0, 1.0, 1.0)),
        np.array((0.0, 0.5, 0.0, 1.0)),
        np.array((1.0, 0.0, 0.0, 1.0)),
        np.array((0.0, 0.75, 0.75, 1.0)),
        np.array((0.0, 1.0, 0.0, 1.0)),
        np.array((0.75, 0, 0.75, 1.0)),
        np.array((0.75, 0.75, 0.0, 1.0)),
    ]
    def __init__(self, ctx: mgl.Context, lc: vp.LineCollection, show_points: bool = False):
        """
        Args:
            ctx: moderngl context used to create the GL resources
            lc: line collection to render
            show_points: if True, also draw each vertex as a point
        """
        super().__init__(ctx)
        self._show_points = show_points
        self._prog = load_program("fast_line", ctx)
        # TODO: hacked color table size is not ideal, this will need to be changed when
        # implementing color themes
        self._prog["colors"].write(np.concatenate(self.COLORS).astype("f4").tobytes())
        vertices, indices = self._build_buffers(lc)
        vbo = self.buffer(vertices.tobytes())
        ibo = self.buffer(indices.tobytes())
        # interleaved attributes: 2 floats position + 1 byte palette index
        self._vao = ctx.vertex_array(
            self._prog,
            [(vbo, "2f4 i1", "in_vert", "color_idx")],
            ibo,
        )
    def render(self, engine: "Engine", projection: np.ndarray) -> None:
        """Draw the lines (and optionally their vertices as points)."""
        self._prog["projection"].write(projection)
        self._vao.render(mgl.LINE_STRIP)
        if self._show_points:
            self._vao.render(mgl.POINTS)
    @classmethod
    def _build_buffers(cls, lc: vp.LineCollection) -> Tuple[np.ndarray, np.ndarray]:
        """Build the interleaved (vertex, color index) buffer and an index
        buffer where each line is terminated by -1 (primitive restart)."""
        total_length = sum(len(line) for line in lc)
        buffer = np.empty(total_length, dtype=[("vertex", "2f4"), ("color", "i1")])
        # one extra slot per line for the -1 restart marker
        indices = np.empty(total_length + len(lc), dtype="i4")
        indices.fill(-1)
        # build index array
        cur_index = 0
        for i, line in enumerate(lc):
            next_idx = cur_index + len(line)
            # the i-th extra slot after each line's indices keeps its -1 value
            indices[i + cur_index : i + next_idx] = np.arange(cur_index, next_idx)
            buffer["vertex"][cur_index:next_idx] = vp.as_vector(line)
            buffer["color"][cur_index:next_idx] = i % len(cls.COLORS)
            cur_index = next_idx
        return buffer, indices
class LineCollectionPointsPainter(Painter):
    """Renders every vertex of a line collection as a fixed-size point."""
    def __init__(
        self, ctx: mgl.Context, lc: vp.LineCollection, color: ColorType = (0, 0, 0, 0.25)
    ):
        """
        Args:
            ctx: moderngl context used to create the GL resources
            lc: line collection whose vertices are drawn
            color: RGBA color used for all points
        """
        super().__init__(ctx)
        # inline shaders: fixed 5 px point size, uniform color output
        vertex = """
            #version 330
            uniform mat4 projection;
            in vec2 position;
            void main() {
                gl_PointSize = 5.0;
                gl_Position = projection * vec4(position, 0.0, 1.0);
            }
        """
        fragment = """
            #version 330
            uniform vec4 color;
            out vec4 out_color;
            void main() {
                out_color = color;
            }
        """
        self._prog = ctx.program(vertex_shader=vertex, fragment_shader=fragment)
        self._color = color
        vertices = self._build_buffers(lc)
        vbo = self.buffer(vertices.tobytes())
        self._vao = ctx.vertex_array(self._prog, [(vbo, "2f4", "position")])
    def render(self, engine: "Engine", projection: np.ndarray) -> None:
        """Draw all vertices as GL points."""
        self._prog["projection"].write(projection)
        self._prog["color"].value = self._color
        self._vao.render(mgl.POINTS)
    @staticmethod
    def _build_buffers(lc: vp.LineCollection) -> np.ndarray:
        """Flatten every line of `lc` into a single (N, 2) float32 array."""
        buffer = np.empty((sum(len(line) for line in lc), 2), dtype="f4")
        # copy each line's vertices back-to-back into the flat buffer
        cur_index = 0
        for i, line in enumerate(lc):
            next_idx = cur_index + len(line)
            buffer[cur_index:next_idx] = vp.as_vector(line)
            cur_index = next_idx
        return buffer
class LineCollectionPenUpPainter(Painter):
    """Draws the pen-up trajectories: the segments connecting the end of each
    line to the start of the next one."""
    def __init__(
        self, ctx: mgl.Context, lc: vp.LineCollection, color: ColorType = (0, 0, 0, 0.5)
    ):
        super().__init__(ctx)
        self._color = color
        self._prog = load_program("fast_line_mono", ctx)
        # one segment per consecutive pair of lines: last point -> first point
        segments: List[Tuple[float, float]] = []
        for idx in range(len(lc) - 1):
            end_pt = lc[idx][-1]
            start_pt = lc[idx + 1][0]
            segments.append((end_pt.real, end_pt.imag))
            segments.append((start_pt.real, start_pt.imag))
        if segments:
            vbo = self.buffer(np.array(segments, dtype="f4").tobytes())
            self._vao = ctx.vertex_array(self._prog, [(vbo, "2f4", "in_vert")])
        else:
            self._vao = None
    def render(self, engine: "Engine", projection: np.ndarray) -> None:
        # nothing to draw when the collection had fewer than two lines
        if self._vao is None:
            return
        self._prog["color"].value = self._color
        self._prog["projection"].write(projection)
        self._vao.render(mgl.LINES)
class LineCollectionPreviewPainter(Painter):
    """Renders a line collection as thick strokes matching the pen width."""
    def __init__(
        self, ctx: mgl.Context, lc: vp.LineCollection, pen_width: float, color: ColorType
    ):
        """
        Args:
            ctx: moderngl context used to create the GL resources
            lc: line collection to render
            pen_width: stroke width, in document units
            color: RGBA stroke color
        """
        super().__init__(ctx)
        self._color = color
        self._pen_width = pen_width
        self._prog = load_program("preview_line", ctx)
        vertices, indices = self._build_buffers(lc)
        vbo = self.buffer(vertices.tobytes())
        ibo = self.buffer(indices.tobytes())
        self._vao = ctx.vertex_array(self._prog, [(vbo, "2f4", "position")], ibo)
    def render(self, engine: "Engine", projection: np.ndarray) -> None:
        """Draw the collection; in debug mode, overlay the wireframe."""
        self._prog["color"].value = self._color
        self._prog["pen_width"].value = self._pen_width
        # antialias width scales inversely with the engine zoom level
        self._prog["antialias"].value = 1.5 / engine.scale
        self._prog["projection"].write(projection)
        if engine.debug:
            # first pass: translucent stroke fill
            self._prog["kill_frag_shader"].value = False
            self._prog["debug_view"].value = True
            self._prog["color"].value = self._color[0:3] + (0.3,)
            self._vao.render(mgl.LINE_STRIP_ADJACENCY)
            # second pass: green wireframe overlay of the triangulation
            self._prog["kill_frag_shader"].value = True
            self._prog["debug_view"].value = False
            self._prog["color"].value = (0, 1, 0, 1)
            self._ctx.wireframe = True
            self._vao.render(mgl.LINE_STRIP_ADJACENCY)
            self._ctx.wireframe = False
        else:
            self._prog["kill_frag_shader"].value = False
            self._prog["debug_view"].value = False
            self._vao.render(mgl.LINE_STRIP_ADJACENCY)
    @staticmethod
    def _build_buffers(lc: vp.LineCollection):
        """Prepare the buffers for multi-polyline rendering. Closed polyline must have their
        last point identical to their first point.

        Each line's indices are prefixed/suffixed with an adjacency vertex
        (LINE_STRIP_ADJACENCY) and terminated by -1 (primitive restart).
        """
        indices = []
        reset_index = [-1]
        start_index = 0
        for i, line in enumerate(lc):
            if line[0] == line[-1]: # closed path
                # wrap around: adjacency vertices come from the opposite end
                idx = np.arange(len(line) + 3) - 1
                idx[0], idx[-2], idx[-1] = len(line) - 1, 0, 1
            else:
                # open path: clamp adjacency vertices to the line's end points
                idx = np.arange(len(line) + 2) - 1
                idx[0], idx[-1] = 0, len(line) - 1
            indices.append(idx + start_index)
            start_index += len(line)
            indices.append(reset_index)
        return (
            np.vstack([vp.as_vector(line).astype("f4") for line in lc]),
            np.concatenate(indices).astype("i4"),
        )
class RulersPainter(Painter):
    """Paints the horizontal and vertical rulers around the viewport.

    Renders (1) the white ruler bands and their stroke, (2) major/minor tick
    marks, (3) tick number labels from a monospace glyph atlas, and (4) the
    unit label in the top-left corner square.
    """

    def __init__(self, ctx: mgl.Context):
        super().__init__(ctx)
        # this also sets the font size
        self._thickness = 20.0
        self._font_size = 7.0
        self._prog = load_program("ruler_patch", ctx)
        # vertices
        # 3x3 grid of NDC corner points (minus the bottom-right one); the
        # shader positions the inner edge using ruler_width/ruler_height.
        vertices = self.buffer(
            np.array(
                [
                    (-1.0, 1.0),
                    (0.0, 1.0),
                    (1.0, 1.0),
                    (-1.0, 0.0),
                    (0, 0.0),
                    (1.0, 0.0),
                    (-1.0, -1.0),
                    (0.0, -1.0),
                ],
                dtype="f4",
            ).tobytes()
        )
        # line strip for stroke
        frame_indices = self.buffer(np.array([3, 5, 1, 7], dtype="i4").tobytes())
        self._stroke_vao = ctx.vertex_array(
            self._prog, [(vertices, "2f4", "in_vert")], frame_indices
        )
        # triangles for fill
        # first 6 vertices for the small top-right
        # next 12 vertices for the rulers themselves
        patch_indices = self.buffer(
            np.array(
                [0, 1, 3, 1, 3, 4, 1, 2, 4, 2, 4, 5, 3, 4, 6, 4, 6, 7], dtype="i4"
            ).tobytes()
        )
        self._fill_vao = ctx.vertex_array(
            self._prog, [(vertices, "2f4", "in_vert")], patch_indices
        )
        # major ticks buffer
        self._ticks_prog = load_program("ruler_ticks", ctx)
        self._ticks_prog["color"] = (0.2, 0.2, 0.2, 1.0)
        # Tick geometry is generated in the shader; no vertex buffer needed.
        self._ticks_vao = ctx.vertex_array(self._ticks_prog, [])
        # TEXT STUFF
        # https://github.com/Contraz/demosys-py/blob/master/demosys/effects/text/resources/data/demosys/text/meta.json
        # {
        #   "characters": 190,
        #   "character_ranges": [
        #     {
        #       "min": 32,
        #       "max": 126
        #     },
        #     {
        #       "min": 161,
        #       "max": 255
        #     }
        #   ],
        #   "character_height": 159,
        #   "character_width": 77,
        #   "atlas_height": 30210,
        #   "atlas_width": 77
        # }
        self._texture = load_texture_array("VeraMono.png", ctx, (77, 159, 190), 4)
        self._text_prog = load_program("ruler_text", ctx)
        # Glyph aspect ratio (height / width) from the atlas metadata above.
        self._aspect_ratio = 159.0 / 77.0
        self._text_prog["color"].value = (0, 0, 0, 1.0)
        self._text_vao = ctx.vertex_array(self._text_prog, [])
        # unit label
        # "XX" reserves a two-character buffer; actual unit set in render().
        self._unit_label = LabelPainter(ctx, "XX")

    @property
    def thickness(self) -> float:
        """Ruler thickness in (unscaled) pixels."""
        return self._thickness

    def render(self, engine: "Engine", projection: np.ndarray) -> None:
        """Render bands, ticks, numbers, and the unit label for both rulers."""
        # ===========================
        # render frame
        # Convert pixel thickness to NDC extent (x2 because NDC spans [-1, 1]).
        self._prog["ruler_width"] = 2 * self._thickness * engine.pixel_factor / engine.width
        self._prog["ruler_height"] = 2 * self._thickness * engine.pixel_factor / engine.height
        self._prog["color"].value = (1.0, 1.0, 1.0, 1.0)
        # Skip the first 6 indices (corner square); drawn separately below.
        self._fill_vao.render(mode=mgl.TRIANGLES, first=6)
        # ===========================
        # render ticks
        spec = engine.scale_spec
        self._ticks_prog["scale"] = spec.scale_px * engine.scale
        self._ticks_prog["divisions"] = list(spec.divisions)
        self._ticks_prog["delta_number"] = spec.scale
        # compute various stuff
        # +1 so a partially-visible tick at the edge is still drawn.
        horiz_tick_count = math.ceil(engine.width / engine.scale / spec.scale_px) + 1
        vertical_tick_count = math.ceil(engine.height / engine.scale / spec.scale_px) + 1
        # (-1, -1) signals "no document bounds" to the shaders.
        doc_width, doc_height = (
            engine.document.page_size
            if engine.document is not None and engine.document.page_size is not None
            else (-1.0, -1.0)
        )
        # Number displayed at the first visible tick on each axis.
        start_number_horiz = math.floor(engine.origin[0] / spec.scale_px) * spec.scale
        start_number_vert = math.floor(engine.origin[1] / spec.scale_px) * spec.scale
        thickness = self._thickness * engine.pixel_factor
        font_size = self._font_size * engine.pixel_factor
        # render vertical ruler
        self._ticks_prog["vertical"] = True
        self._ticks_prog["viewport_dim"] = engine.height
        self._ticks_prog["document_dim"] = doc_height / spec.to_px
        self._ticks_prog["offset"] = (engine.origin[1] % spec.scale_px) * engine.scale
        self._ticks_prog["ruler_thickness"] = 2 * thickness / engine.width
        self._ticks_prog["start_number"] = start_number_vert
        # One point primitive per tick; geometry expanded in the shader.
        self._ticks_vao.render(mode=mgl.POINTS, vertices=vertical_tick_count)
        # render horizontal ruler
        self._ticks_prog["vertical"] = False
        self._ticks_prog["viewport_dim"] = engine.width
        self._ticks_prog["document_dim"] = doc_width / spec.to_px
        self._ticks_prog["offset"] = (engine.origin[0] % spec.scale_px) * engine.scale
        self._ticks_prog["ruler_thickness"] = 2 * thickness / engine.height
        self._ticks_prog["start_number"] = start_number_horiz
        self._ticks_vao.render(mode=mgl.POINTS, vertices=horiz_tick_count)
        # ===========================
        # render glyph
        self._texture.use(0)
        self._text_prog["scale"] = spec.scale_px * engine.scale
        self._text_prog["delta_number"] = spec.scale
        # horizontal
        self._text_prog["vertical"] = False
        self._text_prog["viewport_dim"] = engine.width
        self._text_prog["document_dim"] = doc_width / spec.to_px
        self._text_prog["offset"] = (engine.origin[0] % spec.scale_px) * engine.scale
        self._text_prog["glyph_size"].value = (
            font_size * 2.0 / engine.width,
            font_size * 2.0 * self._aspect_ratio / engine.height,
        )
        self._text_prog["start_number"] = start_number_horiz
        self._text_vao.render(mode=mgl.POINTS, vertices=horiz_tick_count)
        # vertical
        self._text_prog["vertical"] = True
        self._text_prog["viewport_dim"] = engine.height
        self._text_prog["document_dim"] = doc_height / spec.to_px
        self._text_prog["offset"] = (engine.origin[1] % spec.scale_px) * engine.scale
        # Note the swapped aspect-ratio factor: vertical glyphs are rotated.
        self._text_prog["glyph_size"].value = (
            font_size * 2.0 * self._aspect_ratio / engine.width,
            font_size * 2.0 / engine.height,
        )
        self._text_prog["start_number"] = start_number_vert
        self._text_vao.render(mode=mgl.POINTS, vertices=vertical_tick_count)
        # ===========================
        # render units corner
        self._prog["color"].value = (1.0, 1.0, 1.0, 1.0)
        self._fill_vao.render(mode=mgl.TRIANGLES, vertices=6)
        self._prog["color"].value = (0.0, 0.0, 0.0, 1.0)
        self._stroke_vao.render(mode=mgl.LINES)
        self._unit_label.font_size = font_size
        # Empirical padding to center the unit label in the corner square.
        self._unit_label.position = (thickness / 7.0, thickness / 8.0)
        self._unit_label.label = spec.unit
        self._unit_label.render(engine, projection)
class LabelPainter(Painter):
    """Paints a single line of text at a fixed pixel position.

    The glyph buffer is allocated once (``max_size`` characters); longer
    labels are silently truncated to that size.
    """

    def __init__(
        self,
        ctx: mgl.Context,
        label: str = "",
        position: Tuple[float, float] = (0.0, 0.0),
        font_size: float = 14.0,
        max_size: Optional[int] = None,
        color: ColorType = (0.0, 0.0, 0.0, 1.0),
    ):
        super().__init__(ctx)
        self.position = position
        self.font_size = font_size
        # Character capacity defaults to the initial label's length.
        self._max_size = max_size or len(label)
        self._buffer = self.buffer(reserve=self._max_size)
        self.label = label
        self._color = color
        # VeraMono atlas: 77x159 px glyphs, 190 characters, 4 components.
        self._texture = load_texture_array("VeraMono.png", ctx, (77, 159, 190), 4)
        self._aspect_ratio = 159.0 / 77.0
        self._prog = load_program("label", ctx)
        self._vao = ctx.vertex_array(self._prog, [(self._buffer, "u1", "in_char")])

    @property
    def label(self) -> str:
        """Currently displayed text."""
        return self._label

    @label.setter
    def label(self, label: str) -> None:
        self._label = label
        visible = label[: self._max_size]
        self._size = len(visible)
        char_codes = np.array([ord(c) for c in visible], dtype=np.uint8)
        self._buffer.write(char_codes.tobytes())

    def render(self, engine: "Engine", projection: np.ndarray) -> None:
        """Draw the label, one point primitive per character."""
        self._texture.use(0)
        prog = self._prog
        prog["color"].value = self._color
        # Convert the pixel position to NDC (x in [-1, 1], y pointing up).
        prog["position"].value = (
            2.0 * self.position[0] / engine.width - 1.0,
            1.0 - 2.0 * self.position[1] / engine.height,
        )
        prog["glyph_size"].value = (
            self.font_size * 2.0 / engine.width,
            self.font_size * 2.0 * self._aspect_ratio / engine.height,
        )
        self._vao.render(mode=mgl.POINTS, vertices=self._size)
| [
"vpype.as_vector",
"math.ceil",
"math.floor",
"numpy.array",
"numpy.empty",
"numpy.concatenate",
"numpy.arange"
] | [((1260, 1586), 'numpy.array', 'np.array', (['[(0, 0), (paper_size[0], 0), (paper_size[0], paper_size[1]), (0, paper_size\n [1]), (paper_size[0], shadow_size), (paper_size[0] + shadow_size,\n shadow_size), (paper_size[0] + shadow_size, paper_size[1] + shadow_size\n ), (shadow_size, paper_size[1] + shadow_size), (shadow_size, paper_size[1])\n ]'], {'dtype': '"""f4"""'}), "([(0, 0), (paper_size[0], 0), (paper_size[0], paper_size[1]), (0,\n paper_size[1]), (paper_size[0], shadow_size), (paper_size[0] +\n shadow_size, shadow_size), (paper_size[0] + shadow_size, paper_size[1] +\n shadow_size), (shadow_size, paper_size[1] + shadow_size), (shadow_size,\n paper_size[1])], dtype='f4')\n", (1268, 1586), True, 'import numpy as np\n'), ((1784, 1818), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {'dtype': '"""i4"""'}), "([0, 1, 2, 3], dtype='i4')\n", (1792, 1818), True, 'import numpy as np\n'), ((3851, 3890), 'numpy.empty', 'np.empty', (['(total_length, 2)'], {'dtype': '"""f4"""'}), "((total_length, 2), dtype='f4')\n", (3859, 3890), True, 'import numpy as np\n'), ((4396, 4426), 'numpy.array', 'np.array', (['(0.0, 0.0, 1.0, 1.0)'], {}), '((0.0, 0.0, 1.0, 1.0))\n', (4404, 4426), True, 'import numpy as np\n'), ((4436, 4466), 'numpy.array', 'np.array', (['(0.0, 0.5, 0.0, 1.0)'], {}), '((0.0, 0.5, 0.0, 1.0))\n', (4444, 4466), True, 'import numpy as np\n'), ((4476, 4506), 'numpy.array', 'np.array', (['(1.0, 0.0, 0.0, 1.0)'], {}), '((1.0, 0.0, 0.0, 1.0))\n', (4484, 4506), True, 'import numpy as np\n'), ((4516, 4548), 'numpy.array', 'np.array', (['(0.0, 0.75, 0.75, 1.0)'], {}), '((0.0, 0.75, 0.75, 1.0))\n', (4524, 4548), True, 'import numpy as np\n'), ((4558, 4588), 'numpy.array', 'np.array', (['(0.0, 1.0, 0.0, 1.0)'], {}), '((0.0, 1.0, 0.0, 1.0))\n', (4566, 4588), True, 'import numpy as np\n'), ((4598, 4628), 'numpy.array', 'np.array', (['(0.75, 0, 0.75, 1.0)'], {}), '((0.75, 0, 0.75, 1.0))\n', (4606, 4628), True, 'import numpy as np\n'), ((4638, 4670), 'numpy.array', 'np.array', 
(['(0.75, 0.75, 0.0, 1.0)'], {}), '((0.75, 0.75, 0.0, 1.0))\n', (4646, 4670), True, 'import numpy as np\n'), ((5804, 5870), 'numpy.empty', 'np.empty', (['total_length'], {'dtype': "[('vertex', '2f4'), ('color', 'i1')]"}), "(total_length, dtype=[('vertex', '2f4'), ('color', 'i1')])\n", (5812, 5870), True, 'import numpy as np\n'), ((4165, 4195), 'numpy.arange', 'np.arange', (['cur_index', 'next_idx'], {}), '(cur_index, next_idx)\n', (4174, 4195), True, 'import numpy as np\n'), ((4237, 4255), 'vpype.as_vector', 'vp.as_vector', (['line'], {}), '(line)\n', (4249, 4255), True, 'import vpype as vp\n'), ((6145, 6175), 'numpy.arange', 'np.arange', (['cur_index', 'next_idx'], {}), '(cur_index, next_idx)\n', (6154, 6175), True, 'import numpy as np\n'), ((6228, 6246), 'vpype.as_vector', 'vp.as_vector', (['line'], {}), '(line)\n', (6240, 6246), True, 'import vpype as vp\n'), ((7933, 7951), 'vpype.as_vector', 'vp.as_vector', (['line'], {}), '(line)\n', (7945, 7951), True, 'import vpype as vp\n'), ((14862, 14916), 'math.ceil', 'math.ceil', (['(engine.width / engine.scale / spec.scale_px)'], {}), '(engine.width / engine.scale / spec.scale_px)\n', (14871, 14916), False, 'import math\n'), ((14951, 15006), 'math.ceil', 'math.ceil', (['(engine.height / engine.scale / spec.scale_px)'], {}), '(engine.height / engine.scale / spec.scale_px)\n', (14960, 15006), False, 'import math\n'), ((15237, 15281), 'math.floor', 'math.floor', (['(engine.origin[0] / spec.scale_px)'], {}), '(engine.origin[0] / spec.scale_px)\n', (15247, 15281), False, 'import math\n'), ((15323, 15367), 'math.floor', 'math.floor', (['(engine.origin[1] / spec.scale_px)'], {}), '(engine.origin[1] / spec.scale_px)\n', (15333, 15367), False, 'import math\n'), ((1842, 1934), 'numpy.array', 'np.array', (['[(0, 3, 1), (1, 3, 2), (4, 2, 5), (2, 6, 5), (7, 6, 2), (8, 7, 2)]'], {'dtype': '"""i4"""'}), "([(0, 3, 1), (1, 3, 2), (4, 2, 5), (2, 6, 5), (7, 6, 2), (8, 7, 2)],\n dtype='i4')\n", (1850, 1934), True, 'import numpy as np\n'), 
((11396, 11419), 'numpy.concatenate', 'np.concatenate', (['indices'], {}), '(indices)\n', (11410, 11419), True, 'import numpy as np\n'), ((11768, 11893), 'numpy.array', 'np.array', (['[(-1.0, 1.0), (0.0, 1.0), (1.0, 1.0), (-1.0, 0.0), (0, 0.0), (1.0, 0.0), (-\n 1.0, -1.0), (0.0, -1.0)]'], {'dtype': '"""f4"""'}), "([(-1.0, 1.0), (0.0, 1.0), (1.0, 1.0), (-1.0, 0.0), (0, 0.0), (1.0,\n 0.0), (-1.0, -1.0), (0.0, -1.0)], dtype='f4')\n", (11776, 11893), True, 'import numpy as np\n'), ((12205, 12239), 'numpy.array', 'np.array', (['[3, 5, 1, 7]'], {'dtype': '"""i4"""'}), "([3, 5, 1, 7], dtype='i4')\n", (12213, 12239), True, 'import numpy as np\n'), ((12559, 12635), 'numpy.array', 'np.array', (['[0, 1, 3, 1, 3, 4, 1, 2, 4, 2, 4, 5, 3, 4, 6, 4, 6, 7]'], {'dtype': '"""i4"""'}), "([0, 1, 3, 1, 3, 4, 1, 2, 4, 2, 4, 5, 3, 4, 6, 4, 6, 7], dtype='i4')\n", (12567, 12635), True, 'import numpy as np\n'), ((8590, 8620), 'numpy.array', 'np.array', (['vertices'], {'dtype': '"""f4"""'}), "(vertices, dtype='f4')\n", (8598, 8620), True, 'import numpy as np\n'), ((5054, 5081), 'numpy.concatenate', 'np.concatenate', (['self.COLORS'], {}), '(self.COLORS)\n', (5068, 5081), True, 'import numpy as np\n'), ((11334, 11352), 'vpype.as_vector', 'vp.as_vector', (['line'], {}), '(line)\n', (11346, 11352), True, 'import vpype as vp\n')] |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates actor and critic networks with attention architecture.
Also implements attention version of standard TFAgents Networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import numpy as np
from six.moves import zip
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.keras_layers import dynamic_unroll_layer
from tf_agents.networks import actor_distribution_network
from tf_agents.networks import actor_distribution_rnn_network
from tf_agents.networks import categorical_projection_network
from tf_agents.networks import encoding_network
from tf_agents.networks import lstm_encoding_network
from tf_agents.networks import network
from tf_agents.networks import normal_projection_network
from tf_agents.networks import utils
from tf_agents.networks import value_network
from tf_agents.networks import value_rnn_network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step
from tf_agents.utils import nest_utils
from social_rl.multiagent_tfagents import multigrid_networks
class _Stack(tf.keras.layers.Layer):
  """Stack of pooling and convolutional blocks with residual connections."""

  def __init__(self, num_ch, num_blocks, **kwargs):
    super(_Stack, self).__init__(**kwargs)
    self.num_ch = num_ch
    self.num_blocks = num_blocks
    # Downscaling head: 3x3 conv followed by stride-2 max pooling.
    self._conv = tf.keras.layers.Conv2D(
        num_ch, 3, strides=1, padding="same", kernel_initializer="lecun_normal")
    self._max_pool = tf.keras.layers.MaxPool2D(
        pool_size=3, padding="same", strides=2)
    # Two conv layers per residual block.
    self._res_convs0 = []
    for i in range(num_blocks):
      self._res_convs0.append(
          tf.keras.layers.Conv2D(
              num_ch, 3, strides=1, padding="same",
              name="res_%d/conv2d_0" % i,
              kernel_initializer="lecun_normal"))
    self._res_convs1 = []
    for i in range(num_blocks):
      self._res_convs1.append(
          tf.keras.layers.Conv2D(
              num_ch, 3, strides=1, padding="same",
              name="res_%d/conv2d_1" % i,
              kernel_initializer="lecun_normal"))

  def __call__(self, conv_out, training=False):
    # Downscale.
    out = self._max_pool(self._conv(conv_out))
    # Residual block(s): relu -> conv -> relu -> conv, plus skip connection.
    for res_conv0, res_conv1 in zip(self._res_convs0, self._res_convs1):
      shortcut = out
      out = res_conv0(tf.nn.relu(out))
      out = res_conv1(tf.nn.relu(out))
      out += shortcut
    return out

  def get_config(self):
    config = dict(super().get_config())
    config["num_ch"] = self.num_ch
    config["num_blocks"] = self.num_blocks
    return config
def get_spatial_basis(h, w, d):
  """Gets a sinusoidal position encoding for image attention.

  The first d // 2 channels encode the row coordinate and the remaining
  channels encode the column coordinate, alternating sine and cosine per
  frequency. d must be a multiple of 4 for the channel slices to line up.

  Args:
    h: Image height.
    w: Image width.
    d: Number of encoding channels.

  Returns:
    A float32 array of shape (h, w, d).
  """
  half_d = d // 2
  # Geometric frequency ladder, from 1 down towards 1/100.
  freqs = np.exp(
      np.arange(0, half_d, 2, dtype=np.float32) * -np.log(100.0) / half_d)
  rows = np.arange(0, h, dtype=np.float32)[:, np.newaxis]
  cols = np.arange(0, w, dtype=np.float32)[:, np.newaxis]
  row_phase = rows * freqs  # (h, n_freq)
  col_phase = cols * freqs  # (w, n_freq)
  basis = np.zeros((h, w, d), dtype=np.float32)
  basis[:, :, 0:half_d:2] = np.sin(row_phase)[:, np.newaxis, :]
  basis[:, :, 1:half_d:2] = np.cos(row_phase)[:, np.newaxis, :]
  basis[:, :, half_d::2] = np.sin(col_phase)[np.newaxis, :, :]
  basis[:, :, half_d + 1::2] = np.cos(col_phase)[np.newaxis, :, :]
  return basis
class AttentionCombinerConv(tf.keras.layers.Layer):
  """Combiner that applies attention to input images.

  Replaces the image entry of a flattened observation list with a
  multi-head-attention summary of the image features (queried by the
  network state), then concatenates all remaining entries.
  """

  def __init__(self,
               image_index_flat,
               network_state_index_flat,
               image_shape,
               conv_filters=64,
               n_heads=4,
               basis_dim=16):
    """Creates the combiner.

    Args:
      image_index_flat: Index of the image features in the flattened
        observation list passed to `__call__`.
      network_state_index_flat: Index of the network state (attention query
        source) in the flattened observation list.
      image_shape: (H, W, D) shape of the image observation.
      conv_filters: Total depth of the query/key/value projections
        (split across heads).
      n_heads: Number of attention heads.
      basis_dim: Number of channels of the sinusoidal position encoding
        appended to the image features.
    """
    super(AttentionCombinerConv, self).__init__(trainable=True)
    self.combiner = tf.keras.layers.Concatenate(axis=-1)
    self.image_index_flat = image_index_flat
    self.network_state_index_flat = network_state_index_flat
    self.image_shape = image_shape
    self.conv_filters = conv_filters
    self.n_heads = n_heads
    self.basis_dim = basis_dim
    # Softmax over the H*W spatial positions, separately per head.
    self.attention_network = tf.keras.Sequential([
        tf.keras.layers.Reshape((image_shape[0] * image_shape[1], n_heads)),
        tf.keras.layers.Softmax(axis=1)
    ])
    # Query from the network state; keys/values are 1x1 convs on the image.
    self.q = tf.keras.layers.Dense(conv_filters)
    self.k = tf.keras.layers.Conv2D(conv_filters, 1, padding="same")
    self.v = tf.keras.layers.Conv2D(conv_filters, 1, padding="same")
    # Constant (1, H, W, basis_dim) position encoding, tiled per batch.
    self.spatial_basis = tf.constant(
        get_spatial_basis(image_shape[0], image_shape[1],
                          basis_dim)[np.newaxis, :, :, :])

  def __call__(self, obs):
    """Applies attention and combines the observation list.

    Args:
      obs: Flattened list of observation tensors.

    Returns:
      A `(combined, mean_attention_weights)` tuple, where
      `mean_attention_weights` has shape (batch, H, W) (head average).
    """
    h, w, _ = self.image_shape
    input_copy = obs.copy()
    batch_size = tf.shape(input_copy[self.image_index_flat])[0]
    spatial_basis_tiled = tf.tile(self.spatial_basis, (batch_size, 1, 1, 1))
    # Append the position encoding as extra image channels.
    image_features = tf.concat(
        (input_copy[self.image_index_flat], spatial_basis_tiled), axis=-1)
    network_state = self.combiner(input_copy[self.network_state_index_flat])
    query = self.q(network_state)
    keys = self.k(image_features)
    values = self.v(image_features)
    # Split projections into heads of depth conv_filters // n_heads.
    depth_per_head = self.conv_filters // self.n_heads
    q_heads = tf.reshape(query, (-1, 1, 1, self.n_heads, depth_per_head))
    k_heads = tf.reshape(keys, (-1, h, w, self.n_heads, depth_per_head))
    v_heads = tf.reshape(values, (-1, h * w, self.n_heads, depth_per_head))
    # Dot-product scores per position and head, then spatial softmax.
    attention_weights = tf.reduce_sum(q_heads * k_heads, axis=-1)
    attention_weights = self.attention_network(attention_weights)
    mean_attention_weights = tf.reshape(
        tf.reduce_mean(attention_weights, axis=-1), (-1, h, w))
    # Weighted values, flattened back to (batch, H*W, conv_filters).
    weighted_features = tf.reshape(
        attention_weights[:, :, :, tf.newaxis] * v_heads,
        (-1, h * w, self.conv_filters))
    # Replace the image entry by its attention summary; the network state
    # entry is consumed by the query and removed from the combined output.
    input_copy[self.image_index_flat] = tf.reduce_sum(weighted_features, axis=1)
    input_copy.pop(self.network_state_index_flat)
    return self.combiner(input_copy), mean_attention_weights

  def get_config(self):
    return {
        "image_index_flat": self.image_index_flat,
        "network_state_index_flat": self.network_state_index_flat,
        "image_shape": self.image_shape,
        "conv_filters": self.conv_filters,
        "n_heads": self.n_heads,
        "basis_dim": self.basis_dim
    }
@gin.configurable
def construct_attention_networks(observation_spec,
                                 action_spec,
                                 use_rnns=True,
                                 actor_fc_layers=(200, 100),
                                 value_fc_layers=(200, 100),
                                 lstm_size=(128,),
                                 conv_filters=8,
                                 conv_kernel=3,
                                 scalar_fc=5,
                                 scalar_name="direction",
                                 scalar_dim=4,
                                 use_stacks=False,
                                 ):
  """Creates an actor and critic network designed for use with MultiGrid.

  A convolution layer processes the image and a dense layer processes the
  direction the agent is facing. These are fed into some fully connected layers
  and an LSTM.

  Args:
    observation_spec: A tf-agents observation spec.
    action_spec: A tf-agents action spec.
    use_rnns: If True, will construct RNN networks. Non-recurrent networks are
      not supported currently.
    actor_fc_layers: Dimension and number of fully connected layers in actor.
    value_fc_layers: Dimension and number of fully connected layers in critic.
    lstm_size: Number of cells in each LSTM layers.
    conv_filters: Number of convolution filters.
    conv_kernel: Size of the convolution kernel.
    scalar_fc: Number of neurons in the fully connected layer processing the
      scalar input.
    scalar_name: Name of the scalar input.
    scalar_dim: Highest possible value for the scalar input. Used to convert to
      one-hot representation.
    use_stacks: Use ResNet stacks (compresses the image).

  Returns:
    A tf-agents ActorDistributionRnnNetwork for the actor, and a ValueRnnNetwork
    for the critic.

  Raises:
    NotImplementedError: If `use_rnns` is False.
  """
  if not use_rnns:
    # Fix: error message previously read "suppported".
    raise NotImplementedError(
        "Non-recurrent attention networks are not supported.")
  preprocessing_layers = {
      "policy_state":
          tf.keras.layers.Lambda(lambda x: x)
  }
  # Unlike construct_multigrid_networks, the image encoder keeps its spatial
  # dimensions (no Flatten): AttentionCombinerConv consumes the feature map.
  if use_stacks:
    preprocessing_layers["image"] = tf.keras.models.Sequential([
        multigrid_networks.cast_and_scale(),
        _Stack(conv_filters // 2, 2),
        _Stack(conv_filters, 2),
        tf.keras.layers.ReLU(),
    ])
  else:
    preprocessing_layers["image"] = tf.keras.models.Sequential([
        multigrid_networks.cast_and_scale(),
        tf.keras.layers.Conv2D(conv_filters, conv_kernel, padding="same"),
        tf.keras.layers.ReLU(),
    ])
  if scalar_name in observation_spec:
    preprocessing_layers[scalar_name] = tf.keras.models.Sequential(
        [multigrid_networks.one_hot_layer(scalar_dim),
         tf.keras.layers.Dense(scalar_fc)])
  if "position" in observation_spec:
    preprocessing_layers["position"] = tf.keras.models.Sequential(
        [multigrid_networks.cast_and_scale(), tf.keras.layers.Dense(scalar_fc)])
  # Locate the image and policy-state entries in the flattened observation
  # list so the attention combiner can address them by index.
  preprocessing_nest = tf.nest.map_structure(lambda l: None,
                                             preprocessing_layers)
  flat_observation_spec = nest_utils.flatten_up_to(
      preprocessing_nest,
      observation_spec,
  )
  image_index_flat = flat_observation_spec.index(observation_spec["image"])
  network_state_index_flat = flat_observation_spec.index(
      observation_spec["policy_state"])
  if use_stacks:
    # Each of the two stacks max-pools with stride 2, so H and W shrink by 4.
    image_shape = [i // 4 for i in observation_spec["image"].shape]  # H x W x D
  else:
    image_shape = observation_spec["image"].shape
  preprocessing_combiner = AttentionCombinerConv(image_index_flat,
                                                 network_state_index_flat,
                                                 image_shape)
  custom_objects = {"_Stack": _Stack}
  with tf.keras.utils.custom_object_scope(custom_objects):
    actor_net = AttentionActorDistributionRnnNetwork(
        observation_spec,
        action_spec,
        preprocessing_layers=preprocessing_layers,
        preprocessing_combiner=preprocessing_combiner,
        input_fc_layer_params=actor_fc_layers,
        output_fc_layer_params=None,
        lstm_size=lstm_size)
    value_net = AttentionValueRnnNetwork(
        observation_spec,
        preprocessing_layers=preprocessing_layers,
        preprocessing_combiner=preprocessing_combiner,
        input_fc_layer_params=value_fc_layers,
        output_fc_layer_params=None)
  return actor_net, value_net
@gin.configurable
def construct_multigrid_networks(observation_spec,
                                 action_spec,
                                 use_rnns=True,
                                 actor_fc_layers=(200, 100),
                                 value_fc_layers=(200, 100),
                                 lstm_size=(128,),
                                 conv_filters=8,
                                 conv_kernel=3,
                                 scalar_fc=5,
                                 scalar_name="direction",
                                 scalar_dim=4,
                                 use_stacks=False,
                                 ):
  """Builds an actor and a critic network for MultiGrid observations.

  The image observation is encoded by either a plain convolution or a pair of
  ResNet stacks and flattened; the scalar observation (e.g. the direction the
  agent is facing) goes through a one-hot encoding and a dense layer. All
  encoded observations are concatenated and fed into fully connected layers,
  plus an LSTM when `use_rnns` is set.

  Args:
    observation_spec: A tf-agents observation spec.
    action_spec: A tf-agents action spec.
    use_rnns: If True, will construct RNN networks.
    actor_fc_layers: Dimension and number of fully connected layers in actor.
    value_fc_layers: Dimension and number of fully connected layers in critic.
    lstm_size: Number of cells in each LSTM layers.
    conv_filters: Number of convolution filters.
    conv_kernel: Size of the convolution kernel.
    scalar_fc: Number of neurons in the fully connected layer processing the
      scalar input.
    scalar_name: Name of the scalar input.
    scalar_dim: Highest possible value for the scalar input. Used to convert
      to one-hot representation.
    use_stacks: Use ResNet stacks (compresses the image).

  Returns:
    A tf-agents ActorDistributionRnnNetwork for the actor, and a
    ValueRnnNetwork for the critic.
  """
  preprocessing_layers = {
      "policy_state": tf.keras.layers.Lambda(lambda x: x)
  }
  if use_stacks:
    image_layers = [
        multigrid_networks.cast_and_scale(),
        _Stack(conv_filters // 2, 2),
        _Stack(conv_filters, 2),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Flatten(),
    ]
  else:
    image_layers = [
        multigrid_networks.cast_and_scale(),
        tf.keras.layers.Conv2D(conv_filters, conv_kernel, padding="same"),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Flatten(),
    ]
  preprocessing_layers["image"] = tf.keras.models.Sequential(image_layers)
  if scalar_name in observation_spec:
    preprocessing_layers[scalar_name] = tf.keras.models.Sequential([
        multigrid_networks.one_hot_layer(scalar_dim),
        tf.keras.layers.Dense(scalar_fc),
    ])
  if "position" in observation_spec:
    preprocessing_layers["position"] = tf.keras.models.Sequential([
        multigrid_networks.cast_and_scale(),
        tf.keras.layers.Dense(scalar_fc),
    ])
  # All flattened observation encodings are simply concatenated.
  preprocessing_combiner = tf.keras.layers.Concatenate(axis=-1)
  # _Stack must be registered so the networks can be (de)serialized.
  with tf.keras.utils.custom_object_scope({"_Stack": _Stack}):
    if use_rnns:
      actor_net = actor_distribution_rnn_network.ActorDistributionRnnNetwork(
          observation_spec,
          action_spec,
          preprocessing_layers=preprocessing_layers,
          preprocessing_combiner=preprocessing_combiner,
          input_fc_layer_params=actor_fc_layers,
          output_fc_layer_params=None,
          lstm_size=lstm_size)
      value_net = value_rnn_network.ValueRnnNetwork(
          observation_spec,
          preprocessing_layers=preprocessing_layers,
          preprocessing_combiner=preprocessing_combiner,
          input_fc_layer_params=value_fc_layers,
          output_fc_layer_params=None)
    else:
      actor_net = actor_distribution_network.ActorDistributionNetwork(
          observation_spec,
          action_spec,
          preprocessing_layers=preprocessing_layers,
          preprocessing_combiner=preprocessing_combiner,
          fc_layer_params=actor_fc_layers,
          activation_fn=tf.keras.activations.tanh)
      value_net = value_network.ValueNetwork(
          observation_spec,
          preprocessing_layers=preprocessing_layers,
          preprocessing_combiner=preprocessing_combiner,
          fc_layer_params=value_fc_layers,
          activation_fn=tf.keras.activations.tanh)
  return actor_net, value_net
def _categorical_projection_net(action_spec, logits_init_output_factor=0.1):
  """Creates the projection network used for discrete (categorical) actions."""
  return categorical_projection_network.CategoricalProjectionNetwork(
      action_spec,
      logits_init_output_factor=logits_init_output_factor)
def _normal_projection_net(action_spec,
                           init_action_stddev=0.35,
                           init_means_output_factor=0.1):
  """Creates the projection network used for continuous (normal) actions."""
  # Inverse softplus: softplus(bias) == init_action_stddev at initialization.
  return normal_projection_network.NormalProjectionNetwork(
      action_spec,
      init_means_output_factor=init_means_output_factor,
      std_bias_initializer_value=np.log(np.exp(init_action_stddev) - 1))
class AttentionNetwork(network.Network):
  """A modification of tf_agents network that returns attention info."""

  def __call__(self, inputs, *args, **kwargs):
    """A wrapper around `Network.call`.

    A typical `call` method in a class subclassing `Network` will have a
    signature that accepts `inputs`, as well as other `*args` and `**kwargs`.
    `call` can optionally also accept `step_type` and `network_state`
    (if `state_spec != ()` is not trivial).  e.g.:

    ```python
    def call(self,
             inputs,
             step_type=None,
             network_state=(),
             training=False):
        ...
        return outputs, new_network_state
    ```

    We will validate the first argument (`inputs`)
    against `self.input_tensor_spec` if one is available.

    If a `network_state` kwarg is given it is also validated against
    `self.state_spec`.  Similarly, the return value of the `call` method is
    expected to be a tuple/list with 2 values:  `(output, new_state)`.
    We validate `new_state` against `self.state_spec`.

    If no `network_state` kwarg is given (or if empty `network_state = ()` is
    given, it is up to `call` to assume a proper "empty" state, and to
    emit an appropriate `output_state`.

    Args:
      inputs: The input to `self.call`, matching `self.input_tensor_spec`.
      *args: Additional arguments to `self.call`.
      **kwargs: Additional keyword arguments to `self.call`.  These can include
        `network_state` and `step_type`.  `step_type` is required if the
        network's `call` requires it. `network_state` is required if the
        underlying network's `call` requires it.

    Returns:
      A tuple `(outputs, new_network_state, attention_weights)`.
    """
    if self.input_tensor_spec is not None:
      nest_utils.assert_matching_dtypes_and_inner_shapes(
          inputs,
          self.input_tensor_spec,
          allow_extra_fields=True,
          caller=self,
          tensors_name="`inputs`",
          specs_name="`input_tensor_spec`")
    call_argspec = network.tf_inspect.getargspec(self.call)
    # Convert *args, **kwargs to a canonical kwarg representation.
    normalized_kwargs = network.tf_inspect.getcallargs(self.call, inputs, *args,
                                                       **kwargs)
    network_state = normalized_kwargs.get("network_state", None)
    normalized_kwargs.pop("self", None)
    # Identity (not equality) comparison against the literal empty
    # containers is intentional here; pylint is silenced for it.
    # pylint: disable=literal-comparison
    network_has_state = (
        network_state is not None and network_state is not () and
        network_state is not [])
    # pylint: enable=literal-comparison
    if network_has_state:
      nest_utils.assert_matching_dtypes_and_inner_shapes(
          network_state,
          self.state_spec,
          allow_extra_fields=True,
          caller=self,
          tensors_name="`network_state`",
          specs_name="`state_spec`")
    # Drop kwargs the underlying `call` does not accept.
    if "step_type" not in call_argspec.args and not call_argspec.keywords:
      normalized_kwargs.pop("step_type", None)
    if (network_state in (None, ()) and
        "network_state" not in call_argspec.args and not call_argspec.keywords):
      normalized_kwargs.pop("network_state", None)
    # The only difference from `network.Network.__call__`: the underlying
    # `call` returns a third element (attention weights) which is threaded
    # through to the caller.
    outputs, new_state, attention_weights = tf.keras.layers.Layer.__call__(
        self, **normalized_kwargs)
    nest_utils.assert_matching_dtypes_and_inner_shapes(
        new_state,
        self.state_spec,
        allow_extra_fields=True,
        caller=self,
        tensors_name="`new_state`",
        specs_name="`state_spec`")
    return outputs, new_state, attention_weights
class AttentionDistributionNetwork(AttentionNetwork,
                                   network.DistributionNetwork):
  """DistributionNetwork whose `__call__` also returns attention weights."""

  def __call__(self, inputs, *args, **kwargs):
    # Route through AttentionNetwork so its validation and three-element
    # return value `(outputs, state, attention_weights)` are used.
    return AttentionNetwork.__call__(self, inputs, *args, **kwargs)
class AttentionEncodingNetwork(AttentionNetwork,
                               encoding_network.EncodingNetwork):
  """A modification of tf_agents encoding network that returns attention info."""

  def __call__(self, inputs, *args, **kwargs):
    return AttentionNetwork.__call__(self, inputs, *args, **kwargs)

  def call(self, observation, step_type=None, network_state=(), training=False):
    """Encodes `observation` and returns attention weights as third output.

    Args:
      observation: A nest of tensors matching `input_tensor_spec`.
      step_type: Unused; accepted for API compatibility.
      network_state: Passed through unchanged.
      training: Whether layers run in training mode.

    Returns:
      A `(states, network_state, attention_weights)` tuple;
      `attention_weights` is None when no preprocessing combiner is set.
    """
    del step_type  # unused.
    if self._batch_squash:
      outer_rank = nest_utils.get_outer_rank(observation,
                                             self.input_tensor_spec)
      batch_squash = utils.BatchSquash(outer_rank)
      observation = tf.nest.map_structure(batch_squash.flatten, observation)
    if self._flat_preprocessing_layers is None:
      processed = observation
    else:
      processed = []
      for obs, layer in zip(
          nest_utils.flatten_up_to(self._preprocessing_nest, observation),
          self._flat_preprocessing_layers):
        processed.append(layer(obs, training=training))
      if len(processed) == 1 and self._preprocessing_combiner is None:
        # If only one observation is passed and the preprocessing_combiner
        # is unspecified, use the preprocessed version of this observation.
        processed = processed[0]
    states = processed
    # Fix: `attention_weights` was previously unbound (NameError at return)
    # when no preprocessing combiner was configured.
    attention_weights = None
    if self._preprocessing_combiner is not None:
      states, attention_weights = self._preprocessing_combiner(states)
    for layer in self._postprocessing_layers:
      states = layer(states, training=training)
    if self._batch_squash:
      states = tf.nest.map_structure(batch_squash.unflatten, states)
    return states, network_state, attention_weights
class AttentionLSTMEncodingNetwork(AttentionNetwork,
                                   lstm_encoding_network.LSTMEncodingNetwork):
  """A modification of tf_agents LSTM encoding network that returns attention info."""

  def __init__(
      self,
      input_tensor_spec,
      preprocessing_layers=None,
      preprocessing_combiner=None,
      conv_layer_params=None,
      input_fc_layer_params=(75, 40),
      lstm_size=None,
      output_fc_layer_params=(75, 40),
      activation_fn=tf.keras.activations.relu,
      rnn_construction_fn=None,
      rnn_construction_kwargs=None,
      dtype=tf.float32,
      name="LSTMEncodingNetwork",
  ):
    """Creates the network; parameters match LSTMEncodingNetwork's."""
    super(AttentionLSTMEncodingNetwork,
          self).__init__(input_tensor_spec, preprocessing_layers,
                         preprocessing_combiner, conv_layer_params,
                         input_fc_layer_params, lstm_size,
                         output_fc_layer_params, activation_fn,
                         rnn_construction_fn, rnn_construction_kwargs, dtype,
                         name)
    kernel_initializer = tf.compat.v1.variance_scaling_initializer(
        scale=2.0, mode="fan_in", distribution="truncated_normal")
    # Replace the input encoder built by the parent constructor with an
    # attention-aware one that also returns attention weights.
    input_encoder = AttentionEncodingNetwork(
        input_tensor_spec,
        preprocessing_layers=preprocessing_layers,
        preprocessing_combiner=preprocessing_combiner,
        conv_layer_params=conv_layer_params,
        fc_layer_params=input_fc_layer_params,
        activation_fn=activation_fn,
        kernel_initializer=kernel_initializer,
        dtype=dtype)
    self._input_encoder = input_encoder

  def __call__(self, inputs, *args, **kwargs):
    # Route through AttentionNetwork to get the three-element return value.
    return AttentionNetwork.__call__(self, inputs, *args, **kwargs)

  def call(self, observation, step_type, network_state=(), training=False):
    """Apply the network.

    Args:
      observation: A tuple of tensors matching `input_tensor_spec`.
      step_type: A tensor of `StepType.
      network_state: (optional.) The network state.
      training: Whether the output is being used for training.

    Returns:
      `(outputs, network_state, attention_weights)` - the network output,
      next network state, and the attention weights produced by the input
      encoder.

    Raises:
      ValueError: If observation tensors lack outer `(batch,)` or
        `(batch, time)` axes.
    """
    num_outer_dims = nest_utils.get_outer_rank(observation,
                                               self.input_tensor_spec)
    if num_outer_dims not in (1, 2):
      raise ValueError(
          "Input observation must have a batch or batch x time outer shape.")
    has_time_dim = num_outer_dims == 2
    if not has_time_dim:
      # Add a time dimension to the inputs.
      observation = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1),
                                          observation)
      step_type = tf.nest.map_structure(lambda t: tf.expand_dims(t, 1),
                                        step_type)
    state, _, attention_weights = self._input_encoder(
        observation, step_type=step_type, network_state=(), training=training)
    network_kwargs = {}
    if isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):
      # DynamicUnroll resets its state at episode boundaries.
      network_kwargs["reset_mask"] = tf.equal(
          step_type, time_step.StepType.FIRST, name="mask")
    # Unroll over the time sequence.
    output = self._lstm_network(
        inputs=state,
        initial_state=network_state,
        training=training,
        **network_kwargs)
    if isinstance(self._lstm_network, dynamic_unroll_layer.DynamicUnroll):
      state, network_state = output
    else:
      # Keras RNN layers return (output, *flat_states); repack the states
      # into the structure of the cell's state size.
      state = output[0]
      network_state = tf.nest.pack_sequence_as(
          self._lstm_network.cell.state_size, tf.nest.flatten(output[1:]))
    for layer in self._output_encoder:
      state = layer(state, training=training)
    if not has_time_dim:
      # Remove time dimension from the state.
      state = tf.squeeze(state, [1])
    return state, network_state, attention_weights
@gin.configurable
class AttentionActorDistributionRnnNetwork(AttentionDistributionNetwork):
  """A modification of tf_agents rnn network that returns attention info."""

  def __init__(self,
               input_tensor_spec,
               output_tensor_spec,
               preprocessing_layers=None,
               preprocessing_combiner=None,
               conv_layer_params=None,
               input_fc_layer_params=(200, 100),
               input_dropout_layer_params=None,
               lstm_size=None,
               output_fc_layer_params=(200, 100),
               activation_fn=tf.keras.activations.relu,
               dtype=tf.float32,
               discrete_projection_net=_categorical_projection_net,
               continuous_projection_net=_normal_projection_net,
               rnn_construction_fn=None,
               rnn_construction_kwargs=None,
               name="ActorDistributionRnnNetwork"):
    """Creates an instance of `ActorDistributionRnnNetwork`.

    Args:
      input_tensor_spec: A nest of `tensor_spec.TensorSpec` representing the
        input.
      output_tensor_spec: A nest of `tensor_spec.BoundedTensorSpec` representing
        the output.
      preprocessing_layers: (Optional.) A nest of `tf.keras.layers.Layer`
        representing preprocessing for the different observations. All of these
        layers must not be already built. For more details see the documentation
        of `networks.EncodingNetwork`.
      preprocessing_combiner: (Optional.) A keras layer that takes a flat list
        of tensors and combines them. Good options include `tf.keras.layers.Add`
        and `tf.keras.layers.Concatenate(axis=-1)`. This layer must not be
        already built. For more details see the documentation of
        `networks.EncodingNetwork`.
      conv_layer_params: Optional list of convolution layers parameters, where
        each item is a length-three tuple indicating (filters, kernel_size,
        stride).
      input_fc_layer_params: Optional list of fully_connected parameters, where
        each item is the number of units in the layer. This is applied before
        the LSTM cell.
      input_dropout_layer_params: Optional list of dropout layer parameters,
        each item is the fraction of input units to drop or a dictionary of
        parameters according to the keras.Dropout documentation. The additional
        parameter `permanent`, if set to True, allows to apply dropout at
        inference for approximated Bayesian inference. The dropout layers are
        interleaved with the fully connected layers; there is a dropout layer
        after each fully connected layer, except if the entry in the list is
        None. This list must have the same length of input_fc_layer_params, or
        be None.
      lstm_size: An iterable of ints specifying the LSTM cell sizes to use.
      output_fc_layer_params: Optional list of fully_connected parameters, where
        each item is the number of units in the layer. This is applied after the
        LSTM cell.
      activation_fn: Activation function, e.g. tf.nn.relu, slim.leaky_relu, ...
      dtype: The dtype to use by the convolution and fully connected layers.
      discrete_projection_net: Callable that generates a discrete projection
        network to be called with some hidden state and the outer_rank of the
        state.
      continuous_projection_net: Callable that generates a continuous projection
        network to be called with some hidden state and the outer_rank of the
        state.
      rnn_construction_fn: (Optional.) Alternate RNN construction function, e.g.
        tf.keras.layers.LSTM, tf.keras.layers.CuDNNLSTM. It is invalid to
        provide both rnn_construction_fn and lstm_size.
      rnn_construction_kwargs: (Optional.) Dictionary or arguments to pass to
        rnn_construction_fn.
        The RNN will be constructed via: ``` rnn_layer =
        rnn_construction_fn(**rnn_construction_kwargs) ```
      name: A string representing name of the network.

    Raises:
      ValueError: If "input_dropout_layer_params" is not None.
    """
    # Dropout between the input FC layers is not supported by the attention
    # encoder; reject it explicitly rather than silently ignoring it.
    if input_dropout_layer_params:
      raise ValueError("Dropout layer is not supported.")
    lstm_encoder = AttentionLSTMEncodingNetwork(
        input_tensor_spec=input_tensor_spec,
        preprocessing_layers=preprocessing_layers,
        preprocessing_combiner=preprocessing_combiner,
        conv_layer_params=conv_layer_params,
        input_fc_layer_params=input_fc_layer_params,
        lstm_size=lstm_size,
        output_fc_layer_params=output_fc_layer_params,
        activation_fn=activation_fn,
        rnn_construction_fn=rnn_construction_fn,
        rnn_construction_kwargs=rnn_construction_kwargs,
        dtype=dtype,
        name=name)

    def map_proj(spec):
      # Discrete action specs get the categorical head; everything else
      # gets the continuous (normal) head.
      if tensor_spec.is_discrete(spec):
        return discrete_projection_net(spec)
      else:
        return continuous_projection_net(spec)

    projection_networks = tf.nest.map_structure(map_proj, output_tensor_spec)
    output_spec = tf.nest.map_structure(lambda proj_net: proj_net.output_spec,
                                        projection_networks)
    super(AttentionActorDistributionRnnNetwork, self).__init__(
        input_tensor_spec=input_tensor_spec,
        state_spec=lstm_encoder.state_spec,
        output_spec=output_spec,
        name=name)
    self._lstm_encoder = lstm_encoder
    self._projection_networks = projection_networks
    self._output_tensor_spec = output_tensor_spec

  def __call__(self, inputs, *args, **kwargs):
    # Dispatch through the attention-aware base so the extra attention
    # output is handled by the distribution-network call path.
    return AttentionDistributionNetwork.__call__(self, inputs, *args, **kwargs)

  @property
  def output_tensor_spec(self):
    # Spec of the actions produced by the projection heads.
    return self._output_tensor_spec

  def call(self, observation, step_type, network_state=(), training=False):
    """Maps observations to action distributions, state and attention.

    Returns:
      `(output_actions, network_state, attention_weights)`.
    """
    state, network_state, attention_weights = self._lstm_encoder(
        observation,
        step_type=step_type,
        network_state=network_state,
        training=training)
    outer_rank = nest_utils.get_outer_rank(observation, self.input_tensor_spec)
    # Keep only the first element of each projection net's output (the
    # action distribution); the remainder is the projection net's state.
    output_actions = tf.nest.map_structure(
        lambda proj_net: proj_net(state, outer_rank, training=training)[0],
        self._projection_networks)
    return output_actions, network_state, attention_weights
@gin.configurable
class AttentionValueRnnNetwork(network.Network):
  """Recurrent value network. Reduces to 1 value output per batch item."""

  def __init__(self,
               input_tensor_spec,
               preprocessing_layers=None,
               preprocessing_combiner=None,
               conv_layer_params=None,
               input_fc_layer_params=(75, 40),
               input_dropout_layer_params=None,
               lstm_size=(40,),
               output_fc_layer_params=(75, 40),
               activation_fn=tf.keras.activations.relu,
               dtype=tf.float32,
               name="ValueRnnNetwork"):
    """Creates an instance of `ValueRnnNetwork`.

    Network supports calls with shape outer_rank + input_tensor_shape.shape.
    Note outer_rank must be at least 1.

    Args:
      input_tensor_spec: A nest of `tensor_spec.TensorSpec` representing the
        input observations.
      preprocessing_layers: (Optional.) A nest of `tf.keras.layers.Layer`
        representing preprocessing for the different observations. All of these
        layers must not be already built. For more details see the documentation
        of `networks.EncodingNetwork`.
      preprocessing_combiner: (Optional.) A keras layer that takes a flat list
        of tensors and combines them. Good options include
        `tf.keras.layers.Add` and `tf.keras.layers.Concatenate(axis=-1)`. This
        layer must not be already built. For more details see the documentation
        of `networks.EncodingNetwork`.
      conv_layer_params: Optional list of convolution layers parameters, where
        each item is a length-three tuple indicating (filters, kernel_size,
        stride).
      input_fc_layer_params: Optional list of fully_connected parameters, where
        each item is the number of units in the layer. This is applied before
        the LSTM cell.
      input_dropout_layer_params: Optional list of dropout layer parameters,
        where each item is the fraction of input units to drop. The dropout
        layers are interleaved with the fully connected layers; there is a
        dropout layer after each fully connected layer, except if the entry in
        the list is None. This list must have the same length of
        input_fc_layer_params, or be None.
      lstm_size: An iterable of ints specifying the LSTM cell sizes to use.
      output_fc_layer_params: Optional list of fully_connected parameters, where
        each item is the number of units in the layer. This is applied after the
        LSTM cell.
      activation_fn: Activation function, e.g. tf.keras.activations.relu,.
      dtype: The dtype to use by the convolution, LSTM, and fully connected
        layers.
      name: A string representing name of the network.
    """
    # Dropout is accepted for interface compatibility but not supported by
    # the attention encoder, so the argument is deliberately discarded.
    del input_dropout_layer_params
    lstm_encoder = AttentionLSTMEncodingNetwork(
        input_tensor_spec=input_tensor_spec,
        preprocessing_layers=preprocessing_layers,
        preprocessing_combiner=preprocessing_combiner,
        conv_layer_params=conv_layer_params,
        input_fc_layer_params=input_fc_layer_params,
        lstm_size=lstm_size,
        output_fc_layer_params=output_fc_layer_params,
        activation_fn=activation_fn,
        dtype=dtype,
        name=name)
    # Single-unit linear head producing one value per (batch, time) item.
    postprocessing_layers = tf.keras.layers.Dense(
        1,
        activation=None,
        kernel_initializer=tf.compat.v1.initializers.random_uniform(
            minval=-0.03, maxval=0.03))
    super(AttentionValueRnnNetwork, self).__init__(
        input_tensor_spec=input_tensor_spec,
        state_spec=lstm_encoder.state_spec,
        name=name)
    self._lstm_encoder = lstm_encoder
    self._postprocessing_layers = postprocessing_layers

  def call(self, observation, step_type=None, network_state=(), training=False):
    """Computes a scalar value estimate per batch item.

    The encoder's attention weights are discarded here; only the value
    estimate and the next network state are returned.
    """
    state, network_state, _ = self._lstm_encoder(
        observation,
        step_type=step_type,
        network_state=network_state,
        training=training)
    value = self._postprocessing_layers(state, training=training)
    # Squeeze the trailing unit dimension added by the Dense(1) head.
    return tf.squeeze(value, -1), network_state
| [
"tensorflow.tile",
"tensorflow.equal",
"tensorflow.shape",
"tf_agents.networks.actor_distribution_rnn_network.ActorDistributionRnnNetwork",
"tensorflow.reduce_sum",
"numpy.log",
"tf_agents.utils.nest_utils.flatten_up_to",
"tf_agents.networks.network.tf_inspect.getargspec",
"tf_agents.specs.tensor_sp... | [((3503, 3540), 'numpy.zeros', 'np.zeros', (['(h, w, d)'], {'dtype': 'np.float32'}), '((h, w, d), dtype=np.float32)\n', (3511, 3540), True, 'import numpy as np\n'), ((9877, 9936), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['(lambda l: None)', 'preprocessing_layers'], {}), '(lambda l: None, preprocessing_layers)\n', (9898, 9936), True, 'import tensorflow as tf\n'), ((10008, 10070), 'tf_agents.utils.nest_utils.flatten_up_to', 'nest_utils.flatten_up_to', (['preprocessing_nest', 'observation_spec'], {}), '(preprocessing_nest, observation_spec)\n', (10032, 10070), False, 'from tf_agents.utils import nest_utils\n'), ((14190, 14226), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (14217, 14226), True, 'import tensorflow as tf\n'), ((15712, 15841), 'tf_agents.networks.categorical_projection_network.CategoricalProjectionNetwork', 'categorical_projection_network.CategoricalProjectionNetwork', (['action_spec'], {'logits_init_output_factor': 'logits_init_output_factor'}), '(action_spec,\n logits_init_output_factor=logits_init_output_factor)\n', (15771, 15841), False, 'from tf_agents.networks import categorical_projection_network\n'), ((16077, 16253), 'tf_agents.networks.normal_projection_network.NormalProjectionNetwork', 'normal_projection_network.NormalProjectionNetwork', (['action_spec'], {'init_means_output_factor': 'init_means_output_factor', 'std_bias_initializer_value': 'std_bias_initializer_value'}), '(action_spec,\n init_means_output_factor=init_means_output_factor,\n std_bias_initializer_value=std_bias_initializer_value)\n', (16126, 16253), False, 'from tf_agents.networks import normal_projection_network\n'), ((2075, 2174), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['num_ch', '(3)'], {'strides': '(1)', 'padding': '"""same"""', 'kernel_initializer': '"""lecun_normal"""'}), "(num_ch, 3, strides=1, padding='same',\n 
kernel_initializer='lecun_normal')\n", (2097, 2174), True, 'import tensorflow as tf\n'), ((2201, 2266), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': '(3)', 'padding': '"""same"""', 'strides': '(2)'}), "(pool_size=3, padding='same', strides=2)\n", (2226, 2266), True, 'import tensorflow as tf\n'), ((2925, 2964), 'six.moves.zip', 'zip', (['self._res_convs0', 'self._res_convs1'], {}), '(self._res_convs0, self._res_convs1)\n', (2928, 2964), False, 'from six.moves import zip\n'), ((3658, 3691), 'numpy.arange', 'np.arange', (['(0)', 'h'], {'dtype': 'np.float32'}), '(0, h, dtype=np.float32)\n', (3667, 3691), True, 'import numpy as np\n'), ((3722, 3755), 'numpy.arange', 'np.arange', (['(0)', 'w'], {'dtype': 'np.float32'}), '(0, w, dtype=np.float32)\n', (3731, 3755), True, 'import numpy as np\n'), ((3788, 3808), 'numpy.sin', 'np.sin', (['(h_grid * div)'], {}), '(h_grid * div)\n', (3794, 3808), True, 'import numpy as np\n'), ((3855, 3875), 'numpy.cos', 'np.cos', (['(h_grid * div)'], {}), '(h_grid * div)\n', (3861, 3875), True, 'import numpy as np\n'), ((3921, 3941), 'numpy.sin', 'np.sin', (['(w_grid * div)'], {}), '(w_grid * div)\n', (3927, 3941), True, 'import numpy as np\n'), ((3991, 4011), 'numpy.cos', 'np.cos', (['(w_grid * div)'], {}), '(w_grid * div)\n', (3997, 4011), True, 'import numpy as np\n'), ((4452, 4488), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (4479, 4488), True, 'import tensorflow as tf\n'), ((4913, 4948), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['conv_filters'], {}), '(conv_filters)\n', (4934, 4948), True, 'import tensorflow as tf\n'), ((4962, 5017), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['conv_filters', '(1)'], {'padding': '"""same"""'}), "(conv_filters, 1, padding='same')\n", (4984, 5017), True, 'import tensorflow as tf\n'), ((5031, 5086), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', 
(['conv_filters', '(1)'], {'padding': '"""same"""'}), "(conv_filters, 1, padding='same')\n", (5053, 5086), True, 'import tensorflow as tf\n'), ((5419, 5469), 'tensorflow.tile', 'tf.tile', (['self.spatial_basis', '(batch_size, 1, 1, 1)'], {}), '(self.spatial_basis, (batch_size, 1, 1, 1))\n', (5426, 5469), True, 'import tensorflow as tf\n'), ((5491, 5567), 'tensorflow.concat', 'tf.concat', (['(input_copy[self.image_index_flat], spatial_basis_tiled)'], {'axis': '(-1)'}), '((input_copy[self.image_index_flat], spatial_basis_tiled), axis=-1)\n', (5500, 5567), True, 'import tensorflow as tf\n'), ((5828, 5887), 'tensorflow.reshape', 'tf.reshape', (['query', '(-1, 1, 1, self.n_heads, depth_per_head)'], {}), '(query, (-1, 1, 1, self.n_heads, depth_per_head))\n', (5838, 5887), True, 'import tensorflow as tf\n'), ((5902, 5960), 'tensorflow.reshape', 'tf.reshape', (['keys', '(-1, h, w, self.n_heads, depth_per_head)'], {}), '(keys, (-1, h, w, self.n_heads, depth_per_head))\n', (5912, 5960), True, 'import tensorflow as tf\n'), ((5975, 6036), 'tensorflow.reshape', 'tf.reshape', (['values', '(-1, h * w, self.n_heads, depth_per_head)'], {}), '(values, (-1, h * w, self.n_heads, depth_per_head))\n', (5985, 6036), True, 'import tensorflow as tf\n'), ((6062, 6103), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(q_heads * k_heads)'], {'axis': '(-1)'}), '(q_heads * k_heads, axis=-1)\n', (6075, 6103), True, 'import tensorflow as tf\n'), ((6299, 6395), 'tensorflow.reshape', 'tf.reshape', (['(attention_weights[:, :, :, tf.newaxis] * v_heads)', '(-1, h * w, self.conv_filters)'], {}), '(attention_weights[:, :, :, tf.newaxis] * v_heads, (-1, h * w,\n self.conv_filters))\n', (6309, 6395), True, 'import tensorflow as tf\n'), ((6449, 6489), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weighted_features'], {'axis': '(1)'}), '(weighted_features, axis=1)\n', (6462, 6489), True, 'import tensorflow as tf\n'), ((8954, 8989), 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['(lambda x: x)'], 
{}), '(lambda x: x)\n', (8976, 8989), True, 'import tensorflow as tf\n'), ((10668, 10718), 'tensorflow.keras.utils.custom_object_scope', 'tf.keras.utils.custom_object_scope', (['custom_objects'], {}), '(custom_objects)\n', (10702, 10718), True, 'import tensorflow as tf\n'), ((13195, 13230), 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['(lambda x: x)'], {}), '(lambda x: x)\n', (13217, 13230), True, 'import tensorflow as tf\n'), ((14273, 14323), 'tensorflow.keras.utils.custom_object_scope', 'tf.keras.utils.custom_object_scope', (['custom_objects'], {}), '(custom_objects)\n', (14307, 14323), True, 'import tensorflow as tf\n'), ((18309, 18349), 'tf_agents.networks.network.tf_inspect.getargspec', 'network.tf_inspect.getargspec', (['self.call'], {}), '(self.call)\n', (18338, 18349), False, 'from tf_agents.networks import network\n'), ((18442, 18508), 'tf_agents.networks.network.tf_inspect.getcallargs', 'network.tf_inspect.getcallargs', (['self.call', 'inputs', '*args'], {}), '(self.call, inputs, *args, **kwargs)\n', (18472, 18508), False, 'from tf_agents.networks import network\n'), ((19491, 19548), 'tensorflow.keras.layers.Layer.__call__', 'tf.keras.layers.Layer.__call__', (['self'], {}), '(self, **normalized_kwargs)\n', (19521, 19548), True, 'import tensorflow as tf\n'), ((19563, 19744), 'tf_agents.utils.nest_utils.assert_matching_dtypes_and_inner_shapes', 'nest_utils.assert_matching_dtypes_and_inner_shapes', (['new_state', 'self.state_spec'], {'allow_extra_fields': '(True)', 'caller': 'self', 'tensors_name': '"""`new_state`"""', 'specs_name': '"""`state_spec`"""'}), "(new_state, self.\n state_spec, allow_extra_fields=True, caller=self, tensors_name=\n '`new_state`', specs_name='`state_spec`')\n", (19613, 19744), False, 'from tf_agents.utils import nest_utils\n'), ((22796, 22900), 'tensorflow.compat.v1.variance_scaling_initializer', 'tf.compat.v1.variance_scaling_initializer', ([], {'scale': '(2.0)', 'mode': '"""fan_in"""', 'distribution': 
'"""truncated_normal"""'}), "(scale=2.0, mode='fan_in',\n distribution='truncated_normal')\n", (22837, 22900), True, 'import tensorflow as tf\n'), ((24005, 24067), 'tf_agents.utils.nest_utils.get_outer_rank', 'nest_utils.get_outer_rank', (['observation', 'self.input_tensor_spec'], {}), '(observation, self.input_tensor_spec)\n', (24030, 24067), False, 'from tf_agents.utils import nest_utils\n'), ((30634, 30685), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['map_proj', 'output_tensor_spec'], {}), '(map_proj, output_tensor_spec)\n', (30655, 30685), True, 'import tensorflow as tf\n'), ((30704, 30789), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['(lambda proj_net: proj_net.output_spec)', 'projection_networks'], {}), '(lambda proj_net: proj_net.output_spec,\n projection_networks)\n', (30725, 30789), True, 'import tensorflow as tf\n'), ((31656, 31718), 'tf_agents.utils.nest_utils.get_outer_rank', 'nest_utils.get_outer_rank', (['observation', 'self.input_tensor_spec'], {}), '(observation, self.input_tensor_spec)\n', (31681, 31718), False, 'from tf_agents.utils import nest_utils\n'), ((2310, 2438), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['num_ch', '(3)'], {'strides': '(1)', 'padding': '"""same"""', 'name': "('res_%d/conv2d_0' % i)", 'kernel_initializer': '"""lecun_normal"""'}), "(num_ch, 3, strides=1, padding='same', name=\n 'res_%d/conv2d_0' % i, kernel_initializer='lecun_normal')\n", (2332, 2438), True, 'import tensorflow as tf\n'), ((2533, 2661), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['num_ch', '(3)'], {'strides': '(1)', 'padding': '"""same"""', 'name': "('res_%d/conv2d_1' % i)", 'kernel_initializer': '"""lecun_normal"""'}), "(num_ch, 3, strides=1, padding='same', name=\n 'res_%d/conv2d_1' % i, kernel_initializer='lecun_normal')\n", (2555, 2661), True, 'import tensorflow as tf\n'), ((3012, 3032), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv_out'], {}), '(conv_out)\n', (3022, 3032), True, 'import 
tensorflow as tf\n'), ((3087, 3107), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv_out'], {}), '(conv_out)\n', (3097, 3107), True, 'import tensorflow as tf\n'), ((5346, 5389), 'tensorflow.shape', 'tf.shape', (['input_copy[self.image_index_flat]'], {}), '(input_copy[self.image_index_flat])\n', (5354, 5389), True, 'import tensorflow as tf\n'), ((6219, 6261), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['attention_weights'], {'axis': '(-1)'}), '(attention_weights, axis=-1)\n', (6233, 6261), True, 'import tensorflow as tf\n'), ((14360, 14641), 'tf_agents.networks.actor_distribution_rnn_network.ActorDistributionRnnNetwork', 'actor_distribution_rnn_network.ActorDistributionRnnNetwork', (['observation_spec', 'action_spec'], {'preprocessing_layers': 'preprocessing_layers', 'preprocessing_combiner': 'preprocessing_combiner', 'input_fc_layer_params': 'actor_fc_layers', 'output_fc_layer_params': 'None', 'lstm_size': 'lstm_size'}), '(observation_spec,\n action_spec, preprocessing_layers=preprocessing_layers,\n preprocessing_combiner=preprocessing_combiner, input_fc_layer_params=\n actor_fc_layers, output_fc_layer_params=None, lstm_size=lstm_size)\n', (14418, 14641), False, 'from tf_agents.networks import actor_distribution_rnn_network\n'), ((14718, 14936), 'tf_agents.networks.value_rnn_network.ValueRnnNetwork', 'value_rnn_network.ValueRnnNetwork', (['observation_spec'], {'preprocessing_layers': 'preprocessing_layers', 'preprocessing_combiner': 'preprocessing_combiner', 'input_fc_layer_params': 'value_fc_layers', 'output_fc_layer_params': 'None'}), '(observation_spec, preprocessing_layers=\n preprocessing_layers, preprocessing_combiner=preprocessing_combiner,\n input_fc_layer_params=value_fc_layers, output_fc_layer_params=None)\n', (14751, 14936), False, 'from tf_agents.networks import value_rnn_network\n'), ((15007, 15266), 'tf_agents.networks.actor_distribution_network.ActorDistributionNetwork', 'actor_distribution_network.ActorDistributionNetwork', (['observation_spec', 
'action_spec'], {'preprocessing_layers': 'preprocessing_layers', 'preprocessing_combiner': 'preprocessing_combiner', 'fc_layer_params': 'actor_fc_layers', 'activation_fn': 'tf.keras.activations.tanh'}), '(observation_spec,\n action_spec, preprocessing_layers=preprocessing_layers,\n preprocessing_combiner=preprocessing_combiner, fc_layer_params=\n actor_fc_layers, activation_fn=tf.keras.activations.tanh)\n', (15058, 15266), False, 'from tf_agents.networks import actor_distribution_network\n'), ((15333, 15550), 'tf_agents.networks.value_network.ValueNetwork', 'value_network.ValueNetwork', (['observation_spec'], {'preprocessing_layers': 'preprocessing_layers', 'preprocessing_combiner': 'preprocessing_combiner', 'fc_layer_params': 'value_fc_layers', 'activation_fn': 'tf.keras.activations.tanh'}), '(observation_spec, preprocessing_layers=\n preprocessing_layers, preprocessing_combiner=preprocessing_combiner,\n fc_layer_params=value_fc_layers, activation_fn=tf.keras.activations.tanh)\n', (15359, 15550), False, 'from tf_agents.networks import value_network\n'), ((16035, 16061), 'numpy.exp', 'np.exp', (['init_action_stddev'], {}), '(init_action_stddev)\n', (16041, 16061), True, 'import numpy as np\n'), ((18048, 18237), 'tf_agents.utils.nest_utils.assert_matching_dtypes_and_inner_shapes', 'nest_utils.assert_matching_dtypes_and_inner_shapes', (['inputs', 'self.input_tensor_spec'], {'allow_extra_fields': '(True)', 'caller': 'self', 'tensors_name': '"""`inputs`"""', 'specs_name': '"""`input_tensor_spec`"""'}), "(inputs, self.\n input_tensor_spec, allow_extra_fields=True, caller=self, tensors_name=\n '`inputs`', specs_name='`input_tensor_spec`')\n", (18098, 18237), False, 'from tf_agents.utils import nest_utils\n'), ((18909, 19098), 'tf_agents.utils.nest_utils.assert_matching_dtypes_and_inner_shapes', 'nest_utils.assert_matching_dtypes_and_inner_shapes', (['network_state', 'self.state_spec'], {'allow_extra_fields': '(True)', 'caller': 'self', 'tensors_name': 
'"""`network_state`"""', 'specs_name': '"""`state_spec`"""'}), "(network_state, self.\n state_spec, allow_extra_fields=True, caller=self, tensors_name=\n '`network_state`', specs_name='`state_spec`')\n", (18959, 19098), False, 'from tf_agents.utils import nest_utils\n'), ((20519, 20581), 'tf_agents.utils.nest_utils.get_outer_rank', 'nest_utils.get_outer_rank', (['observation', 'self.input_tensor_spec'], {}), '(observation, self.input_tensor_spec)\n', (20544, 20581), False, 'from tf_agents.utils import nest_utils\n'), ((20648, 20677), 'tf_agents.networks.utils.BatchSquash', 'utils.BatchSquash', (['outer_rank'], {}), '(outer_rank)\n', (20665, 20677), False, 'from tf_agents.networks import utils\n'), ((20698, 20754), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['batch_squash.flatten', 'observation'], {}), '(batch_squash.flatten, observation)\n', (20719, 20754), True, 'import tensorflow as tf\n'), ((21607, 21660), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['batch_squash.unflatten', 'states'], {}), '(batch_squash.unflatten, states)\n', (21628, 21660), True, 'import tensorflow as tf\n'), ((24887, 24945), 'tensorflow.equal', 'tf.equal', (['step_type', 'time_step.StepType.FIRST'], {'name': '"""mask"""'}), "(step_type, time_step.StepType.FIRST, name='mask')\n", (24895, 24945), True, 'import tensorflow as tf\n'), ((25581, 25603), 'tensorflow.squeeze', 'tf.squeeze', (['state', '[1]'], {}), '(state, [1])\n', (25591, 25603), True, 'import tensorflow as tf\n'), ((30472, 30501), 'tf_agents.specs.tensor_spec.is_discrete', 'tensor_spec.is_discrete', (['spec'], {}), '(spec)\n', (30495, 30501), False, 'from tf_agents.specs import tensor_spec\n'), ((35983, 36004), 'tensorflow.squeeze', 'tf.squeeze', (['value', '(-1)'], {}), '(value, -1)\n', (35993, 36004), True, 'import tensorflow as tf\n'), ((3563, 3604), 'numpy.arange', 'np.arange', (['(0)', 'half_d', '(2)'], {'dtype': 'np.float32'}), '(0, half_d, 2, dtype=np.float32)\n', (3572, 3604), True, 'import 
numpy as np\n'), ((4784, 4851), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['(image_shape[0] * image_shape[1], n_heads)'], {}), '((image_shape[0] * image_shape[1], n_heads))\n', (4807, 4851), True, 'import tensorflow as tf\n'), ((4861, 4892), 'tensorflow.keras.layers.Softmax', 'tf.keras.layers.Softmax', ([], {'axis': '(1)'}), '(axis=1)\n', (4884, 4892), True, 'import tensorflow as tf\n'), ((9084, 9119), 'social_rl.multiagent_tfagents.multigrid_networks.cast_and_scale', 'multigrid_networks.cast_and_scale', ([], {}), '()\n', (9117, 9119), False, 'from social_rl.multiagent_tfagents import multigrid_networks\n'), ((9200, 9222), 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), '()\n', (9220, 9222), True, 'import tensorflow as tf\n'), ((9312, 9347), 'social_rl.multiagent_tfagents.multigrid_networks.cast_and_scale', 'multigrid_networks.cast_and_scale', ([], {}), '()\n', (9345, 9347), False, 'from social_rl.multiagent_tfagents import multigrid_networks\n'), ((9357, 9422), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['conv_filters', 'conv_kernel'], {'padding': '"""same"""'}), "(conv_filters, conv_kernel, padding='same')\n", (9379, 9422), True, 'import tensorflow as tf\n'), ((9432, 9454), 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), '()\n', (9452, 9454), True, 'import tensorflow as tf\n'), ((9578, 9622), 'social_rl.multiagent_tfagents.multigrid_networks.one_hot_layer', 'multigrid_networks.one_hot_layer', (['scalar_dim'], {}), '(scalar_dim)\n', (9610, 9622), False, 'from social_rl.multiagent_tfagents import multigrid_networks\n'), ((9633, 9665), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['scalar_fc'], {}), '(scalar_fc)\n', (9654, 9665), True, 'import tensorflow as tf\n'), ((9781, 9816), 'social_rl.multiagent_tfagents.multigrid_networks.cast_and_scale', 'multigrid_networks.cast_and_scale', ([], {}), '()\n', (9814, 9816), False, 'from social_rl.multiagent_tfagents import 
multigrid_networks\n'), ((9818, 9850), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['scalar_fc'], {}), '(scalar_fc)\n', (9839, 9850), True, 'import tensorflow as tf\n'), ((13325, 13360), 'social_rl.multiagent_tfagents.multigrid_networks.cast_and_scale', 'multigrid_networks.cast_and_scale', ([], {}), '()\n', (13358, 13360), False, 'from social_rl.multiagent_tfagents import multigrid_networks\n'), ((13441, 13463), 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), '()\n', (13461, 13463), True, 'import tensorflow as tf\n'), ((13473, 13498), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (13496, 13498), True, 'import tensorflow as tf\n'), ((13587, 13622), 'social_rl.multiagent_tfagents.multigrid_networks.cast_and_scale', 'multigrid_networks.cast_and_scale', ([], {}), '()\n', (13620, 13622), False, 'from social_rl.multiagent_tfagents import multigrid_networks\n'), ((13632, 13697), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['conv_filters', 'conv_kernel'], {'padding': '"""same"""'}), "(conv_filters, conv_kernel, padding='same')\n", (13654, 13697), True, 'import tensorflow as tf\n'), ((13707, 13729), 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), '()\n', (13727, 13729), True, 'import tensorflow as tf\n'), ((13739, 13764), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (13762, 13764), True, 'import tensorflow as tf\n'), ((13887, 13931), 'social_rl.multiagent_tfagents.multigrid_networks.one_hot_layer', 'multigrid_networks.one_hot_layer', (['scalar_dim'], {}), '(scalar_dim)\n', (13919, 13931), False, 'from social_rl.multiagent_tfagents import multigrid_networks\n'), ((13942, 13974), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['scalar_fc'], {}), '(scalar_fc)\n', (13963, 13974), True, 'import tensorflow as tf\n'), ((14090, 14125), 'social_rl.multiagent_tfagents.multigrid_networks.cast_and_scale', 
'multigrid_networks.cast_and_scale', ([], {}), '()\n', (14123, 14125), False, 'from social_rl.multiagent_tfagents import multigrid_networks\n'), ((14127, 14159), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['scalar_fc'], {}), '(scalar_fc)\n', (14148, 14159), True, 'import tensorflow as tf\n'), ((20904, 20967), 'tf_agents.utils.nest_utils.flatten_up_to', 'nest_utils.flatten_up_to', (['self._preprocessing_nest', 'observation'], {}), '(self._preprocessing_nest, observation)\n', (20928, 20967), False, 'from tf_agents.utils import nest_utils\n'), ((25380, 25407), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['output[1:]'], {}), '(output[1:])\n', (25395, 25407), True, 'import tensorflow as tf\n'), ((35322, 35389), 'tensorflow.compat.v1.initializers.random_uniform', 'tf.compat.v1.initializers.random_uniform', ([], {'minval': '(-0.03)', 'maxval': '(0.03)'}), '(minval=-0.03, maxval=0.03)\n', (35362, 35389), True, 'import tensorflow as tf\n'), ((3608, 3621), 'numpy.log', 'np.log', (['(100.0)'], {}), '(100.0)\n', (3614, 3621), True, 'import numpy as np\n'), ((24415, 24435), 'tensorflow.expand_dims', 'tf.expand_dims', (['t', '(1)'], {}), '(t, 1)\n', (24429, 24435), True, 'import tensorflow as tf\n'), ((24542, 24562), 'tensorflow.expand_dims', 'tf.expand_dims', (['t', '(1)'], {}), '(t, 1)\n', (24556, 24562), True, 'import tensorflow as tf\n')] |
import numpy as np
__all__ = [
'Task'
]
class Task(object):
    def __init__(self, ndim=None, seed=None):
        """Base class for an optimization task.

        :param ndim: dimensionality of the task; if None it is inferred
            lazily on the first `ndim()` call.
        :param seed: seed for the task's private RandomState.
        """
        self._ndim = ndim
        self.rng = np.random.RandomState(seed)
    def set_seed(self, seed=None):
        """Replace the task's private random number generator with a freshly
        seeded one."""
        self.rng = np.random.RandomState(seed)
    def name(self):
        """Human-readable task name (the concrete class name)."""
        return self.__class__.__name__
    def ndim(self):
        """Dimensionality of the task, inferred on first use if not given.

        Inference probes `ground_truth_generator()(1)` and takes
        `.shape[0]` — assumes the generated sample's first axis length is
        the dimensionality (confirm against concrete subclasses).
        """
        if self._ndim is None:
            self._ndim = self.ground_truth_generator()(1).shape[0]
        return self._ndim
def search_space(self):
raise NotImplementedError()
def parameters_names(self):
return [
'param_%d' % (i, )
for i in range(len(self.search_space()))
]
def solution(self):
raise NotImplementedError
def example_parameters(self):
state = np.random.get_state()
np.random.seed(1234444)
ss = np.array(self.search_space())
u = np.random.uniform(size=len(ss))
result = u * (ss[:, 1] - ss[:, 0]) + ss[:, 0]
np.random.set_state(state)
return result
    def ground_truth_generator(self):
        """Return a sampler for the real data-generating process.

        Must be provided by subclasses; `ndim()` probes the returned
        callable as `generator(1).shape[0]`.
        """
        raise NotImplementedError()
    def generator(self, params):
        """Return a sampler parameterized by `params`; subclass hook."""
        raise NotImplementedError()
    def transform(self, data0, params):
        """
        This function transforms sample `data0` from the ground-truth generator
        into sample from a generator with parameters `params`.
        Useful only for synthetic examples.
        """
        # NOTE: is_synthetic() probes this method with dummy arguments to
        # detect whether a concrete implementation exists.
        raise NotImplementedError()
def is_synthetic(self):
try:
self.transform(None, None)
except NotImplementedError:
return False
except:
return True
    def model_parameters(self):
        """Extra keyword arguments forwarded to the model factories;
        override in subclasses that need non-default model settings."""
        return dict()
    def models(self, seed=None):
        """Build the dictionary of candidate models (NN and GBDT families).

        :param seed: if given, it is supplied both as `seed` and as
            `random_state`; presumably `apply_with_kwargs` forwards only
            the keywords each factory accepts — confirm in ..meta.
        """
        from .utils import nn_models, gbdt_models
        from ..meta import apply_with_kwargs
        parameters = self.model_parameters()
        parameters['ndim'] = self.ndim()
        if seed is not None:
            parameters['seed'] = seed
            parameters['random_state'] = seed
        ms = dict()
        ms.update(
            apply_with_kwargs(nn_models, **parameters)
        )
        ms.update(
            apply_with_kwargs(gbdt_models, **parameters)
        )
        return ms
def optimizers(self):
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import Matern
from ..opt import bayesian_optimization, random_search
return {
'BO' : bayesian_optimization(
base_estimator=GaussianProcessRegressor(
kernel=Matern(length_scale=1, length_scale_bounds=(1e-3, 1e+3)),
alpha=1e-4
),
n_initial_points=5
),
'RS' : random_search()
} | [
"numpy.random.get_state",
"numpy.random.set_state",
"skopt.learning.gaussian_process.kernels.Matern",
"numpy.random.seed",
"numpy.random.RandomState"
] | [((145, 172), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (166, 172), True, 'import numpy as np\n'), ((222, 249), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (243, 249), True, 'import numpy as np\n'), ((713, 734), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (732, 734), True, 'import numpy as np\n'), ((739, 762), 'numpy.random.seed', 'np.random.seed', (['(1234444)'], {}), '(1234444)\n', (753, 762), True, 'import numpy as np\n'), ((898, 924), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (917, 924), True, 'import numpy as np\n'), ((2330, 2389), 'skopt.learning.gaussian_process.kernels.Matern', 'Matern', ([], {'length_scale': '(1)', 'length_scale_bounds': '(0.001, 1000.0)'}), '(length_scale=1, length_scale_bounds=(0.001, 1000.0))\n', (2336, 2389), False, 'from skopt.learning.gaussian_process.kernels import Matern\n')] |
import pandas as pd
import gensim
import multiprocessing
import numpy as np
from utils import pickle_obj, semantic_search_author, semantic_search_word, get_related_authors, get_related_words, translate_dict
from sklearn.manifold import TSNE
from bokeh.plotting import figure, show, output_notebook, output_file, save
from bokeh.models import HoverTool, ColumnDataSource, value
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse.linalg import svds
# Per-university publication dumps (one row per author/abstract record).
df_kth = pd.read_csv("assets/dataframes/all_authors_df_2004")
df_su = pd.read_csv("assets/dataframes/suDf")
df_uppsala = pd.read_csv("assets/dataframes/uppsalaDf")
df_sodertorn = pd.read_csv("assets/dataframes/sodertornDf")
# Align KTH's column names with the other three datasets.
df_kth = df_kth.rename(columns={"KTH_id": "Auth_id", "KTH_name": "Auth_name"})
def get_nlp_data(df):
    """Extract the (abstracts, doc ids, author ids, author names) arrays
    from a publication dataframe, in that order."""
    columns = ('Abstracts', 'Doc_id', 'Auth_id', 'Auth_name')
    return tuple(df[column].values for column in columns)
text_doc_kth, doc_id_kth, auth_kth, name_kth = get_nlp_data(df_kth)
text_doc_su, doc_id_su, auth_su, name_su = get_nlp_data(df_su)
text_doc_uppsala, doc_id_uppsala, auth_uppsala, name_uppsala = get_nlp_data(df_uppsala)
text_doc_sodertorn, doc_id_sodertorn, auth_sodertorn, name_sodertorn = get_nlp_data(df_sodertorn)
# Pool all four universities into single arrays; doc ids are cast to str
# because they are later used as doc2vec document tags.
TEXT = np.concatenate([text_doc_kth, text_doc_su, text_doc_uppsala, text_doc_sodertorn])
DOCID = np.concatenate([doc_id_kth, doc_id_su, doc_id_uppsala, doc_id_sodertorn]).astype(str)
AUTHID = np.concatenate([auth_kth, auth_su, auth_uppsala, auth_sodertorn ])
NAME = np.concatenate([name_kth, name_su, name_uppsala, name_sodertorn ])
# Combined frame kept for downstream lookups (author <-> doc id).
df = pd.DataFrame(data=list(zip(TEXT, AUTHID, DOCID, NAME)), columns=["Abstracts", "Auth_id", "Doc_id", "Auth_name"])
def read_corpus(abstracts, doc):
    """Yield one gensim TaggedDocument per (abstract, doc id) pair,
    tokenizing with gensim's simple_preprocess."""
    for abstract, doc_id in zip(abstracts, doc):
        tokens = gensim.utils.simple_preprocess(abstract)
        yield gensim.models.doc2vec.TaggedDocument(tokens, [str(doc_id)])
train_corpus = list(read_corpus(TEXT, DOCID))
from gensim.test.utils import get_tmpfile
from gensim.models.callbacks import CallbackAny2Vec
class EpochSaver(CallbackAny2Vec):
    '''Callback to save model after each epoch.'''
    def __init__(self, path_prefix):
        self.path_prefix = path_prefix
        self.epoch = 0
    def on_epoch_end(self, model):
        # Checkpoint named "<prefix>_epoch<N>.model" in gensim's temp dir.
        checkpoint = get_tmpfile('{}_epoch{}.model'.format(self.path_prefix, self.epoch))
        model.save(checkpoint)
        self.epoch += 1
class EpochLogger(CallbackAny2Vec):
    '''Callback to log information about training'''
    def __init__(self):
        self.epoch = 0
    def on_epoch_begin(self, model):
        # Same output as the original "Epoch #{} start".format(...)
        print(f"Epoch #{self.epoch} start")
    def on_epoch_end(self, model):
        print(f"Epoch #{self.epoch} end")
        self.epoch += 1
epoch_logger = EpochLogger()
cores = multiprocessing.cpu_count()
# Pure DBOW (dm=0) with word training enabled (dbow_words=1); 300-dim
# vectors, unlimited vocabulary, one worker per CPU core.
model = gensim.models.doc2vec.Doc2Vec(vector_size=300, min_count=1, dm=0,
                    sample=1e-3, negative=15,hs=0,dbow_words=1,
                    max_vocab_size=None,workers=cores,window=10,
                    callbacks=[epoch_logger])
model.build_vocab(train_corpus)
import time
start = time.time()
# Long training run; wall time is printed below.
model.train(train_corpus, total_examples=model.corpus_count, epochs=1000,report_delay=1)
end = time.time()
print(end - start)
from gensim.test.utils import get_tmpfile
# Persist the trained model to gensim's temp directory.
fname = get_tmpfile("doc2vec_more_school_1000_onlyDocId")
model.save(fname)
| [
"pandas.read_csv",
"gensim.test.utils.get_tmpfile",
"multiprocessing.cpu_count",
"numpy.concatenate",
"gensim.models.doc2vec.Doc2Vec",
"time.time",
"gensim.utils.simple_preprocess"
] | [((479, 531), 'pandas.read_csv', 'pd.read_csv', (['"""assets/dataframes/all_authors_df_2004"""'], {}), "('assets/dataframes/all_authors_df_2004')\n", (490, 531), True, 'import pandas as pd\n'), ((540, 577), 'pandas.read_csv', 'pd.read_csv', (['"""assets/dataframes/suDf"""'], {}), "('assets/dataframes/suDf')\n", (551, 577), True, 'import pandas as pd\n'), ((591, 633), 'pandas.read_csv', 'pd.read_csv', (['"""assets/dataframes/uppsalaDf"""'], {}), "('assets/dataframes/uppsalaDf')\n", (602, 633), True, 'import pandas as pd\n'), ((649, 693), 'pandas.read_csv', 'pd.read_csv', (['"""assets/dataframes/sodertornDf"""'], {}), "('assets/dataframes/sodertornDf')\n", (660, 693), True, 'import pandas as pd\n'), ((1212, 1297), 'numpy.concatenate', 'np.concatenate', (['[text_doc_kth, text_doc_su, text_doc_uppsala, text_doc_sodertorn]'], {}), '([text_doc_kth, text_doc_su, text_doc_uppsala,\n text_doc_sodertorn])\n', (1226, 1297), True, 'import numpy as np\n'), ((1397, 1462), 'numpy.concatenate', 'np.concatenate', (['[auth_kth, auth_su, auth_uppsala, auth_sodertorn]'], {}), '([auth_kth, auth_su, auth_uppsala, auth_sodertorn])\n', (1411, 1462), True, 'import numpy as np\n'), ((1471, 1536), 'numpy.concatenate', 'np.concatenate', (['[name_kth, name_su, name_uppsala, name_sodertorn]'], {}), '([name_kth, name_su, name_uppsala, name_sodertorn])\n', (1485, 1536), True, 'import numpy as np\n'), ((2743, 2770), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (2768, 2770), False, 'import multiprocessing\n'), ((2780, 2975), 'gensim.models.doc2vec.Doc2Vec', 'gensim.models.doc2vec.Doc2Vec', ([], {'vector_size': '(300)', 'min_count': '(1)', 'dm': '(0)', 'sample': '(0.001)', 'negative': '(15)', 'hs': '(0)', 'dbow_words': '(1)', 'max_vocab_size': 'None', 'workers': 'cores', 'window': '(10)', 'callbacks': '[epoch_logger]'}), '(vector_size=300, min_count=1, dm=0, sample=\n 0.001, negative=15, hs=0, dbow_words=1, max_vocab_size=None, workers=\n cores, window=10, 
callbacks=[epoch_logger])\n', (2809, 2975), False, 'import gensim\n'), ((3135, 3146), 'time.time', 'time.time', ([], {}), '()\n', (3144, 3146), False, 'import time\n'), ((3242, 3253), 'time.time', 'time.time', ([], {}), '()\n', (3251, 3253), False, 'import time\n'), ((3326, 3375), 'gensim.test.utils.get_tmpfile', 'get_tmpfile', (['"""doc2vec_more_school_1000_onlyDocId"""'], {}), "('doc2vec_more_school_1000_onlyDocId')\n", (3337, 3375), False, 'from gensim.test.utils import get_tmpfile\n'), ((1302, 1375), 'numpy.concatenate', 'np.concatenate', (['[doc_id_kth, doc_id_su, doc_id_uppsala, doc_id_sodertorn]'], {}), '([doc_id_kth, doc_id_su, doc_id_uppsala, doc_id_sodertorn])\n', (1316, 1375), True, 'import numpy as np\n'), ((1779, 1812), 'gensim.utils.simple_preprocess', 'gensim.utils.simple_preprocess', (['d'], {}), '(d)\n', (1809, 1812), False, 'import gensim\n')] |
from unittest import TestCase
from numpy import array, ndarray
from numpy.testing import assert_array_equal
from trigger import accel_value, trigger_time
class TriggerTimeTest(TestCase):
    """Tests for trigger.trigger_time."""
    def test_estimates_when_function_exceeds(self):
        # NOTE(review): `ndarray([])` allocates an *uninitialized* 0-d
        # array, so this expectation only pins trigger_time's return
        # shape, not its values — confirm against trigger_time's contract.
        function = 10
        t = array([1599574034])
        trig_level = 100
        expected = ndarray([])
        actual = trigger_time(function, t, trig_level)
        assert_array_equal(expected, actual)
class TestAccelValue(TestCase):
    """Table-driven tests for trigger.accel_value (vector magnitude)."""
    def test_it_provides_the_right_value(self):
        """
        [x,y,z,accel_value]
        x,y and z values were randomly generated
        accel_value = ((x**2 + y**2 + z**2)**0.5)
        """
        # Expected magnitudes were pre-computed to ~10 significant digits;
        # the assertion below compares after rounding to 4 decimals.
        testCases = [
            [1, 2, 2, 3],
            [2, 4, 4, 6],
            [2, -1, 2, 3],
            [-4, -4, 2, 6],
            [4, -2, 4, 6],
            [2, 2, -1, 3],
            [-5.444444444, -10.33333333, -4.111111111, 12.38228524],
            [6.555555556, 7.111111111, -5.111111111, 10.93922605],
            [7.888888889, -5.222222222, 11, 14.50883086],
            [7.111111111, 2.222222222, -8.111111111, 11.01345978],
            [3.333333333, -6.666666667, 2.777777778, 7.954345035],
            [-7.222222222, -8.666666667, -7.333333333, 13.45545922],
            [2.555555556, 8.333333333, -3.444444444, 9.372273266],
            [-10.88888889, 5.555555556, -10.77777778, 16.29701177],
            [1.777777778, 2.888888889, 2.111111111, 3.995367688],
            [-1.333333333, 3, -8.333333333, 8.956685895],
            [-8.111111111, -10.55555556, -9.111111111, 16.13140484],
            [9, -11, 10.66666667, 17.77013725],
            [9.777777778, -5.555555556, 3, 11.63912092],
            [-8.555555556, 5.777777778, -7.555555556, 12.79322737],
            [1.222222222, 1.111111111, -3.777777778, 4.123105626],
            [1.111111111, -11, 9.666666667, 14.68601417],
            [-8.222222222, 3.222222222, 4.555555556, 9.936837562],
            [9.555555556, -9.888888889, 7.555555556, 15.69028952],
            [-10.44444444, -2.666666667, 0.222222222, 10.7817862],
            [-3.666666667, -1.444444444, 11.11111111, 11.78930254],
            [8.222222222, 0.888888889, 3.333333333, 8.916623399],
            [1.555555556, -4.333333333, -7.888888889, 9.134117295],
            [-0.555555556, 8.444444444, 7.111111111, 11.05374078],
            [-7.222222222, 8, -10.33333333, 14.93111756],
            [7.222222222, -3.222222222, 7.111111111, 10.63537076],
            [-1.666666667, -2, 5.333333333, 5.934831272],
            [-2.555555556, 5.111111111, -4.777777778, 7.448589228],
            [3.111111111, 3.444444444, -9.666666667, 10.72322966],
            [-9, 4.666666667, -5.444444444, 11.5073782],
            [-0.666666667, 8.222222222, 10.11111111, 13.04928928],
            [-8.111111111, -4.222222222, -4.888888889, 10.36911368],
            [-2.555555556, 0.111111111, -9, 9.356452847],
            [10, -1.222222222, -2.555555556, 10.39349274],
        ]
        for test in testCases:
            assert_array_equal(
                round(accel_value(test[0], test[1], test[2]), 4), round(test[3], 4)
            )
| [
"numpy.array",
"numpy.ndarray",
"trigger.trigger_time",
"trigger.accel_value",
"numpy.testing.assert_array_equal"
] | [((275, 294), 'numpy.array', 'array', (['[1599574034]'], {}), '([1599574034])\n', (280, 294), False, 'from numpy import array, ndarray\n'), ((339, 350), 'numpy.ndarray', 'ndarray', (['[]'], {}), '([])\n', (346, 350), False, 'from numpy import array, ndarray\n'), ((368, 405), 'trigger.trigger_time', 'trigger_time', (['function', 't', 'trig_level'], {}), '(function, t, trig_level)\n', (380, 405), False, 'from trigger import accel_value, trigger_time\n'), ((414, 450), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (432, 450), False, 'from numpy.testing import assert_array_equal\n'), ((3085, 3123), 'trigger.accel_value', 'accel_value', (['test[0]', 'test[1]', 'test[2]'], {}), '(test[0], test[1], test[2])\n', (3096, 3123), False, 'from trigger import accel_value, trigger_time\n')] |
import json
import maya
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Weekday index (as returned by datetime.weekday(); Monday == 0) -> name.
days = {
    0:'monday',
    1:'tuesday',
    2:'wednesday',
    3:'thursday',
    4:'friday',
    5:'saturday',
    6:'sunday'
}
# Separators used when parsing `git shortlog` output in parse_log().
NEWLINE = '\n'
SPACE = ' '
COLON = ':'
def day(t):
    """Return the weekday name for an unformatted timestamp `t`,
    or None if the index is somehow out of range."""
    weekday_index = maya.parse(t).datetime().weekday()
    # dict.get gives None on a missing key, matching the old
    # try/except KeyError behavior.
    return days.get(weekday_index)
def day_i(t):
    """Weekday index (Monday == 0) of an unformatted timestamp."""
    return maya.parse(t).datetime().weekday()
def hour(t):
    """Hour-of-day (0-23) of an unformatted timestamp."""
    return maya.parse(t).datetime().hour
def date(t):
    """Calendar date of an unformatted timestamp."""
    return maya.parse(t).datetime().date()
def empty_name_data():
    """Return a fresh per-author record: zero commits and no timestamps
    for every weekday, e.g.

        {'monday': {'commits': 0, 'times': []}, 'tuesday': {...}, ...}
    """
    return {weekday: {'commits': 0, 'times': []} for weekday in days.values()}
def to_json_file(data, filename):
    """Write `data` to `filename` as pretty-printed UTF-8 JSON."""
    with open(filename, 'w', encoding='utf-8') as out:
        json.dump(data, out, ensure_ascii=False, indent=4)
def parse_log(file_name):
    '''
    Parse the output of: git shortlog --format=format:%cI > log.txt
    into a mapping of author name -> list of commit timestamps:
    {
        'name':  ['TIMEZONEDATA', 'TIMEZONEDATA'],
        'name2': [...]
    }
    Header lines ("Name (N):") start at column 0; the commit lines that
    follow are indented; a blank line resets the current author.
    '''
    commits_by_author = {}
    author = None
    with open(file_name, encoding='utf-8') as handle:
        source = handle.read()
    for raw_line in source.split(NEWLINE):
        if not raw_line:
            author = None
        elif raw_line[0] == SPACE:
            commits_by_author[author].append(raw_line.strip())
        else:
            # Header "Name (N):" -> drop the trailing "(N)" token.
            author = SPACE.join(raw_line.strip(COLON).split(SPACE)[:-1])
            commits_by_author[author] = []
    return commits_by_author
def get_clean_data(logfilename):
    '''
    Aggregate a parsed git shortlog into per-author weekday statistics.
    produces data in the format:
        "<NAME>": {
            "monday": {
                "commits": 19,
                "times": [
                    "2019-03-11@06:42:39",
                    "2019-03-25@02:59:00"
                ]
            },
            "tuesday": {
            ...
            }
    '''
    log_data = parse_log(logfilename)
    clean_data = {}
    for name in log_data:
        # Start every author with a zeroed weekday record.
        clean_data[name] = empty_name_data()
        unformatted_times = log_data[name]
        for t in unformatted_times:
            dayoftheweek = day(t)
            clean_data[name][dayoftheweek]['commits'] += 1
            #commit_time = str(maya_datetime.time())
            #commit_date = str(maya_datetime.date())
            clean_data[name][dayoftheweek]['times'].append(t)
    return clean_data
def merge_names(clean_data, namelist, default=None):
    """Collapse the records of every author in `namelist` into a single
    record stored under `default`, deleting the merged originals in place.
    """
    combined = empty_name_data()
    for author in namelist:
        for weekday_key in days:
            weekday = days[weekday_key]
            combined[weekday]['commits'] += clean_data[author][weekday]['commits']
            combined[weekday]['times'] += clean_data[author][weekday]['times']
    for author in namelist:
        if author != default:
            del clean_data[author]
    clean_data[default] = combined
def plot_user(data, name, alpha=0.1):
    """Show one author's commits as a weekday (x) vs hour-of-day (y) scatter.

    :param data: output of get_clean_data(); name -> weekday records
    :param name: author key to plot
    :param alpha: point transparency (overlapping commits appear darker)

    Cleanup: the unused `arj_time` accumulator and dead commented-out
    prints were removed; behavior is otherwise unchanged.
    """
    plt.gca()  # ensure a current Axes exists (kept from the original code)
    commit_days = []
    commit_hours = []
    commit_dates = []
    for d in data[name]:
        for t in data[name][d]['times']:
            commit_days.append(day_i(t))
            commit_hours.append(int(hour(t)))
            commit_dates.append(date(t))
    df = pd.DataFrame({
        'day': commit_days,
        'hour': commit_hours,
        'date': commit_dates
    })
    plt.scatter(df['day'], df['hour'], alpha=alpha, s=100)
    plt.title(name)
    plt.xticks(np.arange(7), [days[d] for d in days])
    plt.xlabel("Days")
    plt.ylabel("Hours/24")
    plt.show()
def save_user_plot(data, name, alpha=0.01):
    """Render one author's weekday/hour commit scatter and save it to
    pics/<name>.png (spaces in the name become underscores).

    :param data: output of get_clean_data(); name -> weekday records
    :param name: author key to plot
    :param alpha: point transparency

    Cleanup: the unused `arj_time` accumulator and dead commented-out
    prints were removed; behavior is otherwise unchanged.
    """
    plt.gca()  # ensure a current Axes exists (kept from the original code)
    commit_days = []
    commit_hours = []
    commit_dates = []
    for d in data[name]:
        for t in data[name][d]['times']:
            commit_days.append(day_i(t))
            commit_hours.append(int(hour(t)))
            commit_dates.append(date(t))
    df = pd.DataFrame({
        'day': commit_days,
        'hour': commit_hours,
        'date': commit_dates
    })
    plt.scatter(df['day'], df['hour'], alpha=alpha, s=100)
    plt.title(name)
    plt.xticks(np.arange(7), [days[d] for d in days])
    plt.xlabel("Days")
    plt.ylabel("Hours/24")
    plt.savefig('pics/{}.png'.format(name.replace(' ', '_')))
    plt.close()
def save_users_plot(data, names, alpha=0.1):
    """Render and save one commit-scatter image per author in `names`."""
    for author in names:
        save_user_plot(data, author, alpha=alpha)
def top_committers(filepath, number, return_=False, return_names=False):
    """Rank authors in a git shortlog dump by commit count.

    :param filepath: path to a `git shortlog --format=format:%cI` dump
    :param number: how many top authors to keep
    :param return_: if True, return [[name, commits], ...] (top `number`)
    :param return_names: if True, return just the top `number` names
    If neither flag is set, the ranking is printed instead.

    Bug fix: the print branch referenced an undefined name `target_val`
    and raised NameError; it now iterates the top-`number` slice.
    """
    committers = []
    raw_data = parse_log(filepath)
    for name in raw_data:
        commits = len(raw_data[name])
        committers.append([name, commits])
    # Most prolific first.
    sorted_committers = sorted(committers, key=lambda x: x[1], reverse=True)
    top = sorted_committers[:number]
    if return_:
        return top
    if return_names:
        return [entry[0] for entry in top]
    print('Top {} committers for {}:'.format(number, filepath))
    for committer in top:
        print('{}{:>7}: {}'.format(' '*4, committer[1], committer[0]))
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"maya.parse",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"json.dump",
"matplotlib.pyplot.show"
] | [((3456, 3465), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3463, 3465), True, 'import matplotlib.pyplot as plt\n'), ((3893, 3959), 'pandas.DataFrame', 'pd.DataFrame', (["{'day': arj_day, 'hour': arj_hour, 'date': arj_date}"], {}), "({'day': arj_day, 'hour': arj_hour, 'date': arj_date})\n", (3905, 3959), True, 'import pandas as pd\n'), ((4009, 4063), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df['day']", "df['hour']"], {'alpha': 'alpha', 's': '(100)'}), "(df['day'], df['hour'], alpha=alpha, s=100)\n", (4020, 4063), True, 'import matplotlib.pyplot as plt\n'), ((4068, 4083), 'matplotlib.pyplot.title', 'plt.title', (['USER'], {}), '(USER)\n', (4077, 4083), True, 'import matplotlib.pyplot as plt\n'), ((4142, 4160), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (4152, 4160), True, 'import matplotlib.pyplot as plt\n'), ((4165, 4187), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Hours/24"""'], {}), "('Hours/24')\n", (4175, 4187), True, 'import matplotlib.pyplot as plt\n'), ((4193, 4203), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4201, 4203), True, 'import matplotlib.pyplot as plt\n'), ((4262, 4271), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4269, 4271), True, 'import matplotlib.pyplot as plt\n'), ((4759, 4825), 'pandas.DataFrame', 'pd.DataFrame', (["{'day': arj_day, 'hour': arj_hour, 'date': arj_date}"], {}), "({'day': arj_day, 'hour': arj_hour, 'date': arj_date})\n", (4771, 4825), True, 'import pandas as pd\n'), ((4899, 4953), 'matplotlib.pyplot.scatter', 'plt.scatter', (["df['day']", "df['hour']"], {'alpha': 'alpha', 's': '(100)'}), "(df['day'], df['hour'], alpha=alpha, s=100)\n", (4910, 4953), True, 'import matplotlib.pyplot as plt\n'), ((4962, 4977), 'matplotlib.pyplot.title', 'plt.title', (['USER'], {}), '(USER)\n', (4971, 4977), True, 'import matplotlib.pyplot as plt\n'), ((5044, 5062), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (5054, 5062), True, 
'import matplotlib.pyplot as plt\n'), ((5071, 5093), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Hours/24"""'], {}), "('Hours/24')\n", (5081, 5093), True, 'import matplotlib.pyplot as plt\n'), ((5168, 5179), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5177, 5179), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1320), 'json.dump', 'json.dump', (['data', 'f'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(data, f, ensure_ascii=False, indent=4)\n', (1281, 1320), False, 'import json\n'), ((4099, 4111), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (4108, 4111), True, 'import numpy as np\n'), ((4997, 5009), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (5006, 5009), True, 'import numpy as np\n'), ((339, 352), 'maya.parse', 'maya.parse', (['t'], {}), '(t)\n', (349, 352), False, 'import maya\n'), ((554, 567), 'maya.parse', 'maya.parse', (['t'], {}), '(t)\n', (564, 567), False, 'import maya\n'), ((673, 686), 'maya.parse', 'maya.parse', (['t'], {}), '(t)\n', (683, 686), False, 'import maya\n'), ((779, 792), 'maya.parse', 'maya.parse', (['t'], {}), '(t)\n', (789, 792), False, 'import maya\n')] |
import numpy as np
import sys
from flare import gp, env, struc, kernels
from flare.modules import analyze_gp, qe_parsers
from mc_kernels import mc_simple
from scipy.optimize import minimize
import time
import datetime
def sweep(txt_name, data_file, cell, training_snaps, cutoffs, kernel,
          kernel_grad, initial_hyps, par):
    """Train a GP at each cutoff radius and log likelihood/timing to a file.

    Hyperparameters are warm-started: the optimum found for one cutoff
    seeds the optimization at the next cutoff in `cutoffs`.
    The report file `txt_name` is rewritten after every cutoff.
    """
    # set up text file
    txt = update_init()
    write_file(txt_name, txt)
    # define md_trajectory object
    md_trajectory = analyze_gp.MDAnalysis(data_file, cell)
    # set up training run
    hyps = initial_hyps
    for cutoff in cutoffs:
        gp_test = \
            analyze_gp.get_gp_from_snaps(md_trajectory, training_snaps,
                                         kernel, kernel_grad, hyps, cutoff,
                                         par=par)
        gp_test.algo = 'BFGS'
        gp_test.hyps = hyps
        # train gp model (timed)
        time0 = time.time()
        gp_test.train(monitor=True)
        time1 = time.time()
        training_time = time1 - time0
        likelihood = gp_test.like
        # warm start the next cutoff with the optimized hyperparameters
        hyps = gp_test.hyps
        txt += """\n
cutoff: {}
optimized hyperparameters:
""".format(cutoff)
        txt += str(hyps)
        txt += """
likelihood: %.5f
training time: %.2f s""" % (likelihood, training_time)
        write_file(txt_name, txt)
def sweep_and_test(txt_name, data_file, cell, training_snaps, cutoffs, kernel,
                   kernel_grad, initial_hyps, par, test_snaps):
    """Like sweep(), but after training at each cutoff also evaluate force
    predictions on `test_snaps` and append error statistics to the report.

    NOTE(review): the training section duplicates sweep() verbatim; the
    hard-coded `* 32 * 3` assumes 32 atoms x 3 force components per
    snapshot — confirm for other systems.
    """
    # set up text file
    txt = update_init()
    write_file(txt_name, txt)
    # define md_trajectory object
    md_trajectory = analyze_gp.MDAnalysis(data_file, cell)
    # set up training run
    hyps = initial_hyps
    for cutoff in cutoffs:
        gp_test = \
            analyze_gp.get_gp_from_snaps(md_trajectory, training_snaps,
                                         kernel, kernel_grad, hyps, cutoff,
                                         par=par)
        gp_test.algo = 'BFGS'
        gp_test.hyps = hyps
        # train gp model (timed)
        time0 = time.time()
        gp_test.train(monitor=True)
        time1 = time.time()
        training_time = time1 - time0
        likelihood = gp_test.like
        # warm start the next cutoff with the optimized hyperparameters
        hyps = gp_test.hyps
        txt += """\n
cutoff: {}
optimized hyperparameters:
""".format(cutoff)
        txt += str(hyps)
        txt += """
likelihood: %.5f
training time: %.2f s""" % (likelihood, training_time)
        write_file(txt_name, txt)
        # test model: predict forces on the held-out snapshots
        all_predictions, all_variances, all_forces = \
            analyze_gp.predict_forces_on_test_set(gp_test, md_trajectory,
                                                  test_snaps, cutoff)
        training_set_size = len(training_snaps) * 32 * 3
        avg_force = np.mean(np.abs(all_forces))
        max_force = np.max(np.abs(all_forces))
        mae = np.mean(np.abs(all_predictions - all_forces))
        max_err = np.max(np.abs(all_predictions - all_forces))
        avg_std = np.mean(np.sqrt(all_variances))
        max_std = np.max(np.sqrt(all_variances))
        txt += """\n
training_set_size = %i
average force = %.4f
max force = %.4f
mean absolute error = %.4f
max error = %.4f
average std = %.4f
max std = %.4f
\n""" % (training_set_size, avg_force, max_force, mae, max_err, avg_std,
         max_std)
        write_file(txt_name, txt)
def write_file(fname, text):
    """Overwrite `fname` with `text`."""
    with open(fname, 'w') as handle:
        handle.write(text)
def update_init():
    """Return the header text written at the start of a cutoff-test log,
    including the current wall-clock timestamp."""
    return """Cutoff test.
Date and time: %s.
Author: <NAME>.
""" % str(datetime.datetime.now())
def update_fin():
    """Return the footer text appended when a cutoff-test run finishes."""
    return """
-------------------------------------------------------------------------------
JOB DONE.
-------------------------------------------------------------------------------
"""
| [
"numpy.abs",
"numpy.sqrt",
"datetime.datetime.now",
"flare.modules.analyze_gp.MDAnalysis",
"flare.modules.analyze_gp.get_gp_from_snaps",
"flare.modules.analyze_gp.predict_forces_on_test_set",
"time.time"
] | [((466, 504), 'flare.modules.analyze_gp.MDAnalysis', 'analyze_gp.MDAnalysis', (['data_file', 'cell'], {}), '(data_file, cell)\n', (487, 504), False, 'from flare.modules import analyze_gp, qe_parsers\n'), ((1611, 1649), 'flare.modules.analyze_gp.MDAnalysis', 'analyze_gp.MDAnalysis', (['data_file', 'cell'], {}), '(data_file, cell)\n', (1632, 1649), False, 'from flare.modules import analyze_gp, qe_parsers\n'), ((616, 723), 'flare.modules.analyze_gp.get_gp_from_snaps', 'analyze_gp.get_gp_from_snaps', (['md_trajectory', 'training_snaps', 'kernel', 'kernel_grad', 'hyps', 'cutoff'], {'par': 'par'}), '(md_trajectory, training_snaps, kernel,\n kernel_grad, hyps, cutoff, par=par)\n', (644, 723), False, 'from flare.modules import analyze_gp, qe_parsers\n'), ((902, 913), 'time.time', 'time.time', ([], {}), '()\n', (911, 913), False, 'import time\n'), ((966, 977), 'time.time', 'time.time', ([], {}), '()\n', (975, 977), False, 'import time\n'), ((1761, 1868), 'flare.modules.analyze_gp.get_gp_from_snaps', 'analyze_gp.get_gp_from_snaps', (['md_trajectory', 'training_snaps', 'kernel', 'kernel_grad', 'hyps', 'cutoff'], {'par': 'par'}), '(md_trajectory, training_snaps, kernel,\n kernel_grad, hyps, cutoff, par=par)\n', (1789, 1868), False, 'from flare.modules import analyze_gp, qe_parsers\n'), ((2048, 2059), 'time.time', 'time.time', ([], {}), '()\n', (2057, 2059), False, 'import time\n'), ((2112, 2123), 'time.time', 'time.time', ([], {}), '()\n', (2121, 2123), False, 'import time\n'), ((2565, 2650), 'flare.modules.analyze_gp.predict_forces_on_test_set', 'analyze_gp.predict_forces_on_test_set', (['gp_test', 'md_trajectory', 'test_snaps', 'cutoff'], {}), '(gp_test, md_trajectory, test_snaps,\n cutoff)\n', (2602, 2650), False, 'from flare.modules import analyze_gp, qe_parsers\n'), ((2783, 2801), 'numpy.abs', 'np.abs', (['all_forces'], {}), '(all_forces)\n', (2789, 2801), True, 'import numpy as np\n'), ((2830, 2848), 'numpy.abs', 'np.abs', (['all_forces'], {}), '(all_forces)\n', 
(2836, 2848), True, 'import numpy as np\n'), ((2872, 2908), 'numpy.abs', 'np.abs', (['(all_predictions - all_forces)'], {}), '(all_predictions - all_forces)\n', (2878, 2908), True, 'import numpy as np\n'), ((2935, 2971), 'numpy.abs', 'np.abs', (['(all_predictions - all_forces)'], {}), '(all_predictions - all_forces)\n', (2941, 2971), True, 'import numpy as np\n'), ((2999, 3021), 'numpy.sqrt', 'np.sqrt', (['all_variances'], {}), '(all_variances)\n', (3006, 3021), True, 'import numpy as np\n'), ((3048, 3070), 'numpy.sqrt', 'np.sqrt', (['all_variances'], {}), '(all_variances)\n', (3055, 3070), True, 'import numpy as np\n'), ((3546, 3569), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3567, 3569), False, 'import datetime\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 29 16:10:21 2018
@author: michelcassard
"""
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt2
import pandas as pd
from pandas import datetime
import math, time
import itertools
from sklearn import preprocessing
from sklearn.metrics import mean_squared_error
from math import sqrt
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.models import load_model
import keras
import h5py
import os
from statistics import mean
from keras import backend as K
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.layers.core import Flatten
from mosek.fusion import *
import pylab
import random
# Model / training hyper-parameters.
seq_len = 22  # rows of history per input window
shape = [seq_len, 9, 1]  # [window, features, output]; 9 must match the dataframe columns
neurons = [256, 256, 32, 1]  # widths of [LSTM1, LSTM2, Dense-relu, Dense-linear]
dropout = 0.3
decay = 0.5  # Adam learning-rate decay
epochs = 90
#os.chdir("/Users/youssefberrada/Dropbox (MIT)/15.961 Independant Study/Data")
os.chdir("/Users/michelcassard/Dropbox (MIT)/15.960 Independant Study/Data")
file = 'FX-5.xlsx'
# Load spreadsheet
xl = pd.ExcelFile(file)
def get_stock_data(stock_name, ma=[]):
    """
    Return a dataframe of that stock and normalize all the values.
    (Optional: create moving averages for the window sizes in `ma`.)
    'Adj Close' is moved to the last column for the ease of training.
    """
    frame = xl.parse(stock_name)
    frame.drop(['VOLUME'], 1, inplace=True)
    frame.set_index('Date', inplace=True)
    # Rename Bloomberg-style columns to the names the old code expects.
    renames = {'OPEN': 'Open', 'HIGH': 'High', 'LOW': 'Low',
               'NUMBER_TICKS': 'Volume', 'LAST_PRICE': 'Adj Close'}
    frame.rename(columns=renames, inplace=True)
    # Percentage change of the close; first row becomes NaN and is dropped.
    frame['Pct'] = frame['Adj Close'].pct_change()
    frame.dropna(inplace=True)
    # Optional moving averages (empty `ma` leaves the frame untouched).
    for window in ma:
        frame['{}ma'.format(window)] = frame['Adj Close'].rolling(window=window).mean()
    frame.dropna(inplace=True)
    # Move 'Adj Close' to the rightmost column.
    closes = frame['Adj Close']
    frame.drop(labels=['Adj Close'], axis=1, inplace=True)
    return pd.concat([frame, closes], axis=1)
df_GBP=get_stock_data("GBP Curncy", ma=[50, 100, 200])
def plot_stock(df):
    """Print the head of `df` and show adjusted close (top panel) plus
    percentage change (bottom panel)."""
    print(df.head())
    panels = [('Adj Close', 'red', 'Adj Close'),
              ('Pct', 'blue', 'Percentage change')]
    for offset, (column, colour, label) in enumerate(panels):
        plt.subplot(211 + offset)
        plt.plot(df[column], color=colour, label=label)
        plt.legend(loc='best')
    plt.show()
plot_stock(df_GBP)
def load_data(stock, normalize, seq_len, split, ma):
    """Split `stock` into overlapping train/test windows for the network.

    :param stock: DataFrame whose LAST column is the prediction target.
    :param normalize: if True, min-max scale each column (train and test
        frames are scaled independently, as before).
    :param seq_len: number of past rows per input window.
    :param split: fraction of rows used for training.
    :param ma: moving-average window sizes whose columns also get scaled.
    :return: [X_train, y_train, X_test, y_test]; X_* has shape
        (n_windows, seq_len, n_features) and y_* holds the target column
        of the row immediately after each window.

    Bug fixes relative to the original:
    * the test windows were built but then discarded — `test` was created
      from `result_train`, so X_test/y_test silently duplicated the
      training data;
    * `DataFrame.as_matrix()` (removed in pandas >= 1.0) is replaced by
      `.values`, which behaves identically on old and new pandas.
    """
    amount_of_features = len(stock.columns)
    print("Amount of features = {}".format(amount_of_features))
    sequence_length = seq_len + 1
    result_train = []
    result_test = []
    row = round(split * stock.shape[0])
    df_train = stock[0:row].copy()
    print("Amount of training data = {}".format(df_train.shape[0]))
    df_test = stock[row:len(stock)].copy()
    print("Amount of testing data = {}".format(df_test.shape[0]))
    if normalize:
        min_max_scaler = preprocessing.MinMaxScaler()
        # Scale train and test frames independently, column by column,
        # preserving the original column order.
        for frame in (df_train, df_test):
            for column in ['Open', 'High', 'Low', 'Volume', 'Adj Close', 'Pct']:
                frame[column] = min_max_scaler.fit_transform(frame[column].values.reshape(-1, 1))
            for moving in ma:
                ma_column = '{}ma'.format(moving)
                frame[ma_column] = min_max_scaler.fit_transform(frame[ma_column].values.reshape(-1, 1))
    # Training windows
    data_train = df_train.values
    for index in range(len(data_train) - sequence_length):
        result_train.append(data_train[index: index + sequence_length])
    train = np.array(result_train)
    X_train = train[:, :-1].copy()        # all data until day m
    y_train = train[:, -1][:, -1].copy()  # day m + 1 target (last column)
    # Test windows (fixed: built from result_test, not result_train)
    data_test = df_test.values
    for index in range(len(data_test) - sequence_length):
        result_test.append(data_test[index: index + sequence_length])
    test = np.array(result_test)
    X_test = test[:, :-1].copy()
    y_test = test[:, -1][:, -1].copy()
    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], amount_of_features))
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], amount_of_features))
    return [X_train, y_train, X_test, y_test]
X_train, y_train, X_test, y_test = load_data(df_GBP,True,seq_len,split=0.7,ma=[50, 100, 200])
def build_model(shape, neurons, dropout, decay):
    """Stacked-LSTM regression model.

    :param shape: [seq_len, n_features, ...]; only shape[0]/shape[1] are used.
    :param neurons: widths of [LSTM1, LSTM2, Dense-relu, Dense-linear].
    :param dropout: dropout rate applied after each LSTM block.
    :param decay: learning-rate decay for the Adam optimizer.

    Bug fix: the Adam instance configured with `decay` was created but the
    model was compiled with the string 'adam' (a default optimizer), so the
    decay setting was silently ignored. The configured optimizer is now
    actually passed to compile().
    """
    model = Sequential()
    model.add(LSTM(neurons[0], input_shape=(shape[0], shape[1]), return_sequences=True))
    model.add(Dropout(dropout))
    model.add(LSTM(neurons[1], input_shape=(shape[0], shape[1]), return_sequences=False))
    model.add(Dropout(dropout))
    model.add(Dense(neurons[2], kernel_initializer="uniform", activation='relu'))
    model.add(Dense(neurons[3], kernel_initializer="uniform", activation='linear'))
    adam = keras.optimizers.Adam(decay=decay)
    model.compile(loss='mse', optimizer=adam, metrics=['accuracy'])
    model.summary()
    return model
def build_model_CNN(shape, neurons, dropout, decay):
    """Build and compile a 1-D CNN regression model.

    Args:
        shape: (timesteps, n_features) input shape for the conv layers.
        neurons: unused by this architecture (kept for signature parity with
            build_model).
        dropout: unused; the two dropout rates are fixed at 0.25.
        decay: learning-rate decay for the Adam optimizer.

    Returns:
        A compiled Keras Sequential model.
    """
    model = Sequential()
    model.add(Convolution1D(input_shape = (shape[0], shape[1]),
                        nb_filter=64,
                        filter_length=2,
                        border_mode='valid',
                        activation='relu',
                        subsample_length=1))
    model.add(MaxPooling1D(pool_length=2))
    model.add(Convolution1D(input_shape = (shape[0], shape[1]),
                        nb_filter=64,
                        filter_length=2,
                        border_mode='valid',
                        activation='relu',
                        subsample_length=1))
    model.add(MaxPooling1D(pool_length=2))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(250))
    model.add(Dropout(0.25))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('linear'))
    # BUG FIX: the Adam instance carrying `decay` was built but the string
    # 'adam' was passed to compile(), discarding the decay setting.
    adam = keras.optimizers.Adam(decay=decay)
    model.compile(loss='mse', optimizer=adam, metrics=['accuracy'])
    model.summary()
    return model
model = build_model_CNN(shape, neurons, dropout, decay)
model.fit(X_train,y_train,batch_size=512,epochs=epochs,validation_split=0.3,verbose=1)
def model_score(model, X_train, y_train, X_test, y_test):
    """Evaluate `model` on the train and test sets, printing MSE and RMSE.

    Returns:
        (train_mse, test_mse): the MSE on each set.
    """
    train_mse = model.evaluate(X_train, y_train, verbose=0)[0]
    print('Train Score: %.5f MSE (%.2f RMSE)' % (train_mse, math.sqrt(train_mse)))
    test_mse = model.evaluate(X_test, y_test, verbose=0)[0]
    print('Test Score: %.5f MSE (%.2f RMSE)' % (test_mse, math.sqrt(test_mse)))
    return train_mse, test_mse
model_score(model, X_train, y_train, X_test, y_test)
def percentage_difference(model, X_test, y_test):
    """Print the mean relative prediction error (%) and return the predictions.

    Args:
        model: fitted model exposing predict().
        X_test: model inputs.
        y_test: actual target values, one per prediction.

    Returns:
        The raw predictions from model.predict().
    """
    percentage_diff = []
    p = model.predict(X_test)
    for u in range(len(y_test)):  # for each data index in test data
        pr = p[u][0]  # pr = prediction on day u
        # BUG FIX: was (pr - y_test[u] / pr) * 100 — operator precedence divided
        # only the actual value by pr instead of the whole difference.
        percentage_diff.append((pr - y_test[u]) / pr * 100)
    print(mean(percentage_diff))
    return p
p = percentage_difference(model, X_test, y_test)
def plot_result_norm(stock_name, normalized_value_p, normalized_value_y_test):
    """Plot normalized predictions against normalized actual values."""
    prediction = normalized_value_p
    actual = normalized_value_y_test
    plt2.plot(prediction, color='red', label='Prediction')
    plt2.plot(actual, color='blue', label='Actual')
    plt2.legend(loc='best')
    plt2.title('The test result for {}'.format(stock_name))
    plt2.xlabel('5 Min ahead Forecast')
    plt2.ylabel('Price')
    plt2.show()
plot_result_norm("GBP Curncy", p, y_test)
def denormalize(stock_name, normalized_value, split=0.7, predict=True):
    """Map normalized values back to the price scale of `stock_name`.

    Reloads the sheet from the open workbook `xl`, picks a reference slice of
    the adjusted-close prices (training slice if `predict`, else test slice),
    and rescales `normalized_value` with that slice's mean and std.

    Args:
        stock_name: sheet name in the already-open workbook `xl`.
        normalized_value: array of normalized values to rescale.
        split: train/test split ratio used to pick the reference slice.
        predict: if True use the training slice as reference, else test slice.

    Returns:
        Array of rescaled values.
    """
    df = xl.parse(stock_name)
    df.drop(['VOLUME'], 1, inplace=True)
    df.set_index('Date', inplace=True)
    # Renaming all the columns so that we can use the old version code
    df.rename(columns={'OPEN': 'Open', 'HIGH': 'High', 'LOW': 'Low', 'NUMBER_TICKS': 'Volume', 'LAST_PRICE': 'Adj Close'}, inplace=True)
    df.dropna(inplace=True)
    df = df['Adj Close'].values.reshape(-1,1)
    normalized_value = normalized_value.reshape(-1,1)
    row = round(split * df.shape[0])
    if predict:
        df_p = df[0:row].copy()
    else:
        df_p = df[row:len(df)].copy()
    mean_df = np.mean(df_p)
    std_df = np.std(df_p)
    # BUG FIX: the inverse of z-score normalization is x = z * std + mean; the
    # original multiplied by the mean and added the std.
    return normalized_value * std_df + mean_df
def portfolio(currency_list,file = 'FX-5.xlsx',seq_len = 22,shape = [seq_len, 9, 1],neurons = [256, 256, 32, 1],dropout = 0.3,decay = 0.5,
epochs = 90,ma=[50, 100, 200],split=0.7):
    """Train one CNN per currency and stack their predictions column-wise.

    NOTE(review): the list defaults (shape/neurons/ma) are mutable defaults,
    and `shape` captures the module-level `seq_len` at definition time — the
    `seq_len` parameter does NOT affect it. The `file` argument is unused in
    the body. Confirm both are intended.
    """
    i=0
    mini=99999999  # running minimum prediction length across currencies
    for currency in currency_list:
        df=get_stock_data(currency, ma)
        X_train, y_train, X_test, y_test = load_data(df,True,seq_len,split,ma)
        model = build_model_CNN(shape, neurons, dropout, decay)
        model.fit(X_train,y_train,batch_size=512,epochs=epochs,validation_split=0.3,verbose=1)
        p = percentage_difference(model, X_test, y_test)
        # NOTE(review): `newp` is never used afterwards — denormalize() is
        # effectively called only for its side effects here.
        newp = denormalize(currency, p,predict=True)
        if mini>p.size:
            mini=p.size
        if i==0:
            predict=p.copy()
        else:
            # truncate all columns to the shortest prediction length seen so far
            predict=np.hstack((predict[0:mini],p[0:mini]))
        i+=1
    return predict
currency_list=[ 'GBP Curncy',
'JPY Curncy',
'EUR Curncy',
'CAD Curncy',
'NZD Curncy',
'SEK Curncy',
'AUD Curncy',
'CHF Curncy',
'NOK Curncy']
#currency_list=['JPY Curncy']
predictcur=portfolio(currency_list,file = 'FX-5.xlsx',seq_len = 22,shape = [seq_len, 9, 1],neurons = [256, 256, 32, 1],dropout = 0.3,decay = 0.5,
epochs = 1,ma=[50, 100, 200],split=0.7)
"""
Description:
Extends the basic Markowitz model with a market cost term.
Input:
n: Number of assets
mu: An n dimensional vector of expected returns
GT: A matrix with n columns so (GT')*GT = covariance matrix
x0: Initial holdings
w: Initial cash holding
gamma: Maximum risk (=std. dev) accepted
f: If asset j is traded then a fixed cost f_j must be paid
g: If asset j is traded then a cost g_j must be paid for each unit traded
Output:
Optimal expected return and the optimal portfolio
"""
def MarkowitzWithTransactionsCost(n,mu,GT,x0,w,gamma,f,g):
    """Solve the Markowitz problem with fixed and proportional trading costs.

    Mixed-integer conic model (MOSEK Fusion): binary y_j switches on the
    fixed cost f_j when asset j is traded; z_j >= |x_j - x0_j| carries the
    proportional cost g_j. Returns (optimal expected return, optimal
    portfolio levels).
    """
    # Upper bound on the traded amount
    w0 = w+sum(x0)
    u = n*[w0]
    with Model("Markowitz portfolio with transaction costs") as M:
        #M.setLogHandler(sys.stdout)
        # Defines the variables. No shortselling is allowed.
        x = M.variable("x", n, Domain.greaterThan(0.0))
        # Additional "helper" variables
        z = M.variable("z", n, Domain.unbounded())
        # Binary variables
        y = M.variable("y", n, Domain.binary())
        # Maximize expected return
        M.objective('obj', ObjectiveSense.Maximize, Expr.dot(mu,x))
        # Invest amount + transactions costs = initial wealth
        M.constraint('budget', Expr.add([ Expr.sum(x), Expr.dot(f,y),Expr.dot(g,z)] ), Domain.equalsTo(w0))
        # Imposes a bound on the risk
        M.constraint('risk', Expr.vstack( gamma,Expr.mul(GT,x)), Domain.inQCone())
        # z >= |x-x0|
        M.constraint('buy', Expr.sub(z,Expr.sub(x,x0)),Domain.greaterThan(0.0))
        M.constraint('sell', Expr.sub(z,Expr.sub(x0,x)),Domain.greaterThan(0.0))
        # Alternatively, formulate the two constraints as
        #M.constraint('trade', Expr.hstack(z,Expr.sub(x,x0)), Domain.inQcone())
        # Constraints for turning y off and on. z-diag(u)*y<=0 i.e. z_j <= u_j*y_j
        M.constraint('y_on_off', Expr.sub(z,Expr.mulElm(u,y)), Domain.lessThan(0.0))
        # Integer optimization problems can be very hard to solve so limiting the
        # maximum amount of time is a valuable safe guard
        M.setSolverParam('mioMaxTime', 180.0)
        M.solve()
        print("\n-----------------------------------------------------------------------------------");
        print('Markowitz portfolio optimization with transactions cost')
        print("-----------------------------------------------------------------------------------\n");
        print('Expected return: %.4e Std. deviation: %.4e Transactions cost: %.4e' % \
              (np.dot(mu,x.level()),gamma,np.dot(f,y.level())+np.dot(g,z.level())))
        return (np.dot(mu,x.level()), x.level())
def rebalance(n,previous_prices,x0,w,mu,gamma=1):
    """Solve the transaction-cost Markowitz problem for one rebalancing step.

    Uses the sample covariance of `previous_prices`, a fixed cost of 0.01 and
    a proportional cost of 0.001 per asset. Returns the optimizer's
    (expected return, weights) tuple.
    """
    covariance = np.cov(previous_prices)
    fixed_costs = n * [0.01]
    proportional_costs = n * [0.001]
    return MarkowitzWithTransactionsCost(n, mu, covariance, x0, w, gamma, fixed_costs, proportional_costs)
rebalance(9,dq,mu=predictcur[1],x0=[0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],w=1,gamma=1)
# Backtesting using rebalancing function for weights
def log_diff(data):
    """Return first differences of the natural log of `data` (log returns)."""
    log_values = np.log(data)
    return np.diff(log_values)
def backtest(prices, predictions, initial_weights):
    """Walk-forward backtest: rebalance each period on predicted returns.

    Args:
        prices: asset-by-time price matrix (assets in rows).
        predictions: time-by-asset matrix of predicted prices covering the
            final len(predictions) periods of `prices`.
        initial_weights: starting portfolio weights.

    Returns:
        List of per-period log portfolio returns.

    NOTE(review): calls `rebalance_y`, which is not defined in this file as
    shown, and hard-codes the asset count to 3 — confirm both before running.
    """
    t_prices = len(prices[1,:])
    t_predictions = len(predictions[:,1])
    length_past = t_prices - t_predictions  # number of purely historical periods
    returns = np.apply_along_axis(log_diff, 1, prices)
    prediction_return = []
    for k in range(t_predictions):
        # predicted log return relative to the currently observed price
        prediction_return.append(np.log(predictions[k]/prices[:,length_past+k]))
    weights = initial_weights
    portfolio_return = []
    prev_weight = weights
    for i in range(0,t_predictions-1):
        predicted_return = prediction_return[i]
        previous_return = returns[:,length_past+i]
        previous_returns = returns[:,0:length_past+i]
        # first step invests the unit cash holding (w=1); later steps only rebalance
        if i==0:
            new_weight = rebalance_y(3,previous_returns,mu=predicted_return.tolist(),x0=prev_weight,w=1,gamma=0.5)
        else:
            new_weight = rebalance_y(3,previous_returns,mu=predicted_return.tolist(),x0=prev_weight,w=0,gamma=0.5)
        # realized log return of the next period, weighted by the new portfolio
        period_return = new_weight*np.log(prices[:,length_past+i+1]/prices[:,length_past+i])
        portfolio_return.append(np.sum(period_return))
        prev_weight = new_weight
    return portfolio_return
x = backtest(dq.T, predictcur, np.repeat(1/10,10))
def plot_result(stock_name, normalized_value_p, normalized_value_y_test):
    """Denormalize predictions and actuals for `stock_name`, then plot both."""
    prediction = denormalize(stock_name, normalized_value_p, predict=True)
    actual = denormalize(stock_name, normalized_value_y_test, predict=False)
    plt2.plot(prediction, color='red', label='Prediction')
    plt2.plot(actual, color='blue', label='Actual')
    plt2.legend(loc='best')
    plt2.title('The test result for {}'.format(stock_name))
    plt2.xlabel('5 Min ahead Forecast')
    plt2.ylabel('Price')
    plt2.show()
plot_result("GBP Curncy", p, y_test)
| [
"keras.layers.core.Flatten",
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"numpy.log",
"math.sqrt",
"numpy.array",
"numpy.cov",
"numpy.mean",
"numpy.reshape",
"numpy.repeat",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pandas.concat",
"keras.layers.convolutional.MaxPooling1D",
... | [((1014, 1090), 'os.chdir', 'os.chdir', (['"""/Users/michelcassard/Dropbox (MIT)/15.960 Independant Study/Data"""'], {}), "('/Users/michelcassard/Dropbox (MIT)/15.960 Independant Study/Data')\n", (1022, 1090), False, 'import os\n'), ((1134, 1152), 'pandas.ExcelFile', 'pd.ExcelFile', (['file'], {}), '(file)\n', (1146, 1152), True, 'import pandas as pd\n'), ((2096, 2130), 'pandas.concat', 'pd.concat', (['[df, adj_close]'], {'axis': '(1)'}), '([df, adj_close], axis=1)\n', (2105, 2130), True, 'import pandas as pd\n'), ((2255, 2271), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2266, 2271), True, 'import matplotlib.pyplot as plt\n'), ((2276, 2333), 'matplotlib.pyplot.plot', 'plt.plot', (["df['Adj Close']"], {'color': '"""red"""', 'label': '"""Adj Close"""'}), "(df['Adj Close'], color='red', label='Adj Close')\n", (2284, 2333), True, 'import matplotlib.pyplot as plt\n'), ((2338, 2360), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2348, 2360), True, 'import matplotlib.pyplot as plt\n'), ((2365, 2381), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (2376, 2381), True, 'import matplotlib.pyplot as plt\n'), ((2386, 2446), 'matplotlib.pyplot.plot', 'plt.plot', (["df['Pct']"], {'color': '"""blue"""', 'label': '"""Percentage change"""'}), "(df['Pct'], color='blue', label='Percentage change')\n", (2394, 2446), True, 'import matplotlib.pyplot as plt\n'), ((2451, 2473), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (2461, 2473), True, 'import matplotlib.pyplot as plt\n'), ((2478, 2488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2486, 2488), True, 'import matplotlib.pyplot as plt\n'), ((4814, 4836), 'numpy.array', 'np.array', (['result_train'], {}), '(result_train)\n', (4822, 4836), True, 'import numpy as np\n'), ((5156, 5178), 'numpy.array', 'np.array', (['result_train'], {}), '(result_train)\n', (5164, 5178), True, 
'import numpy as np\n'), ((5270, 5347), 'numpy.reshape', 'np.reshape', (['X_train', '(X_train.shape[0], X_train.shape[1], amount_of_features)'], {}), '(X_train, (X_train.shape[0], X_train.shape[1], amount_of_features))\n', (5280, 5347), True, 'import numpy as np\n'), ((5361, 5435), 'numpy.reshape', 'np.reshape', (['X_test', '(X_test.shape[0], X_test.shape[1], amount_of_features)'], {}), '(X_test, (X_test.shape[0], X_test.shape[1], amount_of_features))\n', (5371, 5435), True, 'import numpy as np\n'), ((5659, 5671), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5669, 5671), False, 'from keras.models import Sequential\n'), ((6187, 6221), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'decay': 'decay'}), '(decay=decay)\n', (6208, 6221), False, 'import keras\n'), ((6395, 6407), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6405, 6407), False, 'from keras.models import Sequential\n'), ((7266, 7300), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'decay': 'decay'}), '(decay=decay)\n', (7287, 7300), False, 'import keras\n'), ((8529, 8577), 'matplotlib.pyplot.plot', 'plt2.plot', (['newp'], {'color': '"""red"""', 'label': '"""Prediction"""'}), "(newp, color='red', label='Prediction')\n", (8538, 8577), True, 'import matplotlib.pyplot as plt2\n'), ((8582, 8632), 'matplotlib.pyplot.plot', 'plt2.plot', (['newy_test'], {'color': '"""blue"""', 'label': '"""Actual"""'}), "(newy_test, color='blue', label='Actual')\n", (8591, 8632), True, 'import matplotlib.pyplot as plt2\n'), ((8636, 8659), 'matplotlib.pyplot.legend', 'plt2.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (8647, 8659), True, 'import matplotlib.pyplot as plt2\n'), ((8724, 8759), 'matplotlib.pyplot.xlabel', 'plt2.xlabel', (['"""5 Min ahead Forecast"""'], {}), "('5 Min ahead Forecast')\n", (8735, 8759), True, 'import matplotlib.pyplot as plt2\n'), ((8764, 8784), 'matplotlib.pyplot.ylabel', 'plt2.ylabel', (['"""Price"""'], {}), "('Price')\n", (8775, 8784), True, 
'import matplotlib.pyplot as plt2\n'), ((8789, 8800), 'matplotlib.pyplot.show', 'plt2.show', ([], {}), '()\n', (8798, 8800), True, 'import matplotlib.pyplot as plt2\n'), ((9673, 9686), 'numpy.mean', 'np.mean', (['df_p'], {}), '(df_p)\n', (9680, 9686), True, 'import numpy as np\n'), ((9698, 9710), 'numpy.std', 'np.std', (['df_p'], {}), '(df_p)\n', (9704, 9710), True, 'import numpy as np\n'), ((13782, 13805), 'numpy.cov', 'np.cov', (['previous_prices'], {}), '(previous_prices)\n', (13788, 13805), True, 'import numpy as np\n'), ((14307, 14347), 'numpy.apply_along_axis', 'np.apply_along_axis', (['log_diff', '(1)', 'prices'], {}), '(log_diff, 1, prices)\n', (14326, 14347), True, 'import numpy as np\n'), ((15269, 15290), 'numpy.repeat', 'np.repeat', (['(1 / 10)', '(10)'], {}), '(1 / 10, 10)\n', (15278, 15290), True, 'import numpy as np\n'), ((15519, 15567), 'matplotlib.pyplot.plot', 'plt2.plot', (['newp'], {'color': '"""red"""', 'label': '"""Prediction"""'}), "(newp, color='red', label='Prediction')\n", (15528, 15567), True, 'import matplotlib.pyplot as plt2\n'), ((15572, 15622), 'matplotlib.pyplot.plot', 'plt2.plot', (['newy_test'], {'color': '"""blue"""', 'label': '"""Actual"""'}), "(newy_test, color='blue', label='Actual')\n", (15581, 15622), True, 'import matplotlib.pyplot as plt2\n'), ((15626, 15649), 'matplotlib.pyplot.legend', 'plt2.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (15637, 15649), True, 'import matplotlib.pyplot as plt2\n'), ((15714, 15749), 'matplotlib.pyplot.xlabel', 'plt2.xlabel', (['"""5 Min ahead Forecast"""'], {}), "('5 Min ahead Forecast')\n", (15725, 15749), True, 'import matplotlib.pyplot as plt2\n'), ((15754, 15774), 'matplotlib.pyplot.ylabel', 'plt2.ylabel', (['"""Price"""'], {}), "('Price')\n", (15765, 15774), True, 'import matplotlib.pyplot as plt2\n'), ((15779, 15790), 'matplotlib.pyplot.show', 'plt2.show', ([], {}), '()\n', (15788, 15790), True, 'import matplotlib.pyplot as plt2\n'), ((3074, 3102), 
'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (3100, 3102), False, 'from sklearn import preprocessing\n'), ((5778, 5851), 'keras.layers.recurrent.LSTM', 'LSTM', (['neurons[0]'], {'input_shape': '(shape[0], shape[1])', 'return_sequences': '(True)'}), '(neurons[0], input_shape=(shape[0], shape[1]), return_sequences=True)\n', (5782, 5851), False, 'from keras.layers.recurrent import LSTM\n'), ((5867, 5883), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (5874, 5883), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((5900, 5974), 'keras.layers.recurrent.LSTM', 'LSTM', (['neurons[1]'], {'input_shape': '(shape[0], shape[1])', 'return_sequences': '(False)'}), '(neurons[1], input_shape=(shape[0], shape[1]), return_sequences=False)\n', (5904, 5974), False, 'from keras.layers.recurrent import LSTM\n'), ((5990, 6006), 'keras.layers.core.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (5997, 6006), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((6023, 6089), 'keras.layers.core.Dense', 'Dense', (['neurons[2]'], {'kernel_initializer': '"""uniform"""', 'activation': '"""relu"""'}), "(neurons[2], kernel_initializer='uniform', activation='relu')\n", (6028, 6089), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((6103, 6171), 'keras.layers.core.Dense', 'Dense', (['neurons[3]'], {'kernel_initializer': '"""uniform"""', 'activation': '"""linear"""'}), "(neurons[3], kernel_initializer='uniform', activation='linear')\n", (6108, 6171), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((6422, 6565), 'keras.layers.convolutional.Convolution1D', 'Convolution1D', ([], {'input_shape': '(shape[0], shape[1])', 'nb_filter': '(64)', 'filter_length': '(2)', 'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample_length': '(1)'}), "(input_shape=(shape[0], shape[1]), nb_filter=64, filter_length\n =2, border_mode='valid', 
activation='relu', subsample_length=1)\n", (6435, 6565), False, 'from keras.layers.convolutional import Convolution1D, MaxPooling1D\n'), ((6699, 6726), 'keras.layers.convolutional.MaxPooling1D', 'MaxPooling1D', ([], {'pool_length': '(2)'}), '(pool_length=2)\n', (6711, 6726), False, 'from keras.layers.convolutional import Convolution1D, MaxPooling1D\n'), ((6743, 6886), 'keras.layers.convolutional.Convolution1D', 'Convolution1D', ([], {'input_shape': '(shape[0], shape[1])', 'nb_filter': '(64)', 'filter_length': '(2)', 'border_mode': '"""valid"""', 'activation': '"""relu"""', 'subsample_length': '(1)'}), "(input_shape=(shape[0], shape[1]), nb_filter=64, filter_length\n =2, border_mode='valid', activation='relu', subsample_length=1)\n", (6756, 6886), False, 'from keras.layers.convolutional import Convolution1D, MaxPooling1D\n'), ((7020, 7047), 'keras.layers.convolutional.MaxPooling1D', 'MaxPooling1D', ([], {'pool_length': '(2)'}), '(pool_length=2)\n', (7032, 7047), False, 'from keras.layers.convolutional import Convolution1D, MaxPooling1D\n'), ((7064, 7077), 'keras.layers.core.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (7071, 7077), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((7093, 7102), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (7100, 7102), False, 'from keras.layers.core import Flatten\n'), ((7119, 7129), 'keras.layers.core.Dense', 'Dense', (['(250)'], {}), '(250)\n', (7124, 7129), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((7145, 7158), 'keras.layers.core.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (7152, 7158), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((7174, 7192), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7184, 7192), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((7209, 7217), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (7214, 7217), False, 'from keras.layers.core 
import Dense, Dropout, Activation\n'), ((7233, 7253), 'keras.layers.core.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (7243, 7253), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((8289, 8310), 'statistics.mean', 'mean', (['percentage_diff'], {}), '(percentage_diff)\n', (8293, 8310), False, 'from statistics import mean\n'), ((14108, 14120), 'numpy.log', 'np.log', (['data'], {}), '(data)\n', (14114, 14120), True, 'import numpy as np\n'), ((10547, 10586), 'numpy.hstack', 'np.hstack', (['(predict[0:mini], p[0:mini])'], {}), '((predict[0:mini], p[0:mini]))\n', (10556, 10586), True, 'import numpy as np\n'), ((14443, 14494), 'numpy.log', 'np.log', (['(predictions[k] / prices[:, length_past + k])'], {}), '(predictions[k] / prices[:, length_past + k])\n', (14449, 14494), True, 'import numpy as np\n'), ((15061, 15128), 'numpy.log', 'np.log', (['(prices[:, length_past + i + 1] / prices[:, length_past + i])'], {}), '(prices[:, length_past + i + 1] / prices[:, length_past + i])\n', (15067, 15128), True, 'import numpy as np\n'), ((15151, 15172), 'numpy.sum', 'np.sum', (['period_return'], {}), '(period_return)\n', (15157, 15172), True, 'import numpy as np\n'), ((7736, 7760), 'math.sqrt', 'math.sqrt', (['trainScore[0]'], {}), '(trainScore[0])\n', (7745, 7760), False, 'import math, time\n'), ((7884, 7907), 'math.sqrt', 'math.sqrt', (['testScore[0]'], {}), '(testScore[0])\n', (7893, 7907), False, 'import math, time\n')] |
import csv
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import make_interp_spline


def _read_scaled_csv(path):
    """Read a two-column CSV, returning (x, y*10) as float arrays."""
    xs, ys = [], []
    with open(path, "r", encoding='utf8') as csvfile:
        for row in csv.reader(csvfile, delimiter=","):
            xs.append(float(row[0]))
            ys.append(float(row[1]) * 10)
    return np.array(xs, dtype="float"), np.array(ys, dtype="float")


x1, y1 = _read_scaled_csv("barrier_width.csv")
x2, y2 = _read_scaled_csv("barrier_height.csv")

# 200 equally spaced sample points across each data range
x1new = np.linspace(x1.min(), x1.max(), 200)
x2new = np.linspace(x2.min(), x2.max(), 200)

# cubic (k=3) smoothing splines through the raw points
y1_smooth = make_interp_spline(x1, y1, k=3)(x1new)
y2_smooth = make_interp_spline(x2, y2, k=3)(x2new)

fig, axs = plt.subplots(2)
fig.suptitle('Tunneling probability vs. barrier width and barrier height')


def _draw_panel(ax, x_raw, y_raw, x_dense, y_dense, xlabel):
    """Draw the dashed linear-regression line and the smooth curve on one panel."""
    fit = np.poly1d(np.polyfit(x_raw, y_raw, 1))
    ax.plot(x_raw, fit(x_raw), '--r')  # red dashed regression line
    ax.plot(x_dense, y_dense)
    ax.set_xlabel(xlabel)
    ax.set_ylabel('Tunneling probability')


_draw_panel(axs[0], x1, y1, x1new, y1_smooth, 'Barrier width')
_draw_panel(axs[1], x2, y2, x2new, y2_smooth, 'Barrier height')
plt.show()
fig.savefig('tunneling_probability.png')
| [
"numpy.polyfit",
"numpy.array",
"matplotlib.pyplot.subplots",
"scipy.interpolate.make_interp_spline",
"csv.reader",
"numpy.poly1d",
"matplotlib.pyplot.show"
] | [((586, 626), 'numpy.array', 'np.array', (['[x for x in x1]'], {'dtype': '"""float"""'}), "([x for x in x1], dtype='float')\n", (594, 626), True, 'import numpy as np\n'), ((633, 673), 'numpy.array', 'np.array', (['[y for y in y1]'], {'dtype': '"""float"""'}), "([y for y in y1], dtype='float')\n", (641, 673), True, 'import numpy as np\n'), ((680, 720), 'numpy.array', 'np.array', (['[x for x in x2]'], {'dtype': '"""float"""'}), "([x for x in x2], dtype='float')\n", (688, 720), True, 'import numpy as np\n'), ((727, 767), 'numpy.array', 'np.array', (['[y for y in y2]'], {'dtype': '"""float"""'}), "([y for y in y2], dtype='float')\n", (735, 767), True, 'import numpy as np\n'), ((984, 1015), 'scipy.interpolate.make_interp_spline', 'make_interp_spline', (['x1', 'y1'], {'k': '(3)'}), '(x1, y1, k=3)\n', (1002, 1015), False, 'from scipy.interpolate import make_interp_spline\n'), ((1024, 1055), 'scipy.interpolate.make_interp_spline', 'make_interp_spline', (['x2', 'y2'], {'k': '(3)'}), '(x2, y2, k=3)\n', (1042, 1055), False, 'from scipy.interpolate import make_interp_spline\n'), ((1148, 1163), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (1160, 1163), True, 'import matplotlib.pyplot as plt\n'), ((1275, 1296), 'numpy.polyfit', 'np.polyfit', (['x1', 'y1', '(1)'], {}), '(x1, y1, 1)\n', (1285, 1296), True, 'import numpy as np\n'), ((1308, 1323), 'numpy.poly1d', 'np.poly1d', (['coef'], {}), '(coef)\n', (1317, 1323), True, 'import numpy as np\n'), ((1591, 1612), 'numpy.polyfit', 'np.polyfit', (['x2', 'y2', '(1)'], {}), '(x2, y2, 1)\n', (1601, 1612), True, 'import numpy as np\n'), ((1624, 1639), 'numpy.poly1d', 'np.poly1d', (['coef'], {}), '(coef)\n', (1633, 1639), True, 'import numpy as np\n'), ((1874, 1884), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1882, 1884), True, 'import matplotlib.pyplot as plt\n'), ((234, 268), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (244, 268), False, 
'import csv\n'), ((447, 481), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (457, 481), False, 'import csv\n')] |
import os
import time
import yaml
import pickle
import numpy as np
from random import shuffle
from sklearn.neighbors import KDTree
# Ordered (volume_id, layer_id) pairs; adjacency in this array is what
# is_match() treats as "consecutive layers" when pairing hits.
ALL_LAYERS = np.array([[8,2],
                    [8,4],
                    [8,6],
                    [8,8],
                    [13,2],
                    [13,4],
                    [13,6],
                    [13,8],
                    [17,2],
                    [17,4]])
def construct_dataset(paths, nb_samples, feature_names):
    """Sample hit-pair training examples from a list of pickled event files.

    Args:
        paths: event file paths, consumed in order until enough pairs exist.
        nb_samples: total number of hit pairs to collect.
        feature_names: hit feature columns to extract (first 3 assumed xyz).

    Returns:
        (hits_a, hits_b, targets), each truncated to exactly nb_samples.
    """
    t0 = time.time()
    nb_processed = 0  # NOTE(review): never updated or read — dead variable
    hits_a = []
    hits_b = []
    targets = []
    print("Sampling hit pairs for training dataset. \nWARNING: ASSUMING FIRST 3 FEATURES OF HITS ARE XYZ")
    for i, path in enumerate(paths):
        sample = load_event(path)
        hits, particle_ids, vols, layers = process_sample(sample, feature_names)
        h_a, h_b, t = build_pairs(hits, particle_ids, vols, layers)
        hits_a.extend(h_a)
        hits_b.extend(h_b)
        targets.extend(t)
        # progress report every other file
        if (i%2)==0:
            elapsed = (time.time() - t0)/60
            remain = (nb_samples-len(hits_a)) / len(hits_a) * elapsed # THIS ALGORITHM IS OFF??
            print("file {:4d}, {:8d}. Elapsed: {:4.1f}m, Remain: {:4.1f}m".format(i,
                len(hits_a), elapsed, remain))
        if len(hits_a) > nb_samples:
            break
    return (hits_a[:nb_samples], hits_b[:nb_samples], targets[:nb_samples])
def process_sample(sample, feature_names):
    """Unpack a (hits, truth) event pair into feature lists and id arrays.

    Args:
        sample: (hits_dataframe, truth_dataframe) pair.
        feature_names: hit columns to keep, in order.

    Returns:
        (hits, particle_ids, volume_ids, layer_ids) where hits and
        particle_ids are plain lists and the id fields are numpy arrays.
    """
    hits_df, truth_df = sample[0], sample[1]
    volume_ids = hits_df['volume_id'].values
    layer_ids = hits_df['layer_id'].values
    feature_rows = hits_df[feature_names].values.tolist()
    particle_ids = truth_df['particle_id'].values.tolist()
    return feature_rows, particle_ids, volume_ids, layer_ids
def get_dense_pairs(hits, where_track):
    """Return all ordered pairs (including self-pairs) of one track's hits."""
    track_indices = list(where_track)
    hits_a = [hits[i] for i in track_indices for _ in track_indices]
    hits_b = [hits[j] for _ in track_indices for j in track_indices]
    return hits_a, hits_b
def is_match(hit_id_a, hit_id_b, vols, layers):
    """Return True if hit b lies on a layer adjacent (in ALL_LAYERS order)
    to hit a's layer; False otherwise, including when a's layer is unknown."""
    pair_a = [vols[hit_id_a], layers[hit_id_a]]
    pair_b = [vols[hit_id_b], layers[hit_id_b]]
    for idx, layer_pair in enumerate(ALL_LAYERS):
        if not (layer_pair == pair_a).all():
            continue
        if idx > 0 and (pair_b == ALL_LAYERS[idx - 1]).all():
            return True
        if (idx + 1) < len(ALL_LAYERS) and (pair_b == ALL_LAYERS[idx + 1]).all():
            return True
    return False
def get_true_pairs_layerwise(hits, where_track, vols, layers):
    """Build symmetric true hit pairs for one track, keeping only hit pairs
    whose layers are adjacent according to is_match()."""
    hits_a = []
    hits_b = []
    n = len(where_track)
    for first in range(n):
        for second in range(first + 1, n):
            idx_a = where_track[first]
            idx_b = where_track[second]
            if not is_match(idx_a, idx_b, vols, layers):
                continue
            # add the pair in both orders so the dataset is symmetric
            hits_a += [hits[idx_a], hits[idx_b]]
            hits_b += [hits[idx_b], hits[idx_a]]
    return hits_a, hits_b
# def get_true_pairs_layerwise(hits, where_track, z):
# sorted_by_z = np.argsort(z[where_track]).tolist()
# track_hits = [hits[i] for i in where_track]
# track_hits = [track_hits[s] for s in sorted_by_z]
#
# hits_a = []
# hits_b = []
# len_track = len(where_track)
# nb_processed = 0
# for i in range(len_track):
# lower_bound = i-min(1,nb_processed)
# upper_bound = i+min(2,len_track-nb_processed)
# for j in range(lower_bound, upper_bound):
# hits_a.append(track_hits[i])
# hits_b.append(track_hits[j])
# nb_processed += 1
#
# return hits_a, hits_b
#
def get_false_pairs(hits, where_track, particle_ids, pid, nb_false_pairs):
    """Sample `nb_false_pairs` (track hit, non-track hit) negative pairs.

    Args:
        hits: list of hit feature vectors.
        where_track: indices of hits belonging to particle `pid`.
        particle_ids: array of particle ids, one per hit.
        pid: the particle id of the track.
        nb_false_pairs: number of negative pairs to draw.

    Returns:
        (h_a, h_b): h_a holds hits from the track, h_b hits of other particles.

    Raises:
        ValueError: if fewer than `nb_false_pairs` non-track hits exist
            (np.random.choice with replace=False).
    """
    h_a = []
    h_b = []
    where_not_track = np.where(particle_ids!=pid)[0]
    where_not_track = list(np.random.choice(where_not_track, nb_false_pairs, replace=False))
    # FIX: removed an unused `seed_hit` draw that consumed RNG state for nothing.
    track_hit_order = list(np.random.choice(where_track, nb_false_pairs))
    for i, j in zip(track_hit_order, where_not_track):
        h_a.append(hits[i])
        h_b.append(hits[j])
    return h_a, h_b
def get_pairs_one_pid(hits, particle_ids, pid, z, vols, layers):
    """Return balanced true/false hit pairs for one particle id.

    NOTE(review): `z` is unused by the current layer-wise pairing; it was
    needed by the commented-out z-sorted variant above — confirm before
    removing it from the signature.
    """
    where_track = list(np.where(particle_ids==pid)[0])
    # h_true_a, h_true_b = get_dense_pairs(hits, where_track)
    h_true_a, h_true_b = get_true_pairs_layerwise(hits, where_track, vols, layers)
    target_true = [1] * len(h_true_a)
    if len(h_true_a)==0:
        return [], [], []
    # draw as many false pairs as true pairs so the classes stay balanced
    h_false_a, h_false_b = get_false_pairs(hits, where_track, particle_ids, pid, len(h_true_a))
    target_false = [0] * len(h_false_a)
    return h_true_a+h_false_a, h_true_b+h_false_b, target_true+target_false
def build_pairs(hits, particle_ids, vols, layers, nb_particles_per_sample=2000):
    """Build true/false hit pairs for up to `nb_particles_per_sample` particles.

    Particle id 0 (must be present) is excluded from pairing.

    Returns:
        (hits_a, hits_b, target) lists of equal length.
    """
    unique_pids = list(set(particle_ids))
    unique_pids.remove(0)
    pids = np.array(particle_ids)
    shuffle(unique_pids)
    hits_a = []
    hits_b = []
    target = []
    z = np.array(hits)[:, 2]  # assumes first 3 hit features are xyz — see caller's warning
    # FIX: guard against events with fewer particles than requested, which
    # previously raised IndexError on unique_pids[i].
    for pid in unique_pids[:nb_particles_per_sample]:
        h_a, h_b, t = get_pairs_one_pid(hits, pids, pid, z, vols, layers)
        hits_a.extend(h_a)
        hits_b.extend(h_b)
        target.extend(t)
    return hits_a, hits_b, target
def combine_samples(hits_a, hits_b, targets):
    """Concatenate hit pairs and labels into one float32 matrix (label last)."""
    labels = np.asarray(targets, dtype=np.float32).reshape(-1, 1)
    combined = np.concatenate((hits_a, hits_b, labels), axis=1)
    return combined.astype(np.float32)
def preprocess_dataset(paths, nb_samples, feature_names):
    """Sample hit pairs from `paths` and compute normalization statistics.

    Returns:
        (dataset, stats): the combined float32 matrix and a dict with the
        per-feature 'mean' and 'std' of the sampled hits.
    """
    pairs_a, pairs_b, labels = construct_dataset(paths, nb_samples, feature_names)
    dataset = combine_samples(pairs_a, pairs_b, labels)
    mean, std = extract_stats(pairs_a, pairs_b)
    return dataset, {'mean': mean, 'std': std}
def extract_stats(h_a, h_b):
    """Compute per-feature mean and std over the union of both hit sets."""
    stacked = np.concatenate(
        (np.array(h_a, dtype=np.float32), np.array(h_b, dtype=np.float32)),
        axis=0,
    )
    return np.mean(stacked, axis=0), np.std(stacked, axis=0)
#############################################
# UTILS #
#############################################
def save_stats(stats, save_path, name):
    """Write `stats` as YAML to <save_path>/<name>.yml.

    BUG FIX: the filename was hard-coded to 'stats.yml', silently ignoring
    the `name` argument (the caller passes 'stage_1').
    """
    save_file = os.path.join(save_path, "{}.yml".format(name))
    with open(save_file, 'w') as f:
        yaml.dump(stats, f, default_flow_style=False)
def save_dataset(dataset, save_path, name):
    """Pickle `dataset` to <save_path>/<name>.pickle."""
    target = os.path.join(save_path, "{}.pickle".format(name))
    with open(target, 'wb') as handle:
        pickle.dump(dataset, handle)
def load_event(path):
    """Load and return one pickled event from `path`."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def split_dataset(dataset, nb_train, nb_valid, nb_test):
    """Shuffle `dataset` in place and split it into train/valid/test parts.

    Args:
        dataset: sliceable collection (e.g. numpy array) of samples.
        nb_train, nb_valid, nb_test: split sizes; their sum must not exceed
            len(dataset) (asserted).

    Returns:
        (train, valid, test) slices of the shuffled dataset.
    """
    print(len(dataset), nb_train, nb_valid, nb_test)
    assert len(dataset) >= (nb_train + nb_valid + nb_test)
    np.random.shuffle(dataset)
    train = dataset[:nb_train]
    valid = dataset[nb_train:(nb_train + nb_valid)]
    # BUG FIX: the test split used dataset[-nb_test:], which returns the WHOLE
    # dataset when nb_test == 0 and can overlap the validation slice; take the
    # contiguous slice right after the validation split instead.
    test = dataset[(nb_train + nb_valid):(nb_train + nb_valid + nb_test)]
    return train, valid, test
#############################################
# MAIN #
#############################################
def preprocess(experiment_name, artifact_storage, data_path, feature_names, save_dir, nb_train, nb_valid, nb_test, force=False):
    """Run stage-1 preprocessing unless its outputs already exist.

    Skips the work when `save_dir` exists or a best embedding model is found,
    unless `force` is True. Always returns `save_dir`.

    NOTE(review): the isfile() check omits `experiment_name` from the model
    path while the print below includes it — confirm which layout is correct.
    """
    if os.path.isdir(save_dir) and (force!=True):
        print("Stage 1 preprocessing dir exists")
    elif os.path.isfile(os.path.join(artifact_storage, 'metric_learning_emb', 'best_model.pkl')) and (force!=True):
        print("Best embedding model exists from previous run. Not forcing preprocessing stage 1.")
    else:
        print("Saving to:",str(os.path.join(artifact_storage, experiment_name, 'metric_learning_emb', 'best_model.pkl')))
        event_files = os.listdir(data_path)
        event_paths = [os.path.join(data_path, f) for f in event_files]
        shuffle(event_paths)
        nb_samples = nb_train + nb_valid + nb_test
        dataset, stats = preprocess_dataset(event_paths, nb_samples, feature_names)
        os.makedirs(save_dir, exist_ok=True)
        save_stats(stats, save_dir, 'stage_1')
        train, valid, test = split_dataset(dataset, nb_train, nb_valid, nb_test)
        save_dataset(train, save_dir, 'train')
        save_dataset(valid, save_dir, 'valid')
        save_dataset(test, save_dir, 'test')
    return save_dir
def main(args, force=False):
    """Run stage-1 metric-learning preprocessing using paths from *args*."""
    input_dir = os.path.join(args.data_storage_path, 'preprocess_raw')
    output_dir = os.path.join(args.data_storage_path, 'metric_stage_1')
    preprocess(args.name,
               args.artifact_storage_path,
               input_dir,
               args.feature_names,
               output_dir,
               args.nb_train,
               args.nb_valid,
               args.nb_test,
               force)
if __name__ == "__main__":
    # Script entry point: parse CLI arguments (read_args is defined
    # elsewhere in this file) and run the preprocessing pipeline.
    args = read_args()
    main(args)
| [
"numpy.mean",
"os.listdir",
"pickle.dump",
"random.shuffle",
"os.makedirs",
"yaml.dump",
"numpy.where",
"numpy.random.choice",
"os.path.join",
"pickle.load",
"numpy.array",
"os.path.isdir",
"numpy.concatenate",
"numpy.std",
"time.time",
"numpy.random.shuffle"
] | [((146, 247), 'numpy.array', 'np.array', (['[[8, 2], [8, 4], [8, 6], [8, 8], [13, 2], [13, 4], [13, 6], [13, 8], [17, 2\n ], [17, 4]]'], {}), '([[8, 2], [8, 4], [8, 6], [8, 8], [13, 2], [13, 4], [13, 6], [13, 8\n ], [17, 2], [17, 4]])\n', (154, 247), True, 'import numpy as np\n'), ((507, 518), 'time.time', 'time.time', ([], {}), '()\n', (516, 518), False, 'import time\n'), ((5080, 5102), 'numpy.array', 'np.array', (['particle_ids'], {}), '(particle_ids)\n', (5088, 5102), True, 'import numpy as np\n'), ((5107, 5127), 'random.shuffle', 'shuffle', (['unique_pids'], {}), '(unique_pids)\n', (5114, 5127), False, 'from random import shuffle\n'), ((5959, 5990), 'numpy.array', 'np.array', (['h_a'], {'dtype': 'np.float32'}), '(h_a, dtype=np.float32)\n', (5967, 5990), True, 'import numpy as np\n'), ((6001, 6032), 'numpy.array', 'np.array', (['h_b'], {'dtype': 'np.float32'}), '(h_b, dtype=np.float32)\n', (6009, 6032), True, 'import numpy as np\n'), ((6050, 6084), 'numpy.concatenate', 'np.concatenate', (['(h_a, h_b)'], {'axis': '(0)'}), '((h_a, h_b), axis=0)\n', (6064, 6084), True, 'import numpy as np\n'), ((6094, 6121), 'numpy.mean', 'np.mean', (['h_combined'], {'axis': '(0)'}), '(h_combined, axis=0)\n', (6101, 6121), True, 'import numpy as np\n'), ((6132, 6158), 'numpy.std', 'np.std', (['h_combined'], {'axis': '(0)'}), '(h_combined, axis=0)\n', (6138, 6158), True, 'import numpy as np\n'), ((6974, 7000), 'numpy.random.shuffle', 'np.random.shuffle', (['dataset'], {}), '(dataset)\n', (6991, 7000), True, 'import numpy as np\n'), ((8523, 8577), 'os.path.join', 'os.path.join', (['args.data_storage_path', '"""metric_stage_1"""'], {}), "(args.data_storage_path, 'metric_stage_1')\n", (8535, 8577), False, 'import os\n'), ((8594, 8648), 'os.path.join', 'os.path.join', (['args.data_storage_path', '"""preprocess_raw"""'], {}), "(args.data_storage_path, 'preprocess_raw')\n", (8606, 8648), False, 'import os\n'), ((3948, 3977), 'numpy.where', 'np.where', (['(particle_ids != pid)'], {}), 
'(particle_ids != pid)\n', (3956, 3977), True, 'import numpy as np\n'), ((4006, 4070), 'numpy.random.choice', 'np.random.choice', (['where_not_track', 'nb_false_pairs'], {'replace': '(False)'}), '(where_not_track, nb_false_pairs, replace=False)\n', (4022, 4070), True, 'import numpy as np\n'), ((4170, 4215), 'numpy.random.choice', 'np.random.choice', (['where_track', 'nb_false_pairs'], {}), '(where_track, nb_false_pairs)\n', (4186, 4215), True, 'import numpy as np\n'), ((5185, 5199), 'numpy.array', 'np.array', (['hits'], {}), '(hits)\n', (5193, 5199), True, 'import numpy as np\n'), ((6469, 6514), 'yaml.dump', 'yaml.dump', (['stats', 'f'], {'default_flow_style': '(False)'}), '(stats, f, default_flow_style=False)\n', (6478, 6514), False, 'import yaml\n'), ((6671, 6694), 'pickle.dump', 'pickle.dump', (['dataset', 'f'], {}), '(dataset, f)\n', (6682, 6694), False, 'import pickle\n'), ((6767, 6781), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6778, 6781), False, 'import pickle\n'), ((7418, 7441), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (7431, 7441), False, 'import os\n'), ((4437, 4466), 'numpy.where', 'np.where', (['(particle_ids == pid)'], {}), '(particle_ids == pid)\n', (4445, 4466), True, 'import numpy as np\n'), ((5521, 5556), 'numpy.array', 'np.array', (['targets'], {'dtype': 'np.float32'}), '(targets, dtype=np.float32)\n', (5529, 5556), True, 'import numpy as np\n'), ((5582, 5625), 'numpy.concatenate', 'np.concatenate', (['(hits_a, hits_b, t)'], {'axis': '(1)'}), '((hits_a, hits_b, t), axis=1)\n', (5596, 5625), True, 'import numpy as np\n'), ((7880, 7901), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (7890, 7901), False, 'import os\n'), ((7982, 8002), 'random.shuffle', 'shuffle', (['event_paths'], {}), '(event_paths)\n', (7989, 8002), False, 'from random import shuffle\n'), ((8148, 8184), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (8159, 8184), 
False, 'import os\n'), ((7535, 7606), 'os.path.join', 'os.path.join', (['artifact_storage', '"""metric_learning_emb"""', '"""best_model.pkl"""'], {}), "(artifact_storage, 'metric_learning_emb', 'best_model.pkl')\n", (7547, 7606), False, 'import os\n'), ((7925, 7951), 'os.path.join', 'os.path.join', (['data_path', 'f'], {}), '(data_path, f)\n', (7937, 7951), False, 'import os\n'), ((1040, 1051), 'time.time', 'time.time', ([], {}), '()\n', (1049, 1051), False, 'import time\n'), ((7767, 7859), 'os.path.join', 'os.path.join', (['artifact_storage', 'experiment_name', '"""metric_learning_emb"""', '"""best_model.pkl"""'], {}), "(artifact_storage, experiment_name, 'metric_learning_emb',\n 'best_model.pkl')\n", (7779, 7859), False, 'import os\n')] |
import os
import unittest
import numpy as np
import pandas as pd
from lusidtools import logger
from lusidtools.cocoon.dateorcutlabel import DateOrCutLabel
from parameterized import parameterized
from datetime import datetime
import pytz
class CocoonDateOrCutLabelTests(unittest.TestCase):
    """Tests for lusidtools.cocoon.dateorcutlabel.DateOrCutLabel.

    Verifies that assorted datetime representations (ISO strings, cut
    labels, naive and zoned datetime objects, numpy and pandas timestamps,
    and custom-formatted strings) are normalised to the expected string
    held in ``DateOrCutLabel.data``.
    """

    @classmethod
    def setUpClass(cls) -> None:
        # Configure library logging at the level taken from the
        # FBN_LOG_LEVEL environment variable (default "info").
        cls.logger = logger.LusidLogger(os.getenv("FBN_LOG_LEVEL", "info"))

    # Each case: [description, input value, expected normalised string].
    @parameterized.expand(
        [
            [
                "Already in ISO format positive offset",
                "2019-11-04T13:25:34+00:00",
                "2019-11-04T13:25:34+00:00",
            ],
            [
                "Already in ISO format negative offset",
                "2020-04-29T09:30:00-05:00",
                "2020-04-29T14:30:00+00:00",
            ],
            [
                "Already in ISO format with microseconds",
                "2012-05-21T00:00:00.1234500+00:00",
                "2012-05-21T00:00:00.123450+00:00",
            ],
            [
                "Already in ISO format UTC",
                "2019-11-04T13:25:34Z",
                "2019-11-04T13:25:34Z",
            ],
            ["A date with year first", "2019-11-04", "2019-11-04T00:00:00+00:00"],
            ["A date with day first", "04-11-2019", "2019-11-04T00:00:00+00:00"],
            ["A date with month first", "11-04-2019", "2019-11-04T00:00:00+00:00"],
            ["A cut label", "2019-11-04NNYSEClose", "2019-11-04NNYSEClose"],
            [
                "Datetime object with no timezone info",
                datetime(year=2019, month=8, day=5, tzinfo=None),
                "2019-08-05T00:00:00+00:00",
            ],
            [
                "Datetime object with a timezone other than UTC",
                pytz.timezone("America/New_York").localize(
                    datetime(year=2019, month=8, day=5, hour=10, minute=30)
                ),
                "2019-08-05T14:30:00+00:00",
            ],
            [
                "ISO format with no timezone info",
                "2019-04-11T00:00:00",
                "2019-04-11T00:00:00+00:00",
            ],
            [
                "ISO format with no timezone info and has milliseconds specified",
                "2019-11-20T00:00:00.000000000",
                "2019-11-20T00:00:00.000000000+00:00",
            ],
            [
                "Already in ISO format with mircoseconds",
                "2019-09-01T09:31:22.664000+00:00",
                "2019-09-01T09:31:22.664000+00:00",
            ],
            [
                "Already in ISO format with mircoseconds Z timezone",
                "2019-09-01T09:31:22.664000Z",
                "2019-09-01T09:31:22.664000Z",
            ],
            [
                "numpy datetime with microseconds",
                np.array(["2019-09-01T09:31:22.664"], dtype="datetime64[ns]"),
                "2019-09-01T09:31:22.664000Z",
            ],
            [
                "pandas datetime with microseconds",
                pd.Timestamp("2019-09-01T09:31:22.664"),
                "2019-09-01T09:31:22.664000+00:00",
            ],
            [
                "numpy datetime64",
                np.datetime64("2019-07-02"),
                "2019-07-02T00:00:00.000000Z",
            ],
        ]
    )
    def test_dateorcutlabel(self, test_name, datetime_value, expected_outcome):
        """DateOrCutLabel(value) must stringify to the expected ISO form."""
        # There is no handling for month first, it will assume it is day first
        ignore = ["A date with month first"]
        if test_name in ignore:
            self.skipTest("Test not implemented ")
        date_or_cut_label = DateOrCutLabel(datetime_value)
        self.assertEqual(first=expected_outcome, second=str(date_or_cut_label.data))

    # Each case: [description, input string, strptime format, expected output].
    @parameterized.expand(
        [
            [
                "YYYY-mm-dd_dashes",
                "2019-09-01",
                "%Y-%m-%d",
                "2019-09-01T00:00:00+00:00",
            ],
            [
                "dd/mm/YYYY_ forwardslashes",
                "01/09/2019",
                "%d/%m/%Y",
                "2019-09-01T00:00:00+00:00",
            ],
            [
                "YYYY-mm-dd HH:MM:SS_dashes_and_colons",
                "2019-09-01 6:30:30",
                "%Y-%m-%d %H:%M:%S",
                "2019-09-01T06:30:30+00:00",
            ],
            [
                "YYYY-mm-dd HH:MM:SS.000001_dashes_colons_and microseconds",
                "2019-09-01 6:30:30.005001",
                "%Y-%m-%d %H:%M:%S.%f",
                "2019-09-01T06:30:30.005001+00:00",
            ],
            [
                "YYYY-mm-dd HH:MM:SS.000001_dashes_colons_and microseconds and timezone",
                "2019-09-01 6:30:30.005001-10:00",
                "%Y-%m-%d %H:%M:%S.%f%z",
                "2019-09-01T16:30:30.005001+00:00",
            ],
        ]
    )
    def test_dateorcutlabel_with_custom_format(
        self, test_name, datetime_value, custom_format, expected_outcome
    ):
        """An explicit strptime format must drive the parse."""
        date_or_cut_label = DateOrCutLabel(datetime_value, custom_format)
        self.assertEqual(first=expected_outcome, second=str(date_or_cut_label.data))
| [
"datetime.datetime",
"pytz.timezone",
"os.getenv",
"parameterized.parameterized.expand",
"lusidtools.cocoon.dateorcutlabel.DateOrCutLabel",
"numpy.array",
"numpy.datetime64",
"pandas.Timestamp"
] | [((3750, 4420), 'parameterized.parameterized.expand', 'parameterized.expand', (["[['YYYY-mm-dd_dashes', '2019-09-01', '%Y-%m-%d',\n '2019-09-01T00:00:00+00:00'], ['dd/mm/YYYY_ forwardslashes',\n '01/09/2019', '%d/%m/%Y', '2019-09-01T00:00:00+00:00'], [\n 'YYYY-mm-dd HH:MM:SS_dashes_and_colons', '2019-09-01 6:30:30',\n '%Y-%m-%d %H:%M:%S', '2019-09-01T06:30:30+00:00'], [\n 'YYYY-mm-dd HH:MM:SS.000001_dashes_colons_and microseconds',\n '2019-09-01 6:30:30.005001', '%Y-%m-%d %H:%M:%S.%f',\n '2019-09-01T06:30:30.005001+00:00'], [\n 'YYYY-mm-dd HH:MM:SS.000001_dashes_colons_and microseconds and timezone',\n '2019-09-01 6:30:30.005001-10:00', '%Y-%m-%d %H:%M:%S.%f%z',\n '2019-09-01T16:30:30.005001+00:00']]"], {}), "([['YYYY-mm-dd_dashes', '2019-09-01', '%Y-%m-%d',\n '2019-09-01T00:00:00+00:00'], ['dd/mm/YYYY_ forwardslashes',\n '01/09/2019', '%d/%m/%Y', '2019-09-01T00:00:00+00:00'], [\n 'YYYY-mm-dd HH:MM:SS_dashes_and_colons', '2019-09-01 6:30:30',\n '%Y-%m-%d %H:%M:%S', '2019-09-01T06:30:30+00:00'], [\n 'YYYY-mm-dd HH:MM:SS.000001_dashes_colons_and microseconds',\n '2019-09-01 6:30:30.005001', '%Y-%m-%d %H:%M:%S.%f',\n '2019-09-01T06:30:30.005001+00:00'], [\n 'YYYY-mm-dd HH:MM:SS.000001_dashes_colons_and microseconds and timezone',\n '2019-09-01 6:30:30.005001-10:00', '%Y-%m-%d %H:%M:%S.%f%z',\n '2019-09-01T16:30:30.005001+00:00']])\n", (3770, 4420), False, 'from parameterized import parameterized\n'), ((3627, 3657), 'lusidtools.cocoon.dateorcutlabel.DateOrCutLabel', 'DateOrCutLabel', (['datetime_value'], {}), '(datetime_value)\n', (3641, 3657), False, 'from lusidtools.cocoon.dateorcutlabel import DateOrCutLabel\n'), ((5015, 5060), 'lusidtools.cocoon.dateorcutlabel.DateOrCutLabel', 'DateOrCutLabel', (['datetime_value', 'custom_format'], {}), '(datetime_value, custom_format)\n', (5029, 5060), False, 'from lusidtools.cocoon.dateorcutlabel import DateOrCutLabel\n'), ((383, 417), 'os.getenv', 'os.getenv', (['"""FBN_LOG_LEVEL"""', '"""info"""'], {}), "('FBN_LOG_LEVEL', 
'info')\n", (392, 417), False, 'import os\n'), ((1569, 1617), 'datetime.datetime', 'datetime', ([], {'year': '(2019)', 'month': '(8)', 'day': '(5)', 'tzinfo': 'None'}), '(year=2019, month=8, day=5, tzinfo=None)\n', (1577, 1617), False, 'from datetime import datetime\n'), ((2822, 2883), 'numpy.array', 'np.array', (["['2019-09-01T09:31:22.664']"], {'dtype': '"""datetime64[ns]"""'}), "(['2019-09-01T09:31:22.664'], dtype='datetime64[ns]')\n", (2830, 2883), True, 'import numpy as np\n'), ((3030, 3069), 'pandas.Timestamp', 'pd.Timestamp', (['"""2019-09-01T09:31:22.664"""'], {}), "('2019-09-01T09:31:22.664')\n", (3042, 3069), True, 'import pandas as pd\n'), ((3204, 3231), 'numpy.datetime64', 'np.datetime64', (['"""2019-07-02"""'], {}), "('2019-07-02')\n", (3217, 3231), True, 'import numpy as np\n'), ((1839, 1894), 'datetime.datetime', 'datetime', ([], {'year': '(2019)', 'month': '(8)', 'day': '(5)', 'hour': '(10)', 'minute': '(30)'}), '(year=2019, month=8, day=5, hour=10, minute=30)\n', (1847, 1894), False, 'from datetime import datetime\n'), ((1775, 1808), 'pytz.timezone', 'pytz.timezone', (['"""America/New_York"""'], {}), "('America/New_York')\n", (1788, 1808), False, 'import pytz\n')] |
# -*- coding: utf-8 -*-
"""
Unit tests for the statistics module.
:copyright: Copyright 2014-2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division
import itertools
import math
import sys
import unittest
import neo
import numpy as np
import quantities as pq
import scipy.integrate as spint
from numpy.testing import assert_array_almost_equal, assert_array_equal, \
assert_array_less
import elephant.kernels as kernels
from elephant import statistics
from elephant.spike_train_generation import homogeneous_poisson_process
if sys.version_info.major == 2:
import unittest2 as unittest
class isi_TestCase(unittest.TestCase):
    """Tests for elephant.statistics.isi on 1-D and 2-D inputs of several
    types (neo.SpikeTrain, pq.Quantity, plain ndarray) and both axes."""

    def setUp(self):
        self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
                                       [0.02, 0.71, 1.82, 8.46],
                                       [0.03, 0.14, 0.15, 0.92]])
        self.targ_array_2d_0 = np.array([[-0.28, 0.15, 0.95, 7.23],
                                          [0.01, -0.57, -1.67, -7.54]])
        self.targ_array_2d_1 = np.array([[0.26, 0.31, 0.36],
                                          [0.69, 1.11, 6.64],
                                          [0.11, 0.01, 0.77]])
        self.targ_array_2d_default = self.targ_array_2d_1
        self.test_array_1d = self.test_array_2d[0, :]
        self.targ_array_1d = self.targ_array_2d_1[0, :]

    def test_isi_with_spiketrain(self):
        spiketrain = neo.SpikeTrain(
            self.test_array_1d, units='ms', t_stop=10.0, t_start=0.29)
        expected = pq.Quantity(self.targ_array_1d, 'ms')
        assert_array_almost_equal(statistics.isi(spiketrain), expected,
                                  decimal=9)

    def test_isi_with_quantities_1d(self):
        intervals = statistics.isi(pq.Quantity(self.test_array_1d, units='ms'))
        expected = pq.Quantity(self.targ_array_1d, 'ms')
        assert_array_almost_equal(intervals, expected, decimal=9)

    def test_isi_with_plain_array_1d(self):
        intervals = statistics.isi(self.test_array_1d)
        # A plain ndarray input must yield a plain (unit-less) result.
        self.assertNotIsInstance(intervals, pq.Quantity)
        assert_array_almost_equal(intervals, self.targ_array_1d, decimal=9)

    def test_isi_with_plain_array_2d_default(self):
        intervals = statistics.isi(self.test_array_2d)
        self.assertNotIsInstance(intervals, pq.Quantity)
        assert_array_almost_equal(intervals, self.targ_array_2d_default,
                                  decimal=9)

    def test_isi_with_plain_array_2d_0(self):
        intervals = statistics.isi(self.test_array_2d, axis=0)
        self.assertNotIsInstance(intervals, pq.Quantity)
        assert_array_almost_equal(intervals, self.targ_array_2d_0, decimal=9)

    def test_isi_with_plain_array_2d_1(self):
        intervals = statistics.isi(self.test_array_2d, axis=1)
        self.assertNotIsInstance(intervals, pq.Quantity)
        assert_array_almost_equal(intervals, self.targ_array_2d_1, decimal=9)

    def test_unsorted_array(self):
        # Unsorted spike times must trigger a UserWarning.
        np.random.seed(0)
        values = np.random.rand(100)
        with self.assertWarns(UserWarning):
            statistics.isi(values)
class isi_cv_TestCase(unittest.TestCase):
    """The coefficient of variation of a perfectly regular spike train's
    inter-spike intervals must be exactly zero."""

    def setUp(self):
        self.test_array_regular = np.arange(1, 6)

    def test_cv_isi_regular_spiketrain_is_zero(self):
        spiketrain = neo.SpikeTrain(self.test_array_regular, units='ms',
                                     t_stop=10.0)
        self.assertEqual(statistics.cv(statistics.isi(spiketrain)), 0.0)

    def test_cv_isi_regular_array_is_zero(self):
        intervals = statistics.isi(self.test_array_regular)
        self.assertEqual(statistics.cv(intervals), 0.0)
class mean_firing_rate_TestCase(unittest.TestCase):
    """Tests for elephant.statistics.mean_firing_rate.

    Covers neo.SpikeTrain, pq.Quantity and plain ndarray inputs of 1 to 3
    dimensions, per-axis reduction, explicit t_start/t_stop windows, and
    TypeError conditions for mismatched unit/float boundaries.  Expected
    rates are spike counts divided by the effective duration (the per-axis
    maximum spike time when no t_stop is given).
    """

    def setUp(self):
        self.test_array_3d = np.ones([5, 7, 13])
        self.test_array_2d = np.array([[0.3, 0.56, 0.87, 1.23],
                                       [0.02, 0.71, 1.82, 8.46],
                                       [0.03, 0.14, 0.15, 0.92]])
        # Expected spike counts per reduction axis.
        self.targ_array_2d_0 = np.array([3, 3, 3, 3])
        self.targ_array_2d_1 = np.array([4, 4, 4])
        self.targ_array_2d_None = 12
        self.targ_array_2d_default = self.targ_array_2d_None
        # Per-axis maxima of the spike times, used as effective durations.
        self.max_array_2d_0 = np.array([0.3, 0.71, 1.82, 8.46])
        self.max_array_2d_1 = np.array([1.23, 8.46, 0.92])
        self.max_array_2d_None = 8.46
        self.max_array_2d_default = self.max_array_2d_None
        self.test_array_1d = self.test_array_2d[0, :]
        self.targ_array_1d = self.targ_array_2d_1[0]
        self.max_array_1d = self.max_array_2d_1[0]

    def test_invalid_input_spiketrain(self):
        # empty spiketrain
        self.assertRaises(ValueError, statistics.mean_firing_rate, [])
        for st_invalid in (None, 0.1):
            self.assertRaises(TypeError, statistics.mean_firing_rate,
                              st_invalid)

    def test_mean_firing_rate_with_spiketrain(self):
        st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
        target = pq.Quantity(self.targ_array_1d / 10., '1/ms')
        res = statistics.mean_firing_rate(st)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_typical_use_case(self):
        # Explicit t_start/t_stop equal to the train's own bounds must give
        # the same rate as the default call.
        np.random.seed(92)
        st = homogeneous_poisson_process(rate=100 * pq.Hz, t_stop=100 * pq.s)
        rate1 = statistics.mean_firing_rate(st)
        rate2 = statistics.mean_firing_rate(st, t_start=st.t_start,
                                            t_stop=st.t_stop)
        self.assertEqual(rate1.units, rate2.units)
        self.assertAlmostEqual(rate1.item(), rate2.item())

    def test_mean_firing_rate_with_spiketrain_set_ends(self):
        st = neo.SpikeTrain(self.test_array_1d, units='ms', t_stop=10.0)
        target = pq.Quantity(2 / 0.5, '1/ms')
        res = statistics.mean_firing_rate(st, t_start=0.4 * pq.ms,
                                          t_stop=0.9 * pq.ms)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_quantities_1d(self):
        st = pq.Quantity(self.test_array_1d, units='ms')
        target = pq.Quantity(self.targ_array_1d / self.max_array_1d, '1/ms')
        res = statistics.mean_firing_rate(st)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_quantities_1d_set_ends(self):
        st = pq.Quantity(self.test_array_1d, units='ms')
        # t_stop is not a Quantity
        self.assertRaises(TypeError, statistics.mean_firing_rate, st,
                          t_start=400 * pq.us, t_stop=1.)
        # t_start is not a Quantity
        self.assertRaises(TypeError, statistics.mean_firing_rate, st,
                          t_start=0.4, t_stop=1. * pq.ms)

    def test_mean_firing_rate_with_plain_array_1d(self):
        st = self.test_array_1d
        target = self.targ_array_1d / self.max_array_1d
        res = statistics.mean_firing_rate(st)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_1d_set_ends(self):
        st = self.test_array_1d
        target = self.targ_array_1d / (1.23 - 0.3)
        res = statistics.mean_firing_rate(st, t_start=0.3, t_stop=1.23)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_2d_default(self):
        st = self.test_array_2d
        target = self.targ_array_2d_default / self.max_array_2d_default
        res = statistics.mean_firing_rate(st)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_2d_0(self):
        st = self.test_array_2d
        target = self.targ_array_2d_0 / self.max_array_2d_0
        res = statistics.mean_firing_rate(st, axis=0)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_2d_1(self):
        st = self.test_array_2d
        target = self.targ_array_2d_1 / self.max_array_2d_1
        res = statistics.mean_firing_rate(st, axis=1)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_3d_None(self):
        st = self.test_array_3d
        target = np.sum(self.test_array_3d, None) / 5.
        res = statistics.mean_firing_rate(st, axis=None, t_stop=5.)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_3d_0(self):
        st = self.test_array_3d
        target = np.sum(self.test_array_3d, 0) / 5.
        res = statistics.mean_firing_rate(st, axis=0, t_stop=5.)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_3d_1(self):
        st = self.test_array_3d
        target = np.sum(self.test_array_3d, 1) / 5.
        res = statistics.mean_firing_rate(st, axis=1, t_stop=5.)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_3d_2(self):
        st = self.test_array_3d
        target = np.sum(self.test_array_3d, 2) / 5.
        res = statistics.mean_firing_rate(st, axis=2, t_stop=5.)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_2d_1_set_ends(self):
        st = self.test_array_2d
        target = np.array([4, 1, 3]) / (1.23 - 0.14)
        res = statistics.mean_firing_rate(st, axis=1, t_start=0.14,
                                          t_stop=1.23)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_2d_None(self):
        st = self.test_array_2d
        target = self.targ_array_2d_None / self.max_array_2d_None
        res = statistics.mean_firing_rate(st, axis=None)
        assert not isinstance(res, pq.Quantity)
        assert_array_almost_equal(res, target, decimal=9)

    def test_mean_firing_rate_with_plain_array_and_units_start_stop_typeerror(
            self):
        # Mixing unit-less array input with Quantity boundaries (or
        # Quantity/float boundaries) must raise TypeError.
        st = self.test_array_2d
        self.assertRaises(TypeError, statistics.mean_firing_rate, st,
                          t_start=pq.Quantity(0, 'ms'))
        self.assertRaises(TypeError, statistics.mean_firing_rate, st,
                          t_stop=pq.Quantity(10, 'ms'))
        self.assertRaises(TypeError, statistics.mean_firing_rate, st,
                          t_start=pq.Quantity(0, 'ms'),
                          t_stop=pq.Quantity(10, 'ms'))
        self.assertRaises(TypeError, statistics.mean_firing_rate, st,
                          t_start=pq.Quantity(0, 'ms'),
                          t_stop=10.)
        self.assertRaises(TypeError, statistics.mean_firing_rate, st,
                          t_start=0.,
                          t_stop=pq.Quantity(10, 'ms'))
class FanoFactorTestCase(unittest.TestCase):
    """Tests for elephant.statistics.fanofactor.

    The fixture builds 300 random spike trains (seeded RNG) and records
    their spike counts; the expected Fano factor is the cross-validated
    variance/mean of those counts.
    """

    def setUp(self):
        np.random.seed(100)
        num_st = 300
        self.test_spiketrains = []
        self.test_array = []
        self.test_quantity = []
        self.test_list = []
        self.sp_counts = np.zeros(num_st)
        for i in range(num_st):
            # Each train gets a random (1..20) number of random spike times.
            r = np.random.rand(np.random.randint(20) + 1)
            st = neo.core.SpikeTrain(r * pq.ms,
                                     t_start=0.0 * pq.ms,
                                     t_stop=20.0 * pq.ms)
            self.test_spiketrains.append(st)
            self.test_array.append(r)
            self.test_quantity.append(r * pq.ms)
            self.test_list.append(list(r))
            # for cross-validation
            self.sp_counts[i] = len(st)

    def test_fanofactor_spiketrains(self):
        # Test with list of spiketrains
        self.assertEqual(
            np.var(self.sp_counts) / np.mean(self.sp_counts),
            statistics.fanofactor(self.test_spiketrains))
        # One spiketrain in list
        st = self.test_spiketrains[0]
        self.assertEqual(statistics.fanofactor([st]), 0.0)

    def test_fanofactor_empty(self):
        # Empty inputs of every flavour must yield NaN.
        # Test with empty list
        self.assertTrue(np.isnan(statistics.fanofactor([])))
        self.assertTrue(np.isnan(statistics.fanofactor([[]])))
        # Test with empty quantity
        self.assertTrue(np.isnan(statistics.fanofactor([] * pq.ms)))
        # Empty spiketrain
        st = neo.core.SpikeTrain([] * pq.ms, t_start=0 * pq.ms,
                                 t_stop=1.5 * pq.ms)
        self.assertTrue(np.isnan(statistics.fanofactor(st)))

    def test_fanofactor_spiketrains_same(self):
        # Test with same spiketrains in list
        sts = [self.test_spiketrains[0]] * 3
        self.assertEqual(statistics.fanofactor(sts), 0.0)

    def test_fanofactor_array(self):
        self.assertEqual(statistics.fanofactor(self.test_array),
                         np.var(self.sp_counts) / np.mean(self.sp_counts))

    def test_fanofactor_array_same(self):
        lst = [self.test_array[0]] * 3
        self.assertEqual(statistics.fanofactor(lst), 0.0)

    def test_fanofactor_quantity(self):
        self.assertEqual(statistics.fanofactor(self.test_quantity),
                         np.var(self.sp_counts) / np.mean(self.sp_counts))

    def test_fanofactor_quantity_same(self):
        lst = [self.test_quantity[0]] * 3
        self.assertEqual(statistics.fanofactor(lst), 0.0)

    def test_fanofactor_list(self):
        self.assertEqual(statistics.fanofactor(self.test_list),
                         np.var(self.sp_counts) / np.mean(self.sp_counts))

    def test_fanofactor_list_same(self):
        lst = [self.test_list[0]] * 3
        self.assertEqual(statistics.fanofactor(lst), 0.0)

    def test_fanofactor_different_durations(self):
        # Trains of unequal duration should produce a warning.
        st1 = neo.SpikeTrain([1, 2, 3] * pq.s, t_stop=4 * pq.s)
        st2 = neo.SpikeTrain([1, 2, 3] * pq.s, t_stop=4.5 * pq.s)
        self.assertWarns(UserWarning, statistics.fanofactor, (st1, st2))

    def test_fanofactor_wrong_type(self):
        # warn_tolerance is not a quantity
        st1 = neo.SpikeTrain([1, 2, 3] * pq.s, t_stop=4 * pq.s)
        self.assertRaises(TypeError, statistics.fanofactor, [st1],
                          warn_tolerance=1e-4)
class LVTestCase(unittest.TestCase):
    """Tests for elephant.statistics.lv (local variation of ISIs)."""

    def setUp(self):
        self.test_seq = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
                         4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
                         4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
                         5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
                         2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
                         11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
                         1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
                         6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
                         6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
                         5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
        self.target = 0.971826029994

    def test_lv_with_quantities(self):
        intervals = pq.Quantity(self.test_seq, units='ms')
        assert_array_almost_equal(statistics.lv(intervals), self.target,
                                  decimal=9)

    def test_lv_with_plain_array(self):
        intervals = np.array(self.test_seq)
        assert_array_almost_equal(statistics.lv(intervals), self.target,
                                  decimal=9)

    def test_lv_with_list(self):
        assert_array_almost_equal(statistics.lv(self.test_seq), self.target,
                                  decimal=9)

    def test_lv_raise_error(self):
        # Empty, scalar and 2-D inputs are all rejected with ValueError.
        self.assertRaises(ValueError, statistics.lv, [])
        self.assertRaises(ValueError, statistics.lv, 1)
        self.assertRaises(ValueError, statistics.lv,
                          np.array([self.test_seq, self.test_seq]))

    def test_2short_spike_train(self):
        # A single interval is too short for LV; with_nan=True must return
        # NaN and emit a UserWarning about the input size.
        with self.assertWarns(UserWarning):
            self.assertTrue(math.isnan(statistics.lv([1], with_nan=True)))
class LVRTestCase(unittest.TestCase):
    """Tests for elephant.statistics.lvr (revised local variation)."""

    def setUp(self):
        self.test_seq = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
                         4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
                         4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
                         5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
                         2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
                         11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
                         1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
                         6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
                         6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
                         5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
        self.target = 2.1845363464753134

    def test_lvr_with_quantities(self):
        intervals = pq.Quantity(self.test_seq, units='ms')
        assert_array_almost_equal(statistics.lvr(intervals), self.target,
                                  decimal=9)

    def test_lvr_with_plain_array(self):
        intervals = np.array(self.test_seq)
        assert_array_almost_equal(statistics.lvr(intervals), self.target,
                                  decimal=9)

    def test_lvr_with_list(self):
        assert_array_almost_equal(statistics.lvr(self.test_seq), self.target,
                                  decimal=9)

    def test_lvr_raise_error(self):
        # Empty input, scalar input, 2-D input and a negative refractory
        # period must all raise ValueError.
        self.assertRaises(ValueError, statistics.lvr, [])
        self.assertRaises(ValueError, statistics.lvr, 1)
        self.assertRaises(ValueError, statistics.lvr,
                          np.array([self.test_seq, self.test_seq]))
        self.assertRaises(ValueError, statistics.lvr, self.test_seq,
                          -1 * pq.ms)

    def test_lvr_refractoriness_kwarg(self):
        # A unit-less refractory period R triggers a warning but the same
        # result as the default.
        intervals = np.array(self.test_seq)
        with self.assertWarns(UserWarning):
            assert_array_almost_equal(statistics.lvr(intervals, R=5),
                                      self.target, decimal=9)

    def test_2short_spike_train(self):
        # A single interval cannot define LvR; expect NaN plus a UserWarning
        # about the input size.
        with self.assertWarns(UserWarning):
            self.assertTrue(math.isnan(statistics.lvr([1], with_nan=True)))
class CV2TestCase(unittest.TestCase):
    """Tests for elephant.statistics.cv2 (local coefficient of variation)."""

    def setUp(self):
        self.test_seq = [1, 28, 4, 47, 5, 16, 2, 5, 21, 12,
                         4, 12, 59, 2, 4, 18, 33, 25, 2, 34,
                         4, 1, 1, 14, 8, 1, 10, 1, 8, 20,
                         5, 1, 6, 5, 12, 2, 8, 8, 2, 8,
                         2, 10, 2, 1, 1, 2, 15, 3, 20, 6,
                         11, 6, 18, 2, 5, 17, 4, 3, 13, 6,
                         1, 18, 1, 16, 12, 2, 52, 2, 5, 7,
                         6, 25, 6, 5, 3, 15, 4, 3, 16, 3,
                         6, 5, 24, 21, 3, 3, 4, 8, 4, 11,
                         5, 7, 5, 6, 8, 11, 33, 10, 7, 4]
        self.target = 1.0022235296529176

    def test_cv2_with_quantities(self):
        intervals = pq.Quantity(self.test_seq, units='ms')
        assert_array_almost_equal(statistics.cv2(intervals), self.target,
                                  decimal=9)

    def test_cv2_with_plain_array(self):
        intervals = np.array(self.test_seq)
        assert_array_almost_equal(statistics.cv2(intervals), self.target,
                                  decimal=9)

    def test_cv2_with_list(self):
        assert_array_almost_equal(statistics.cv2(self.test_seq), self.target,
                                  decimal=9)

    def test_cv2_raise_error(self):
        # Empty, scalar and 2-D inputs are all rejected with ValueError.
        self.assertRaises(ValueError, statistics.cv2, [])
        self.assertRaises(ValueError, statistics.cv2, 1)
        self.assertRaises(ValueError, statistics.cv2,
                          np.array([self.test_seq, self.test_seq]))
class InstantaneousRateTest(unittest.TestCase):
    def setUp(self):
        """Build a seeded Poisson spike train and a shared triangular kernel."""
        # create a poisson spike train:
        self.st_tr = (0, 20.0)  # seconds
        self.st_dur = self.st_tr[1] - self.st_tr[0]  # seconds
        self.st_margin = 5.0  # seconds
        self.st_rate = 10.0  # Hertz
        np.random.seed(19)
        duration_effective = self.st_dur - 2 * self.st_margin
        st_num_spikes = np.random.poisson(self.st_rate * duration_effective)
        # Uniform spike times inside [margin, duration - margin], sorted.
        spike_train = sorted(
            np.random.rand(st_num_spikes) *
            duration_effective +
            self.st_margin)
        # convert spike train into neo objects
        self.spike_train = neo.SpikeTrain(spike_train * pq.s,
                                           t_start=self.st_tr[0] * pq.s,
                                           t_stop=self.st_tr[1] * pq.s)
        # generation of a multiply used specific kernel
        self.kernel = kernels.TriangularKernel(sigma=0.03 * pq.s)
    def test_instantaneous_rate_and_warnings(self):
        """Returned AnalogSignal carries the requested sampling period, Hz
        units and the train's time bounds; cutoff=0 warns."""
        st = self.spike_train
        sampling_period = 0.01 * pq.s
        with self.assertWarns(UserWarning):
            # Catches warning: The width of the kernel was adjusted to a
            # minimally allowed width.
            inst_rate = statistics.instantaneous_rate(
                st, sampling_period, self.kernel, cutoff=0)
        self.assertIsInstance(inst_rate, neo.core.AnalogSignal)
        self.assertEqual(
            inst_rate.sampling_period.simplified, sampling_period.simplified)
        self.assertEqual(inst_rate.simplified.units, pq.Hz)
        self.assertEqual(inst_rate.t_stop.simplified, st.t_stop.simplified)
        self.assertEqual(inst_rate.t_start.simplified, st.t_start.simplified)
    def test_error_instantaneous_rate(self):
        """Invalid argument combinations must raise TypeError/ValueError."""
        # spiketrains must be neo objects, not a Quantity or a plain list
        self.assertRaises(
            TypeError, statistics.instantaneous_rate,
            spiketrains=[1, 2, 3] * pq.s,
            sampling_period=0.01 * pq.ms, kernel=self.kernel)
        self.assertRaises(
            TypeError, statistics.instantaneous_rate, spiketrains=[1, 2, 3],
            sampling_period=0.01 * pq.ms, kernel=self.kernel)
        st = self.spike_train
        # sampling_period must be a positive Quantity
        self.assertRaises(
            TypeError, statistics.instantaneous_rate, spiketrains=st,
            sampling_period=0.01, kernel=self.kernel)
        self.assertRaises(
            ValueError, statistics.instantaneous_rate, spiketrains=st,
            sampling_period=-0.01 * pq.ms, kernel=self.kernel)
        # kernel must be a Kernel instance or the string 'auto'
        self.assertRaises(
            TypeError, statistics.instantaneous_rate, spiketrains=st,
            sampling_period=0.01 * pq.ms, kernel='NONE')
        self.assertRaises(TypeError, statistics.instantaneous_rate,
                          self.spike_train,
                          sampling_period=0.01 * pq.s, kernel='wrong_string',
                          t_start=self.st_tr[0] * pq.s,
                          t_stop=self.st_tr[1] * pq.s,
                          trim=False)
        # cutoff, t_start/t_stop and trim must have the expected types
        self.assertRaises(
            TypeError, statistics.instantaneous_rate, spiketrains=st,
            sampling_period=0.01 * pq.ms, kernel=self.kernel,
            cutoff=20 * pq.ms)
        self.assertRaises(
            TypeError, statistics.instantaneous_rate, spiketrains=st,
            sampling_period=0.01 * pq.ms, kernel=self.kernel, t_start=2)
        self.assertRaises(
            TypeError, statistics.instantaneous_rate, spiketrains=st,
            sampling_period=0.01 * pq.ms, kernel=self.kernel,
            t_stop=20 * pq.mV)
        self.assertRaises(
            TypeError, statistics.instantaneous_rate, spiketrains=st,
            sampling_period=0.01 * pq.ms, kernel=self.kernel, trim=1)
        # cannot estimate a kernel for a list of spiketrains
        self.assertRaises(ValueError, statistics.instantaneous_rate,
                          spiketrains=[st, st], sampling_period=10 * pq.ms,
                          kernel='auto')
    def test_rate_estimation_consistency(self):
        """
        Test, whether the integral of the rate estimation curve is (almost)
        equal to the number of spikes of the spike train.
        """
        # Collect every concrete Kernel subclass exported by the kernels
        # module (excluding the abstract bases).
        kernel_types = tuple(
            kern_cls for kern_cls in kernels.__dict__.values()
            if isinstance(kern_cls, type) and
            issubclass(kern_cls, kernels.Kernel) and
            kern_cls is not kernels.Kernel and
            kern_cls is not kernels.SymmetricKernel)
        kernels_available = [kern_cls(sigma=0.5 * pq.s, invert=False)
                             for kern_cls in kernel_types]
        kernels_available.append('auto')
        kernel_resolution = 0.01 * pq.s
        # The integral must hold with and without kernel centering.
        for kernel in kernels_available:
            for center_kernel in (False, True):
                rate_estimate = statistics.instantaneous_rate(
                    self.spike_train,
                    sampling_period=kernel_resolution,
                    kernel=kernel,
                    t_start=self.st_tr[0] * pq.s,
                    t_stop=self.st_tr[1] * pq.s,
                    trim=False,
                    center_kernel=center_kernel)
                num_spikes = len(self.spike_train)
                # Area under the rate curve via cumulative trapezoid rule.
                auc = spint.cumtrapz(
                    y=rate_estimate.magnitude.squeeze(),
                    x=rate_estimate.times.simplified.magnitude)[-1]
                self.assertAlmostEqual(num_spikes, auc,
                                       delta=0.01 * num_spikes)
def test_not_center_kernel(self):
# issue 107
t_spike = 1 * pq.s
st = neo.SpikeTrain([t_spike], t_start=0 * pq.s, t_stop=2 * pq.s,
units=pq.s)
kernel = kernels.AlphaKernel(200 * pq.ms)
fs = 0.1 * pq.ms
rate = statistics.instantaneous_rate(st,
sampling_period=fs,
kernel=kernel,
center_kernel=False)
rate_nonzero_index = np.nonzero(rate > 1e-6)[0]
# where the mass is concentrated
rate_mass = rate.times.rescale(t_spike.units)[rate_nonzero_index]
all_after_response_onset = (rate_mass >= t_spike).all()
self.assertTrue(all_after_response_onset)
def test_regression_288(self):
np.random.seed(9)
sampling_period = 200 * pq.ms
spiketrain = homogeneous_poisson_process(10 * pq.Hz,
t_start=0 * pq.s,
t_stop=10 * pq.s)
kernel = kernels.AlphaKernel(sigma=5 * pq.ms, invert=True)
# check that instantaneous_rate "works" for kernels with small sigma
# without triggering an incomprehensible error
rate = statistics.instantaneous_rate(spiketrain,
sampling_period=sampling_period,
kernel=kernel)
self.assertEqual(
len(rate), (spiketrain.t_stop / sampling_period).simplified.item())
def test_small_kernel_sigma(self):
# Test that the instantaneous rate is overestimated when
# kernel.sigma << sampling_period and center_kernel is True.
# The setup is set to match the issue 288.
np.random.seed(9)
sampling_period = 200 * pq.ms
sigma = 5 * pq.ms
rate_expected = 10 * pq.Hz
spiketrain = homogeneous_poisson_process(rate_expected,
t_start=0 * pq.s,
t_stop=10 * pq.s)
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.Kernel) and
kern_cls is not kernels.Kernel and
kern_cls is not kernels.SymmetricKernel)
for kern_cls, invert in itertools.product(kernel_types, (False, True)):
kernel = kern_cls(sigma=sigma, invert=invert)
with self.subTest(kernel=kernel):
rate = statistics.instantaneous_rate(
spiketrain,
sampling_period=sampling_period,
kernel=kernel, center_kernel=True)
self.assertGreater(rate.mean(), rate_expected)
def test_spikes_on_edges(self):
# this test demonstrates that the trimming (convolve valid mode)
# removes the edge spikes, underestimating the true firing rate and
# thus is not able to reconstruct the number of spikes in a
# spiketrain (see test_rate_estimation_consistency)
cutoff = 5
sampling_period = 0.01 * pq.s
t_spikes = np.array([-cutoff, cutoff]) * pq.s
spiketrain = neo.SpikeTrain(t_spikes, t_start=t_spikes[0],
t_stop=t_spikes[-1])
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.Kernel) and
kern_cls is not kernels.Kernel and
kern_cls is not kernels.SymmetricKernel)
kernels_available = [kern_cls(sigma=1 * pq.s, invert=False)
for kern_cls in kernel_types]
for kernel in kernels_available:
for center_kernel in (False, True):
rate = statistics.instantaneous_rate(
spiketrain,
sampling_period=sampling_period,
kernel=kernel,
cutoff=cutoff, trim=True,
center_kernel=center_kernel)
assert_array_almost_equal(rate.magnitude, 0, decimal=3)
def test_trim_as_convolve_mode(self):
cutoff = 5
sampling_period = 0.01 * pq.s
t_spikes = np.linspace(-cutoff, cutoff, num=(2 * cutoff + 1)) * pq.s
spiketrain = neo.SpikeTrain(t_spikes, t_start=t_spikes[0],
t_stop=t_spikes[-1])
kernel = kernels.RectangularKernel(sigma=1 * pq.s)
assert cutoff > kernel.min_cutoff, "Choose larger cutoff"
kernel_types = tuple(
kern_cls for kern_cls in kernels.__dict__.values()
if isinstance(kern_cls, type) and
issubclass(kern_cls, kernels.SymmetricKernel) and
kern_cls is not kernels.SymmetricKernel)
kernels_symmetric = [kern_cls(sigma=1 * pq.s, invert=False)
for kern_cls in kernel_types]
for kernel in kernels_symmetric:
for trim in (False, True):
rate_centered = statistics.instantaneous_rate(
spiketrain, sampling_period=sampling_period,
kernel=kernel, cutoff=cutoff, trim=trim)
rate_convolve = statistics.instantaneous_rate(
spiketrain, sampling_period=sampling_period,
kernel=kernel, cutoff=cutoff, trim=trim,
center_kernel=False)
assert_array_almost_equal(rate_centered, rate_convolve)
def test_instantaneous_rate_spiketrainlist(self):
np.random.seed(19)
duration_effective = self.st_dur - 2 * self.st_margin
st_num_spikes = np.random.poisson(self.st_rate * duration_effective)
spike_train2 = sorted(
np.random.rand(st_num_spikes) *
duration_effective +
self.st_margin)
spike_train2 = neo.SpikeTrain(spike_train2 * pq.s,
t_start=self.st_tr[0] * pq.s,
t_stop=self.st_tr[1] * pq.s)
st_rate_1 = statistics.instantaneous_rate(self.spike_train,
sampling_period=0.01 * pq.s,
kernel=self.kernel)
st_rate_2 = statistics.instantaneous_rate(spike_train2,
sampling_period=0.01 * pq.s,
kernel=self.kernel)
combined_rate = statistics.instantaneous_rate(
[self.spike_train, spike_train2],
sampling_period=0.01 * pq.s,
kernel=self.kernel)
rate_concat = np.c_[st_rate_1, st_rate_2]
# 'time_vector.dtype' in instantaneous_rate() is changed from float64
# to float32 which results in 3e-6 abs difference
assert_array_almost_equal(combined_rate.magnitude,
rate_concat.magnitude, decimal=5)
# Regression test for #144
def test_instantaneous_rate_regression_144(self):
# The following spike train contains spikes that are so close to each
# other, that the optimal kernel cannot be detected. Therefore, the
# function should react with a ValueError.
st = neo.SpikeTrain([2.12, 2.13, 2.15] * pq.s, t_stop=10 * pq.s)
self.assertRaises(ValueError, statistics.instantaneous_rate, st,
1 * pq.ms)
# Regression test for #245
def test_instantaneous_rate_regression_245(self):
# This test makes sure that the correct kernel width is chosen when
# selecting 'auto' as kernel
spiketrain = neo.SpikeTrain(
range(1, 30) * pq.ms, t_start=0 * pq.ms, t_stop=30 * pq.ms)
# This is the correct procedure to attain the kernel: first, the result
# of sskernel retrieves the kernel bandwidth of an optimal Gaussian
# kernel in terms of its standard deviation sigma, then uses this value
# directly in the function for creating the Gaussian kernel
kernel_width_sigma = statistics.optimal_kernel_bandwidth(
spiketrain.magnitude, times=None, bootstrap=False)['optw']
kernel = kernels.GaussianKernel(kernel_width_sigma * spiketrain.units)
result_target = statistics.instantaneous_rate(
spiketrain, 10 * pq.ms, kernel=kernel)
# Here, we check if the 'auto' argument leads to the same operation. In
# the regression, it was incorrectly assumed that the sskernel()
# function returns the actual bandwidth of the kernel, which is defined
# as approximately bandwidth = sigma * 5.5 = sigma * (2 * 2.75).
# factor 2.0 connects kernel width with its half width,
# factor 2.7 connects half width of Gaussian distribution with
# 99% probability mass with its standard deviation.
result_automatic = statistics.instantaneous_rate(
spiketrain, 10 * pq.ms, kernel='auto')
assert_array_almost_equal(result_target, result_automatic)
def test_instantaneous_rate_grows_with_sampling_period(self):
np.random.seed(0)
rate_expected = 10 * pq.Hz
spiketrain = homogeneous_poisson_process(rate=rate_expected,
t_stop=10 * pq.s)
kernel = kernels.GaussianKernel(sigma=100 * pq.ms)
rates_mean = []
for sampling_period in np.linspace(1, 1000, num=10) * pq.ms:
with self.subTest(sampling_period=sampling_period):
rate = statistics.instantaneous_rate(
spiketrain,
sampling_period=sampling_period,
kernel=kernel)
rates_mean.append(rate.mean())
# rate means are greater or equal the expected rate
assert_array_less(rate_expected, rates_mean)
# check sorted
self.assertTrue(np.all(rates_mean[:-1] < rates_mean[1:]))
    # Regression test for #360
    def test_centered_at_origin(self):
        """With a symmetric kernel, the estimated rate must peak exactly at
        the (symmetric) center of the spike times, i.e. at t = 0."""
        # Skip RectangularKernel because it doesn't have a strong peak.
        kernel_types = tuple(
            kern_cls for kern_cls in kernels.__dict__.values()
            if isinstance(kern_cls, type) and
            issubclass(kern_cls, kernels.SymmetricKernel) and
            kern_cls not in (kernels.SymmetricKernel,
                             kernels.RectangularKernel))
        kernels_symmetric = [kern_cls(sigma=50 * pq.ms, invert=False)
                             for kern_cls in kernel_types]
        # first part: a symmetric spiketrain with a symmetric kernel
        spiketrain = neo.SpikeTrain(np.array([-0.0001, 0, 0.0001]) * pq.s,
                                    t_start=-1,
                                    t_stop=1)
        for kernel in kernels_symmetric:
            rate = statistics.instantaneous_rate(spiketrain,
                                                 sampling_period=20 * pq.ms,
                                                 kernel=kernel)
            # the peak time must be centered at origin
            self.assertEqual(rate.times[np.argmax(rate)], 0)
        # second part: a single spike at t=0
        # sampling periods 2**-3 ... 2**5, i.e. 0.125 ms ... 32 ms below
        periods = [2 ** c for c in range(-3, 6)]
        for period in periods:
            with self.subTest(period=period):
                spiketrain = neo.SpikeTrain(np.array([0]) * pq.s,
                                            t_start=-period * 10 * pq.ms,
                                            t_stop=period * 10 * pq.ms)
                for kernel in kernels_symmetric:
                    rate = statistics.instantaneous_rate(
                        spiketrain,
                        sampling_period=period * pq.ms,
                        kernel=kernel)
                    # the peak time must again be centered at origin
                    self.assertEqual(rate.times[np.argmax(rate)], 0)
def test_annotations(self):
spiketrain = neo.SpikeTrain([1, 2], t_stop=2 * pq.s, units=pq.s)
kernel = kernels.AlphaKernel(sigma=100 * pq.ms)
rate = statistics.instantaneous_rate(spiketrain,
sampling_period=10 * pq.ms,
kernel=kernel)
kernel_annotation = dict(type=type(kernel).__name__,
sigma=str(kernel.sigma),
invert=kernel.invert)
self.assertIn('kernel', rate.annotations)
self.assertEqual(rate.annotations['kernel'], kernel_annotation)
class TimeHistogramTestCase(unittest.TestCase):
def setUp(self):
self.spiketrain_a = neo.SpikeTrain(
[0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
self.spiketrain_b = neo.SpikeTrain(
[0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
self.spiketrains = [self.spiketrain_a, self.spiketrain_b]
def tearDown(self):
del self.spiketrain_a
self.spiketrain_a = None
del self.spiketrain_b
self.spiketrain_b = None
def test_time_histogram(self):
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
def test_time_histogram_binary(self):
targ = np.array([2, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
binary=True)
assert_array_equal(targ, histogram.magnitude[:, 0])
def test_time_histogram_tstart_tstop(self):
# Start, stop short range
targ = np.array([2, 1])
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
t_start=5 * pq.s,
t_stop=7 * pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
# Test without t_stop
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])
histogram = statistics.time_histogram(self.spiketrains,
bin_size=1 * pq.s,
t_start=0 * pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
# Test without t_start
histogram = statistics.time_histogram(self.spiketrains,
bin_size=1 * pq.s,
t_stop=10 * pq.s)
assert_array_equal(targ, histogram.magnitude[:, 0])
def test_time_histogram_output(self):
# Normalization mean
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
output='mean')
targ = np.array([4, 2, 1, 1, 2, 2, 1, 0, 1, 0], dtype=float) / 2
assert_array_equal(targ.reshape(targ.size, 1), histogram.magnitude)
# Normalization rate
histogram = statistics.time_histogram(self.spiketrains, bin_size=pq.s,
output='rate')
assert_array_equal(histogram.view(pq.Quantity),
targ.reshape(targ.size, 1) * 1 / pq.s)
# Normalization unspecified, raises error
self.assertRaises(ValueError, statistics.time_histogram,
self.spiketrains,
bin_size=pq.s, output=' ')
def test_annotations(self):
np.random.seed(1)
spiketrains = [homogeneous_poisson_process(
rate=10 * pq.Hz, t_stop=10 * pq.s) for _ in range(10)]
for output in ("counts", "mean", "rate"):
histogram = statistics.time_histogram(spiketrains,
bin_size=3 * pq.ms,
output=output)
self.assertIn('normalization', histogram.annotations)
self.assertEqual(histogram.annotations['normalization'], output)
class ComplexityPdfTestCase(unittest.TestCase):
def setUp(self):
self.spiketrain_a = neo.SpikeTrain(
[0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
self.spiketrain_b = neo.SpikeTrain(
[0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
self.spiketrain_c = neo.SpikeTrain(
[0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)
self.spiketrains = [
self.spiketrain_a, self.spiketrain_b, self.spiketrain_c]
def tearDown(self):
del self.spiketrain_a
self.spiketrain_a = None
del self.spiketrain_b
self.spiketrain_b = None
def test_complexity_pdf(self):
targ = np.array([0.92, 0.01, 0.01, 0.06])
complexity = statistics.complexity_pdf(self.spiketrains,
bin_size=0.1 * pq.s)
assert_array_equal(targ, complexity.magnitude[:, 0])
self.assertEqual(1, complexity.magnitude[:, 0].sum())
self.assertEqual(len(self.spiketrains) + 1, len(complexity))
self.assertIsInstance(complexity, neo.AnalogSignal)
self.assertEqual(complexity.units, 1 * pq.dimensionless)
if __name__ == '__main__':
    # Run the full test suite when this module is executed as a script.
    unittest.main()
| [
"numpy.random.rand",
"elephant.kernels.GaussianKernel",
"elephant.statistics.fanofactor",
"elephant.statistics.lvr",
"elephant.kernels.__dict__.values",
"quantities.Quantity",
"elephant.kernels.TriangularKernel",
"numpy.array",
"elephant.statistics.isi",
"neo.core.SpikeTrain",
"numpy.arange",
... | [((42616, 42631), 'unittest2.main', 'unittest.main', ([], {}), '()\n', (42629, 42631), True, 'import unittest2 as unittest\n'), ((771, 863), 'numpy.array', 'np.array', (['[[0.3, 0.56, 0.87, 1.23], [0.02, 0.71, 1.82, 8.46], [0.03, 0.14, 0.15, 0.92]]'], {}), '([[0.3, 0.56, 0.87, 1.23], [0.02, 0.71, 1.82, 8.46], [0.03, 0.14, \n 0.15, 0.92]])\n', (779, 863), True, 'import numpy as np\n'), ((968, 1034), 'numpy.array', 'np.array', (['[[-0.28, 0.15, 0.95, 7.23], [0.01, -0.57, -1.67, -7.54]]'], {}), '([[-0.28, 0.15, 0.95, 7.23], [0.01, -0.57, -1.67, -7.54]])\n', (976, 1034), True, 'import numpy as np\n'), ((1107, 1177), 'numpy.array', 'np.array', (['[[0.26, 0.31, 0.36], [0.69, 1.11, 6.64], [0.11, 0.01, 0.77]]'], {}), '([[0.26, 0.31, 0.36], [0.69, 1.11, 6.64], [0.11, 0.01, 0.77]])\n', (1115, 1177), True, 'import numpy as np\n'), ((1483, 1556), 'neo.SpikeTrain', 'neo.SpikeTrain', (['self.test_array_1d'], {'units': '"""ms"""', 't_stop': '(10.0)', 't_start': '(0.29)'}), "(self.test_array_1d, units='ms', t_stop=10.0, t_start=0.29)\n", (1497, 1556), False, 'import neo\n'), ((1587, 1624), 'quantities.Quantity', 'pq.Quantity', (['self.targ_array_1d', '"""ms"""'], {}), "(self.targ_array_1d, 'ms')\n", (1598, 1624), True, 'import quantities as pq\n'), ((1639, 1657), 'elephant.statistics.isi', 'statistics.isi', (['st'], {}), '(st)\n', (1653, 1657), False, 'from elephant import statistics\n'), ((1666, 1715), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (1691, 1715), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((1773, 1816), 'quantities.Quantity', 'pq.Quantity', (['self.test_array_1d'], {'units': '"""ms"""'}), "(self.test_array_1d, units='ms')\n", (1784, 1816), True, 'import quantities as pq\n'), ((1834, 1871), 'quantities.Quantity', 'pq.Quantity', (['self.targ_array_1d', '"""ms"""'], {}), "(self.targ_array_1d, 'ms')\n", 
(1845, 1871), True, 'import quantities as pq\n'), ((1886, 1904), 'elephant.statistics.isi', 'statistics.isi', (['st'], {}), '(st)\n', (1900, 1904), False, 'from elephant import statistics\n'), ((1913, 1962), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (1938, 1962), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((2090, 2108), 'elephant.statistics.isi', 'statistics.isi', (['st'], {}), '(st)\n', (2104, 2108), False, 'from elephant import statistics\n'), ((2165, 2214), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (2190, 2214), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((2358, 2376), 'elephant.statistics.isi', 'statistics.isi', (['st'], {}), '(st)\n', (2372, 2376), False, 'from elephant import statistics\n'), ((2433, 2482), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (2458, 2482), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((2614, 2640), 'elephant.statistics.isi', 'statistics.isi', (['st'], {'axis': '(0)'}), '(st, axis=0)\n', (2628, 2640), False, 'from elephant import statistics\n'), ((2697, 2746), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (2722, 2746), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((2878, 2904), 'elephant.statistics.isi', 'statistics.isi', (['st'], {'axis': '(1)'}), '(st, axis=1)\n', (2892, 2904), False, 'from elephant import statistics\n'), ((2961, 3010), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', 
(['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (2986, 3010), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((3055, 3072), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3069, 3072), True, 'import numpy as np\n'), ((3089, 3108), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (3103, 3108), True, 'import numpy as np\n'), ((3292, 3307), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (3301, 3307), True, 'import numpy as np\n'), ((3376, 3440), 'neo.SpikeTrain', 'neo.SpikeTrain', (['self.test_array_regular'], {'units': '"""ms"""', 't_stop': '(10.0)'}), "(self.test_array_regular, units='ms', t_stop=10.0)\n", (3390, 3440), False, 'import neo\n'), ((3838, 3857), 'numpy.ones', 'np.ones', (['[5, 7, 13]'], {}), '([5, 7, 13])\n', (3845, 3857), True, 'import numpy as np\n'), ((3887, 3979), 'numpy.array', 'np.array', (['[[0.3, 0.56, 0.87, 1.23], [0.02, 0.71, 1.82, 8.46], [0.03, 0.14, 0.15, 0.92]]'], {}), '([[0.3, 0.56, 0.87, 1.23], [0.02, 0.71, 1.82, 8.46], [0.03, 0.14, \n 0.15, 0.92]])\n', (3895, 3979), True, 'import numpy as np\n'), ((4085, 4107), 'numpy.array', 'np.array', (['[3, 3, 3, 3]'], {}), '([3, 3, 3, 3])\n', (4093, 4107), True, 'import numpy as np\n'), ((4139, 4158), 'numpy.array', 'np.array', (['[4, 4, 4]'], {}), '([4, 4, 4])\n', (4147, 4158), True, 'import numpy as np\n'), ((4288, 4321), 'numpy.array', 'np.array', (['[0.3, 0.71, 1.82, 8.46]'], {}), '([0.3, 0.71, 1.82, 8.46])\n', (4296, 4321), True, 'import numpy as np\n'), ((4352, 4380), 'numpy.array', 'np.array', (['[1.23, 8.46, 0.92]'], {}), '([1.23, 8.46, 0.92])\n', (4360, 4380), True, 'import numpy as np\n'), ((4999, 5058), 'neo.SpikeTrain', 'neo.SpikeTrain', (['self.test_array_1d'], {'units': '"""ms"""', 't_stop': '(10.0)'}), "(self.test_array_1d, units='ms', t_stop=10.0)\n", (5013, 5058), False, 'import neo\n'), ((5076, 5122), 'quantities.Quantity', 'pq.Quantity', 
(['(self.targ_array_1d / 10.0)', '"""1/ms"""'], {}), "(self.targ_array_1d / 10.0, '1/ms')\n", (5087, 5122), True, 'import quantities as pq\n'), ((5136, 5167), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {}), '(st)\n', (5163, 5167), False, 'from elephant import statistics\n'), ((5176, 5225), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (5201, 5225), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((5289, 5307), 'numpy.random.seed', 'np.random.seed', (['(92)'], {}), '(92)\n', (5303, 5307), True, 'import numpy as np\n'), ((5321, 5385), 'elephant.spike_train_generation.homogeneous_poisson_process', 'homogeneous_poisson_process', ([], {'rate': '(100 * pq.Hz)', 't_stop': '(100 * pq.s)'}), '(rate=100 * pq.Hz, t_stop=100 * pq.s)\n', (5348, 5385), False, 'from elephant.spike_train_generation import homogeneous_poisson_process\n'), ((5402, 5433), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {}), '(st)\n', (5429, 5433), False, 'from elephant import statistics\n'), ((5450, 5519), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {'t_start': 'st.t_start', 't_stop': 'st.t_stop'}), '(st, t_start=st.t_start, t_stop=st.t_stop)\n', (5477, 5519), False, 'from elephant import statistics\n'), ((5750, 5809), 'neo.SpikeTrain', 'neo.SpikeTrain', (['self.test_array_1d'], {'units': '"""ms"""', 't_stop': '(10.0)'}), "(self.test_array_1d, units='ms', t_stop=10.0)\n", (5764, 5809), False, 'import neo\n'), ((5827, 5855), 'quantities.Quantity', 'pq.Quantity', (['(2 / 0.5)', '"""1/ms"""'], {}), "(2 / 0.5, '1/ms')\n", (5838, 5855), True, 'import quantities as pq\n'), ((5870, 5942), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {'t_start': '(0.4 * pq.ms)', 't_stop': '(0.9 * pq.ms)'}), '(st, t_start=0.4 * pq.ms, 
t_stop=0.9 * pq.ms)\n', (5897, 5942), False, 'from elephant import statistics\n'), ((5993, 6042), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (6018, 6042), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((6113, 6156), 'quantities.Quantity', 'pq.Quantity', (['self.test_array_1d'], {'units': '"""ms"""'}), "(self.test_array_1d, units='ms')\n", (6124, 6156), True, 'import quantities as pq\n'), ((6174, 6233), 'quantities.Quantity', 'pq.Quantity', (['(self.targ_array_1d / self.max_array_1d)', '"""1/ms"""'], {}), "(self.targ_array_1d / self.max_array_1d, '1/ms')\n", (6185, 6233), True, 'import quantities as pq\n'), ((6248, 6279), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {}), '(st)\n', (6275, 6279), False, 'from elephant import statistics\n'), ((6288, 6337), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (6313, 6337), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((6417, 6460), 'quantities.Quantity', 'pq.Quantity', (['self.test_array_1d'], {'units': '"""ms"""'}), "(self.test_array_1d, units='ms')\n", (6428, 6460), True, 'import quantities as pq\n'), ((6950, 6981), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {}), '(st)\n', (6977, 6981), False, 'from elephant import statistics\n'), ((7038, 7087), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (7063, 7087), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((7252, 7309), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {'t_start': '(0.3)', 't_stop': '(1.23)'}), '(st, 
t_start=0.3, t_stop=1.23)\n', (7279, 7309), False, 'from elephant import statistics\n'), ((7366, 7415), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (7391, 7415), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((7600, 7631), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {}), '(st)\n', (7627, 7631), False, 'from elephant import statistics\n'), ((7688, 7737), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (7713, 7737), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((7904, 7943), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {'axis': '(0)'}), '(st, axis=0)\n', (7931, 7943), False, 'from elephant import statistics\n'), ((8000, 8049), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (8025, 8049), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((8216, 8255), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {'axis': '(1)'}), '(st, axis=1)\n', (8243, 8255), False, 'from elephant import statistics\n'), ((8312, 8361), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (8337, 8361), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((8526, 8580), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {'axis': 'None', 't_stop': '(5.0)'}), '(st, axis=None, t_stop=5.0)\n', (8553, 8580), False, 'from elephant import statistics\n'), ((8636, 8685), 
'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (8661, 8685), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((8844, 8895), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {'axis': '(0)', 't_stop': '(5.0)'}), '(st, axis=0, t_stop=5.0)\n', (8871, 8895), False, 'from elephant import statistics\n'), ((8951, 9000), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (8976, 9000), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((9159, 9210), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {'axis': '(1)', 't_stop': '(5.0)'}), '(st, axis=1, t_stop=5.0)\n', (9186, 9210), False, 'from elephant import statistics\n'), ((9266, 9315), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (9291, 9315), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((9474, 9525), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {'axis': '(2)', 't_stop': '(5.0)'}), '(st, axis=2, t_stop=5.0)\n', (9501, 9525), False, 'from elephant import statistics\n'), ((9581, 9630), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (9606, 9630), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((9799, 9865), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {'axis': '(1)', 't_start': '(0.14)', 't_stop': '(1.23)'}), '(st, axis=1, t_start=0.14, t_stop=1.23)\n', (9826, 9865), False, 'from elephant import 
statistics\n'), ((9964, 10013), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (9989, 10013), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((10189, 10231), 'elephant.statistics.mean_firing_rate', 'statistics.mean_firing_rate', (['st'], {'axis': 'None'}), '(st, axis=None)\n', (10216, 10231), False, 'from elephant import statistics\n'), ((10288, 10337), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['res', 'target'], {'decimal': '(9)'}), '(res, target, decimal=9)\n', (10313, 10337), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((11307, 11326), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (11321, 11326), True, 'import numpy as np\n'), ((11497, 11513), 'numpy.zeros', 'np.zeros', (['num_st'], {}), '(num_st)\n', (11505, 11513), True, 'import numpy as np\n'), ((12718, 12788), 'neo.core.SpikeTrain', 'neo.core.SpikeTrain', (['([] * pq.ms)'], {'t_start': '(0 * pq.ms)', 't_stop': '(1.5 * pq.ms)'}), '([] * pq.ms, t_start=0 * pq.ms, t_stop=1.5 * pq.ms)\n', (12737, 12788), False, 'import neo\n'), ((14108, 14157), 'neo.SpikeTrain', 'neo.SpikeTrain', (['([1, 2, 3] * pq.s)'], {'t_stop': '(4 * pq.s)'}), '([1, 2, 3] * pq.s, t_stop=4 * pq.s)\n', (14122, 14157), False, 'import neo\n'), ((14172, 14223), 'neo.SpikeTrain', 'neo.SpikeTrain', (['([1, 2, 3] * pq.s)'], {'t_stop': '(4.5 * pq.s)'}), '([1, 2, 3] * pq.s, t_stop=4.5 * pq.s)\n', (14186, 14223), False, 'import neo\n'), ((14397, 14446), 'neo.SpikeTrain', 'neo.SpikeTrain', (['([1, 2, 3] * pq.s)'], {'t_stop': '(4 * pq.s)'}), '([1, 2, 3] * pq.s, t_stop=4 * pq.s)\n', (14411, 14446), False, 'import neo\n'), ((15298, 15336), 'quantities.Quantity', 'pq.Quantity', (['self.test_seq'], {'units': '"""ms"""'}), "(self.test_seq, units='ms')\n", (15309, 15336), True, 'import quantities as 
pq\n'), ((15470, 15493), 'numpy.array', 'np.array', (['self.test_seq'], {}), '(self.test_seq)\n', (15478, 15493), True, 'import numpy as np\n'), ((17034, 17072), 'quantities.Quantity', 'pq.Quantity', (['self.test_seq'], {'units': '"""ms"""'}), "(self.test_seq, units='ms')\n", (17045, 17072), True, 'import quantities as pq\n'), ((17208, 17231), 'numpy.array', 'np.array', (['self.test_seq'], {}), '(self.test_seq)\n', (17216, 17231), True, 'import numpy as np\n'), ((17840, 17863), 'numpy.array', 'np.array', (['self.test_seq'], {}), '(self.test_seq)\n', (17848, 17863), True, 'import numpy as np\n'), ((19105, 19143), 'quantities.Quantity', 'pq.Quantity', (['self.test_seq'], {'units': '"""ms"""'}), "(self.test_seq, units='ms')\n", (19116, 19143), True, 'import quantities as pq\n'), ((19279, 19302), 'numpy.array', 'np.array', (['self.test_seq'], {}), '(self.test_seq)\n', (19287, 19302), True, 'import numpy as np\n'), ((20083, 20101), 'numpy.random.seed', 'np.random.seed', (['(19)'], {}), '(19)\n', (20097, 20101), True, 'import numpy as np\n'), ((20188, 20240), 'numpy.random.poisson', 'np.random.poisson', (['(self.st_rate * duration_effective)'], {}), '(self.st_rate * duration_effective)\n', (20205, 20240), True, 'import numpy as np\n'), ((20451, 20549), 'neo.SpikeTrain', 'neo.SpikeTrain', (['(spike_train * pq.s)'], {'t_start': '(self.st_tr[0] * pq.s)', 't_stop': '(self.st_tr[1] * pq.s)'}), '(spike_train * pq.s, t_start=self.st_tr[0] * pq.s, t_stop=\n self.st_tr[1] * pq.s)\n', (20465, 20549), False, 'import neo\n'), ((20708, 20751), 'elephant.kernels.TriangularKernel', 'kernels.TriangularKernel', ([], {'sigma': '(0.03 * pq.s)'}), '(sigma=0.03 * pq.s)\n', (20732, 20751), True, 'import elephant.kernels as kernels\n'), ((25325, 25397), 'neo.SpikeTrain', 'neo.SpikeTrain', (['[t_spike]'], {'t_start': '(0 * pq.s)', 't_stop': '(2 * pq.s)', 'units': 'pq.s'}), '([t_spike], t_start=0 * pq.s, t_stop=2 * pq.s, units=pq.s)\n', (25339, 25397), False, 'import neo\n'), ((25443, 25475), 
'elephant.kernels.AlphaKernel', 'kernels.AlphaKernel', (['(200 * pq.ms)'], {}), '(200 * pq.ms)\n', (25462, 25475), True, 'import elephant.kernels as kernels\n'), ((25516, 25609), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['st'], {'sampling_period': 'fs', 'kernel': 'kernel', 'center_kernel': '(False)'}), '(st, sampling_period=fs, kernel=kernel,\n center_kernel=False)\n', (25545, 25609), False, 'from elephant import statistics\n'), ((26070, 26087), 'numpy.random.seed', 'np.random.seed', (['(9)'], {}), '(9)\n', (26084, 26087), True, 'import numpy as np\n'), ((26147, 26222), 'elephant.spike_train_generation.homogeneous_poisson_process', 'homogeneous_poisson_process', (['(10 * pq.Hz)'], {'t_start': '(0 * pq.s)', 't_stop': '(10 * pq.s)'}), '(10 * pq.Hz, t_start=0 * pq.s, t_stop=10 * pq.s)\n', (26174, 26222), False, 'from elephant.spike_train_generation import homogeneous_poisson_process\n'), ((26338, 26387), 'elephant.kernels.AlphaKernel', 'kernels.AlphaKernel', ([], {'sigma': '(5 * pq.ms)', 'invert': '(True)'}), '(sigma=5 * pq.ms, invert=True)\n', (26357, 26387), True, 'import elephant.kernels as kernels\n'), ((26535, 26628), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spiketrain'], {'sampling_period': 'sampling_period', 'kernel': 'kernel'}), '(spiketrain, sampling_period=sampling_period,\n kernel=kernel)\n', (26564, 26628), False, 'from elephant import statistics\n'), ((27054, 27071), 'numpy.random.seed', 'np.random.seed', (['(9)'], {}), '(9)\n', (27068, 27071), True, 'import numpy as np\n'), ((27192, 27270), 'elephant.spike_train_generation.homogeneous_poisson_process', 'homogeneous_poisson_process', (['rate_expected'], {'t_start': '(0 * pq.s)', 't_stop': '(10 * pq.s)'}), '(rate_expected, t_start=0 * pq.s, t_stop=10 * pq.s)\n', (27219, 27270), False, 'from elephant.spike_train_generation import homogeneous_poisson_process\n'), ((27693, 27739), 'itertools.product', 'itertools.product', 
(['kernel_types', '(False, True)'], {}), '(kernel_types, (False, True))\n', (27710, 27739), False, 'import itertools\n'), ((28548, 28614), 'neo.SpikeTrain', 'neo.SpikeTrain', (['t_spikes'], {'t_start': 't_spikes[0]', 't_stop': 't_spikes[-1]'}), '(t_spikes, t_start=t_spikes[0], t_stop=t_spikes[-1])\n', (28562, 28614), False, 'import neo\n'), ((29698, 29764), 'neo.SpikeTrain', 'neo.SpikeTrain', (['t_spikes'], {'t_start': 't_spikes[0]', 't_stop': 't_spikes[-1]'}), '(t_spikes, t_start=t_spikes[0], t_stop=t_spikes[-1])\n', (29712, 29764), False, 'import neo\n'), ((29818, 29859), 'elephant.kernels.RectangularKernel', 'kernels.RectangularKernel', ([], {'sigma': '(1 * pq.s)'}), '(sigma=1 * pq.s)\n', (29843, 29859), True, 'import elephant.kernels as kernels\n'), ((30942, 30960), 'numpy.random.seed', 'np.random.seed', (['(19)'], {}), '(19)\n', (30956, 30960), True, 'import numpy as np\n'), ((31047, 31099), 'numpy.random.poisson', 'np.random.poisson', (['(self.st_rate * duration_effective)'], {}), '(self.st_rate * duration_effective)\n', (31064, 31099), True, 'import numpy as np\n'), ((31259, 31358), 'neo.SpikeTrain', 'neo.SpikeTrain', (['(spike_train2 * pq.s)'], {'t_start': '(self.st_tr[0] * pq.s)', 't_stop': '(self.st_tr[1] * pq.s)'}), '(spike_train2 * pq.s, t_start=self.st_tr[0] * pq.s, t_stop=\n self.st_tr[1] * pq.s)\n', (31273, 31358), False, 'import neo\n'), ((31450, 31550), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['self.spike_train'], {'sampling_period': '(0.01 * pq.s)', 'kernel': 'self.kernel'}), '(self.spike_train, sampling_period=0.01 * pq.s,\n kernel=self.kernel)\n', (31479, 31550), False, 'from elephant import statistics\n'), ((31667, 31763), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spike_train2'], {'sampling_period': '(0.01 * pq.s)', 'kernel': 'self.kernel'}), '(spike_train2, sampling_period=0.01 * pq.s,\n kernel=self.kernel)\n', (31696, 31763), False, 'from elephant import statistics\n'), 
((31884, 32000), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['[self.spike_train, spike_train2]'], {'sampling_period': '(0.01 * pq.s)', 'kernel': 'self.kernel'}), '([self.spike_train, spike_train2],\n sampling_period=0.01 * pq.s, kernel=self.kernel)\n', (31913, 32000), False, 'from elephant import statistics\n'), ((32228, 32316), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['combined_rate.magnitude', 'rate_concat.magnitude'], {'decimal': '(5)'}), '(combined_rate.magnitude, rate_concat.magnitude,\n decimal=5)\n', (32253, 32316), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((32651, 32710), 'neo.SpikeTrain', 'neo.SpikeTrain', (['([2.12, 2.13, 2.15] * pq.s)'], {'t_stop': '(10 * pq.s)'}), '([2.12, 2.13, 2.15] * pq.s, t_stop=10 * pq.s)\n', (32665, 32710), False, 'import neo\n'), ((33588, 33649), 'elephant.kernels.GaussianKernel', 'kernels.GaussianKernel', (['(kernel_width_sigma * spiketrain.units)'], {}), '(kernel_width_sigma * spiketrain.units)\n', (33610, 33649), True, 'import elephant.kernels as kernels\n'), ((33674, 33742), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spiketrain', '(10 * pq.ms)'], {'kernel': 'kernel'}), '(spiketrain, 10 * pq.ms, kernel=kernel)\n', (33703, 33742), False, 'from elephant import statistics\n'), ((34296, 34364), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spiketrain', '(10 * pq.ms)'], {'kernel': '"""auto"""'}), "(spiketrain, 10 * pq.ms, kernel='auto')\n", (34325, 34364), False, 'from elephant import statistics\n'), ((34387, 34445), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['result_target', 'result_automatic'], {}), '(result_target, result_automatic)\n', (34412, 34445), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((34521, 34538), 'numpy.random.seed', 
'np.random.seed', (['(0)'], {}), '(0)\n', (34535, 34538), True, 'import numpy as np\n'), ((34595, 34660), 'elephant.spike_train_generation.homogeneous_poisson_process', 'homogeneous_poisson_process', ([], {'rate': 'rate_expected', 't_stop': '(10 * pq.s)'}), '(rate=rate_expected, t_stop=10 * pq.s)\n', (34622, 34660), False, 'from elephant.spike_train_generation import homogeneous_poisson_process\n'), ((34727, 34768), 'elephant.kernels.GaussianKernel', 'kernels.GaussianKernel', ([], {'sigma': '(100 * pq.ms)'}), '(sigma=100 * pq.ms)\n', (34749, 34768), True, 'import elephant.kernels as kernels\n'), ((35215, 35259), 'numpy.testing.assert_array_less', 'assert_array_less', (['rate_expected', 'rates_mean'], {}), '(rate_expected, rates_mean)\n', (35232, 35259), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((37276, 37327), 'neo.SpikeTrain', 'neo.SpikeTrain', (['[1, 2]'], {'t_stop': '(2 * pq.s)', 'units': 'pq.s'}), '([1, 2], t_stop=2 * pq.s, units=pq.s)\n', (37290, 37327), False, 'import neo\n'), ((37345, 37383), 'elephant.kernels.AlphaKernel', 'kernels.AlphaKernel', ([], {'sigma': '(100 * pq.ms)'}), '(sigma=100 * pq.ms)\n', (37364, 37383), True, 'import elephant.kernels as kernels\n'), ((37399, 37487), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spiketrain'], {'sampling_period': '(10 * pq.ms)', 'kernel': 'kernel'}), '(spiketrain, sampling_period=10 * pq.ms,\n kernel=kernel)\n', (37428, 37487), False, 'from elephant import statistics\n'), ((37969, 38047), 'neo.SpikeTrain', 'neo.SpikeTrain', (['([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s)'], {'t_stop': '(10.0 * pq.s)'}), '([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)\n', (37983, 38047), False, 'import neo\n'), ((38089, 38167), 'neo.SpikeTrain', 'neo.SpikeTrain', (['([0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s)'], {'t_stop': '(10.0 * pq.s)'}), '([0.1, 0.7, 1.2, 2.2, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)\n', 
(38103, 38167), False, 'import neo\n'), ((38449, 38489), 'numpy.array', 'np.array', (['[4, 2, 1, 1, 2, 2, 1, 0, 1, 0]'], {}), '([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])\n', (38457, 38489), True, 'import numpy as np\n'), ((38510, 38568), 'elephant.statistics.time_histogram', 'statistics.time_histogram', (['self.spiketrains'], {'bin_size': 'pq.s'}), '(self.spiketrains, bin_size=pq.s)\n', (38535, 38568), False, 'from elephant import statistics\n'), ((38577, 38628), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['targ', 'histogram.magnitude[:, 0]'], {}), '(targ, histogram.magnitude[:, 0])\n', (38595, 38628), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((38687, 38727), 'numpy.array', 'np.array', (['[2, 2, 1, 1, 2, 2, 1, 0, 1, 0]'], {}), '([2, 2, 1, 1, 2, 2, 1, 0, 1, 0])\n', (38695, 38727), True, 'import numpy as np\n'), ((38748, 38819), 'elephant.statistics.time_histogram', 'statistics.time_histogram', (['self.spiketrains'], {'bin_size': 'pq.s', 'binary': '(True)'}), '(self.spiketrains, bin_size=pq.s, binary=True)\n', (38773, 38819), False, 'from elephant import statistics\n'), ((38874, 38925), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['targ', 'histogram.magnitude[:, 0]'], {}), '(targ, histogram.magnitude[:, 0])\n', (38892, 38925), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((39024, 39040), 'numpy.array', 'np.array', (['[2, 1]'], {}), '([2, 1])\n', (39032, 39040), True, 'import numpy as np\n'), ((39061, 39158), 'elephant.statistics.time_histogram', 'statistics.time_histogram', (['self.spiketrains'], {'bin_size': 'pq.s', 't_start': '(5 * pq.s)', 't_stop': '(7 * pq.s)'}), '(self.spiketrains, bin_size=pq.s, t_start=5 * pq.s,\n t_stop=7 * pq.s)\n', (39086, 39158), False, 'from elephant import statistics\n'), ((39255, 39306), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['targ', 'histogram.magnitude[:, 0]'], {}), '(targ, 
histogram.magnitude[:, 0])\n', (39273, 39306), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((39353, 39393), 'numpy.array', 'np.array', (['[4, 2, 1, 1, 2, 2, 1, 0, 1, 0]'], {}), '([4, 2, 1, 1, 2, 2, 1, 0, 1, 0])\n', (39361, 39393), True, 'import numpy as np\n'), ((39414, 39499), 'elephant.statistics.time_histogram', 'statistics.time_histogram', (['self.spiketrains'], {'bin_size': '(1 * pq.s)', 't_start': '(0 * pq.s)'}), '(self.spiketrains, bin_size=1 * pq.s, t_start=0 * pq.s\n )\n', (39439, 39499), False, 'from elephant import statistics\n'), ((39595, 39646), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['targ', 'histogram.magnitude[:, 0]'], {}), '(targ, histogram.magnitude[:, 0])\n', (39613, 39646), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((39699, 39784), 'elephant.statistics.time_histogram', 'statistics.time_histogram', (['self.spiketrains'], {'bin_size': '(1 * pq.s)', 't_stop': '(10 * pq.s)'}), '(self.spiketrains, bin_size=1 * pq.s, t_stop=10 * pq.s\n )\n', (39724, 39784), False, 'from elephant import statistics\n'), ((39880, 39931), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['targ', 'histogram.magnitude[:, 0]'], {}), '(targ, histogram.magnitude[:, 0])\n', (39898, 39931), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((40024, 40097), 'elephant.statistics.time_histogram', 'statistics.time_histogram', (['self.spiketrains'], {'bin_size': 'pq.s', 'output': '"""mean"""'}), "(self.spiketrains, bin_size=pq.s, output='mean')\n", (40049, 40097), False, 'from elephant import statistics\n'), ((40343, 40416), 'elephant.statistics.time_histogram', 'statistics.time_histogram', (['self.spiketrains'], {'bin_size': 'pq.s', 'output': '"""rate"""'}), "(self.spiketrains, bin_size=pq.s, output='rate')\n", (40368, 40416), False, 'from elephant import statistics\n'), ((40839, 
40856), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (40853, 40856), True, 'import numpy as np\n'), ((41466, 41544), 'neo.SpikeTrain', 'neo.SpikeTrain', (['([0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 6.7] * pq.s)'], {'t_stop': '(10.0 * pq.s)'}), '([0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)\n', (41480, 41544), False, 'import neo\n'), ((41586, 41664), 'neo.SpikeTrain', 'neo.SpikeTrain', (['([0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s)'], {'t_stop': '(10.0 * pq.s)'}), '([0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)\n', (41600, 41664), False, 'import neo\n'), ((41706, 41784), 'neo.SpikeTrain', 'neo.SpikeTrain', (['([0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s)'], {'t_stop': '(10.0 * pq.s)'}), '([0.5, 0.7, 1.2, 2.3, 4.3, 5.5, 8.0] * pq.s, t_stop=10.0 * pq.s)\n', (41720, 41784), False, 'import neo\n'), ((42098, 42132), 'numpy.array', 'np.array', (['[0.92, 0.01, 0.01, 0.06]'], {}), '([0.92, 0.01, 0.01, 0.06])\n', (42106, 42132), True, 'import numpy as np\n'), ((42154, 42218), 'elephant.statistics.complexity_pdf', 'statistics.complexity_pdf', (['self.spiketrains'], {'bin_size': '(0.1 * pq.s)'}), '(self.spiketrains, bin_size=0.1 * pq.s)\n', (42179, 42218), False, 'from elephant import statistics\n'), ((42274, 42326), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['targ', 'complexity.magnitude[:, 0]'], {}), '(targ, complexity.magnitude[:, 0])\n', (42292, 42326), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((3171, 3192), 'elephant.statistics.isi', 'statistics.isi', (['array'], {}), '(array)\n', (3185, 3192), False, 'from elephant import statistics\n'), ((3488, 3506), 'elephant.statistics.isi', 'statistics.isi', (['st'], {}), '(st)\n', (3502, 3506), False, 'from elephant import statistics\n'), ((3678, 3696), 'elephant.statistics.isi', 'statistics.isi', (['st'], {}), '(st)\n', (3692, 3696), False, 'from elephant import statistics\n'), ((8474, 8506), 'numpy.sum', 
'np.sum', (['self.test_array_3d', 'None'], {}), '(self.test_array_3d, None)\n', (8480, 8506), True, 'import numpy as np\n'), ((8795, 8824), 'numpy.sum', 'np.sum', (['self.test_array_3d', '(0)'], {}), '(self.test_array_3d, 0)\n', (8801, 8824), True, 'import numpy as np\n'), ((9110, 9139), 'numpy.sum', 'np.sum', (['self.test_array_3d', '(1)'], {}), '(self.test_array_3d, 1)\n', (9116, 9139), True, 'import numpy as np\n'), ((9425, 9454), 'numpy.sum', 'np.sum', (['self.test_array_3d', '(2)'], {}), '(self.test_array_3d, 2)\n', (9431, 9454), True, 'import numpy as np\n'), ((9749, 9768), 'numpy.array', 'np.array', (['[4, 1, 3]'], {}), '([4, 1, 3])\n', (9757, 9768), True, 'import numpy as np\n'), ((11621, 11693), 'neo.core.SpikeTrain', 'neo.core.SpikeTrain', (['(r * pq.ms)'], {'t_start': '(0.0 * pq.ms)', 't_stop': '(20.0 * pq.ms)'}), '(r * pq.ms, t_start=0.0 * pq.ms, t_stop=20.0 * pq.ms)\n', (11640, 11693), False, 'import neo\n'), ((12202, 12246), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['self.test_spiketrains'], {}), '(self.test_spiketrains)\n', (12223, 12246), False, 'from elephant import statistics\n'), ((12345, 12372), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['[st]'], {}), '([st])\n', (12366, 12372), False, 'from elephant import statistics\n'), ((13047, 13073), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['sts'], {}), '(sts)\n', (13068, 13073), False, 'from elephant import statistics\n'), ((13143, 13181), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['self.test_array'], {}), '(self.test_array)\n', (13164, 13181), False, 'from elephant import statistics\n'), ((13365, 13391), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['lst'], {}), '(lst)\n', (13386, 13391), False, 'from elephant import statistics\n'), ((13464, 13505), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['self.test_quantity'], {}), '(self.test_quantity)\n', (13485, 13505), False, 'from elephant import 
statistics\n'), ((13695, 13721), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['lst'], {}), '(lst)\n', (13716, 13721), False, 'from elephant import statistics\n'), ((13790, 13827), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['self.test_list'], {}), '(self.test_list)\n', (13811, 13827), False, 'from elephant import statistics\n'), ((14009, 14035), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['lst'], {}), '(lst)\n', (14030, 14035), False, 'from elephant import statistics\n'), ((15371, 15389), 'elephant.statistics.lv', 'statistics.lv', (['seq'], {}), '(seq)\n', (15384, 15389), False, 'from elephant import statistics\n'), ((15528, 15546), 'elephant.statistics.lv', 'statistics.lv', (['seq'], {}), '(seq)\n', (15541, 15546), False, 'from elephant import statistics\n'), ((15668, 15686), 'elephant.statistics.lv', 'statistics.lv', (['seq'], {}), '(seq)\n', (15681, 15686), False, 'from elephant import statistics\n'), ((15942, 15962), 'numpy.array', 'np.array', (['[seq, seq]'], {}), '([seq, seq])\n', (15950, 15962), True, 'import numpy as np\n'), ((17107, 17126), 'elephant.statistics.lvr', 'statistics.lvr', (['seq'], {}), '(seq)\n', (17121, 17126), False, 'from elephant import statistics\n'), ((17266, 17285), 'elephant.statistics.lvr', 'statistics.lvr', (['seq'], {}), '(seq)\n', (17280, 17285), False, 'from elephant import statistics\n'), ((17408, 17427), 'elephant.statistics.lvr', 'statistics.lvr', (['seq'], {}), '(seq)\n', (17422, 17427), False, 'from elephant import statistics\n'), ((17687, 17707), 'numpy.array', 'np.array', (['[seq, seq]'], {}), '([seq, seq])\n', (17695, 17707), True, 'import numpy as np\n'), ((19178, 19197), 'elephant.statistics.cv2', 'statistics.cv2', (['seq'], {}), '(seq)\n', (19192, 19197), False, 'from elephant import statistics\n'), ((19337, 19356), 'elephant.statistics.cv2', 'statistics.cv2', (['seq'], {}), '(seq)\n', (19351, 19356), False, 'from elephant import statistics\n'), ((19479, 19498), 
'elephant.statistics.cv2', 'statistics.cv2', (['seq'], {}), '(seq)\n', (19493, 19498), False, 'from elephant import statistics\n'), ((19758, 19778), 'numpy.array', 'np.array', (['[seq, seq]'], {}), '([seq, seq])\n', (19766, 19778), True, 'import numpy as np\n'), ((21053, 21126), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['st', 'sampling_period', 'self.kernel'], {'cutoff': '(0)'}), '(st, sampling_period, self.kernel, cutoff=0)\n', (21082, 21126), False, 'from elephant import statistics\n'), ((25770, 25794), 'numpy.nonzero', 'np.nonzero', (['(rate > 1e-06)'], {}), '(rate > 1e-06)\n', (25780, 25794), True, 'import numpy as np\n'), ((28492, 28519), 'numpy.array', 'np.array', (['[-cutoff, cutoff]'], {}), '([-cutoff, cutoff])\n', (28500, 28519), True, 'import numpy as np\n'), ((29619, 29667), 'numpy.linspace', 'np.linspace', (['(-cutoff)', 'cutoff'], {'num': '(2 * cutoff + 1)'}), '(-cutoff, cutoff, num=2 * cutoff + 1)\n', (29630, 29667), True, 'import numpy as np\n'), ((33463, 33553), 'elephant.statistics.optimal_kernel_bandwidth', 'statistics.optimal_kernel_bandwidth', (['spiketrain.magnitude'], {'times': 'None', 'bootstrap': '(False)'}), '(spiketrain.magnitude, times=None,\n bootstrap=False)\n', (33498, 33553), False, 'from elephant import statistics\n'), ((34824, 34852), 'numpy.linspace', 'np.linspace', (['(1)', '(1000)'], {'num': '(10)'}), '(1, 1000, num=10)\n', (34835, 34852), True, 'import numpy as np\n'), ((35307, 35347), 'numpy.all', 'np.all', (['(rates_mean[:-1] < rates_mean[1:])'], {}), '(rates_mean[:-1] < rates_mean[1:])\n', (35313, 35347), True, 'import numpy as np\n'), ((36232, 36320), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spiketrain'], {'sampling_period': '(20 * pq.ms)', 'kernel': 'kernel'}), '(spiketrain, sampling_period=20 * pq.ms,\n kernel=kernel)\n', (36261, 36320), False, 'from elephant import statistics\n'), ((40159, 40212), 'numpy.array', 'np.array', (['[4, 2, 1, 1, 2, 2, 1, 0, 
1, 0]'], {'dtype': 'float'}), '([4, 2, 1, 1, 2, 2, 1, 0, 1, 0], dtype=float)\n', (40167, 40212), True, 'import numpy as np\n'), ((40880, 40942), 'elephant.spike_train_generation.homogeneous_poisson_process', 'homogeneous_poisson_process', ([], {'rate': '(10 * pq.Hz)', 't_stop': '(10 * pq.s)'}), '(rate=10 * pq.Hz, t_stop=10 * pq.s)\n', (40907, 40942), False, 'from elephant.spike_train_generation import homogeneous_poisson_process\n'), ((41050, 41123), 'elephant.statistics.time_histogram', 'statistics.time_histogram', (['spiketrains'], {'bin_size': '(3 * pq.ms)', 'output': 'output'}), '(spiketrains, bin_size=3 * pq.ms, output=output)\n', (41075, 41123), False, 'from elephant import statistics\n'), ((10573, 10593), 'quantities.Quantity', 'pq.Quantity', (['(0)', '"""ms"""'], {}), "(0, 'ms')\n", (10584, 10593), True, 'import quantities as pq\n'), ((10698, 10719), 'quantities.Quantity', 'pq.Quantity', (['(10)', '"""ms"""'], {}), "(10, 'ms')\n", (10709, 10719), True, 'import quantities as pq\n'), ((10825, 10845), 'quantities.Quantity', 'pq.Quantity', (['(0)', '"""ms"""'], {}), "(0, 'ms')\n", (10836, 10845), True, 'import quantities as pq\n'), ((10880, 10901), 'quantities.Quantity', 'pq.Quantity', (['(10)', '"""ms"""'], {}), "(10, 'ms')\n", (10891, 10901), True, 'import quantities as pq\n'), ((11007, 11027), 'quantities.Quantity', 'pq.Quantity', (['(0)', '"""ms"""'], {}), "(0, 'ms')\n", (11018, 11027), True, 'import quantities as pq\n'), ((11208, 11229), 'quantities.Quantity', 'pq.Quantity', (['(10)', '"""ms"""'], {}), "(10, 'ms')\n", (11219, 11229), True, 'import quantities as pq\n'), ((12140, 12162), 'numpy.var', 'np.var', (['self.sp_counts'], {}), '(self.sp_counts)\n', (12146, 12162), True, 'import numpy as np\n'), ((12165, 12188), 'numpy.mean', 'np.mean', (['self.sp_counts'], {}), '(self.sp_counts)\n', (12172, 12188), True, 'import numpy as np\n'), ((12481, 12506), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['[]'], {}), '([])\n', (12502, 12506), False, 
'from elephant import statistics\n'), ((12542, 12569), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['[[]]'], {}), '([[]])\n', (12563, 12569), False, 'from elephant import statistics\n'), ((12641, 12674), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['([] * pq.ms)'], {}), '([] * pq.ms)\n', (12662, 12674), False, 'from elephant import statistics\n'), ((12855, 12880), 'elephant.statistics.fanofactor', 'statistics.fanofactor', (['st'], {}), '(st)\n', (12876, 12880), False, 'from elephant import statistics\n'), ((13208, 13230), 'numpy.var', 'np.var', (['self.sp_counts'], {}), '(self.sp_counts)\n', (13214, 13230), True, 'import numpy as np\n'), ((13233, 13256), 'numpy.mean', 'np.mean', (['self.sp_counts'], {}), '(self.sp_counts)\n', (13240, 13256), True, 'import numpy as np\n'), ((13532, 13554), 'numpy.var', 'np.var', (['self.sp_counts'], {}), '(self.sp_counts)\n', (13538, 13554), True, 'import numpy as np\n'), ((13557, 13580), 'numpy.mean', 'np.mean', (['self.sp_counts'], {}), '(self.sp_counts)\n', (13564, 13580), True, 'import numpy as np\n'), ((13854, 13876), 'numpy.var', 'np.var', (['self.sp_counts'], {}), '(self.sp_counts)\n', (13860, 13876), True, 'import numpy as np\n'), ((13879, 13902), 'numpy.mean', 'np.mean', (['self.sp_counts'], {}), '(self.sp_counts)\n', (13886, 13902), True, 'import numpy as np\n'), ((17946, 17970), 'elephant.statistics.lvr', 'statistics.lvr', (['seq'], {'R': '(5)'}), '(seq, R=5)\n', (17960, 17970), False, 'from elephant import statistics\n'), ((24553, 24760), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['self.spike_train'], {'sampling_period': 'kernel_resolution', 'kernel': 'kernel', 't_start': '(self.st_tr[0] * pq.s)', 't_stop': '(self.st_tr[1] * pq.s)', 'trim': '(False)', 'center_kernel': 'center_kernel'}), '(self.spike_train, sampling_period=\n kernel_resolution, kernel=kernel, t_start=self.st_tr[0] * pq.s, t_stop=\n self.st_tr[1] * pq.s, trim=False, 
center_kernel=center_kernel)\n', (24582, 24760), False, 'from elephant import statistics\n'), ((27868, 27981), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spiketrain'], {'sampling_period': 'sampling_period', 'kernel': 'kernel', 'center_kernel': '(True)'}), '(spiketrain, sampling_period=sampling_period,\n kernel=kernel, center_kernel=True)\n', (27897, 27981), False, 'from elephant import statistics\n'), ((29182, 29330), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spiketrain'], {'sampling_period': 'sampling_period', 'kernel': 'kernel', 'cutoff': 'cutoff', 'trim': '(True)', 'center_kernel': 'center_kernel'}), '(spiketrain, sampling_period=sampling_period,\n kernel=kernel, cutoff=cutoff, trim=True, center_kernel=center_kernel)\n', (29211, 29330), False, 'from elephant import statistics\n'), ((29444, 29499), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['rate.magnitude', '(0)'], {'decimal': '(3)'}), '(rate.magnitude, 0, decimal=3)\n', (29469, 29499), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((30419, 30538), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spiketrain'], {'sampling_period': 'sampling_period', 'kernel': 'kernel', 'cutoff': 'cutoff', 'trim': 'trim'}), '(spiketrain, sampling_period=sampling_period,\n kernel=kernel, cutoff=cutoff, trim=trim)\n', (30448, 30538), False, 'from elephant import statistics\n'), ((30609, 30749), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spiketrain'], {'sampling_period': 'sampling_period', 'kernel': 'kernel', 'cutoff': 'cutoff', 'trim': 'trim', 'center_kernel': '(False)'}), '(spiketrain, sampling_period=sampling_period,\n kernel=kernel, cutoff=cutoff, trim=trim, center_kernel=False)\n', (30638, 30749), False, 'from elephant import statistics\n'), ((30823, 30878), 'numpy.testing.assert_array_almost_equal', 
'assert_array_almost_equal', (['rate_centered', 'rate_convolve'], {}), '(rate_centered, rate_convolve)\n', (30848, 30878), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_array_less\n'), ((34949, 35042), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spiketrain'], {'sampling_period': 'sampling_period', 'kernel': 'kernel'}), '(spiketrain, sampling_period=sampling_period,\n kernel=kernel)\n', (34978, 35042), False, 'from elephant import statistics\n'), ((36039, 36069), 'numpy.array', 'np.array', (['[-0.0001, 0, 0.0001]'], {}), '([-0.0001, 0, 0.0001])\n', (36047, 36069), True, 'import numpy as np\n'), ((11577, 11598), 'numpy.random.randint', 'np.random.randint', (['(20)'], {}), '(20)\n', (11594, 11598), True, 'import numpy as np\n'), ((16255, 16288), 'elephant.statistics.lv', 'statistics.lv', (['seq'], {'with_nan': '(True)'}), '(seq, with_nan=True)\n', (16268, 16288), False, 'from elephant import statistics\n'), ((18325, 18359), 'elephant.statistics.lvr', 'statistics.lvr', (['seq'], {'with_nan': '(True)'}), '(seq, with_nan=True)\n', (18339, 18359), False, 'from elephant import statistics\n'), ((20283, 20312), 'numpy.random.rand', 'np.random.rand', (['st_num_spikes'], {}), '(st_num_spikes)\n', (20297, 20312), True, 'import numpy as np\n'), ((23997, 24022), 'elephant.kernels.__dict__.values', 'kernels.__dict__.values', ([], {}), '()\n', (24020, 24022), True, 'import elephant.kernels as kernels\n'), ((27436, 27461), 'elephant.kernels.__dict__.values', 'kernels.__dict__.values', ([], {}), '()\n', (27459, 27461), True, 'import elephant.kernels as kernels\n'), ((28718, 28743), 'elephant.kernels.__dict__.values', 'kernels.__dict__.values', ([], {}), '()\n', (28741, 28743), True, 'import elephant.kernels as kernels\n'), ((29993, 30018), 'elephant.kernels.__dict__.values', 'kernels.__dict__.values', ([], {}), '()\n', (30016, 30018), True, 'import elephant.kernels as kernels\n'), ((31143, 31172), 
'numpy.random.rand', 'np.random.rand', (['st_num_spikes'], {}), '(st_num_spikes)\n', (31157, 31172), True, 'import numpy as np\n'), ((35559, 35584), 'elephant.kernels.__dict__.values', 'kernels.__dict__.values', ([], {}), '()\n', (35582, 35584), True, 'import elephant.kernels as kernels\n'), ((36510, 36525), 'numpy.argmax', 'np.argmax', (['rate'], {}), '(rate)\n', (36519, 36525), True, 'import numpy as np\n'), ((36991, 37083), 'elephant.statistics.instantaneous_rate', 'statistics.instantaneous_rate', (['spiketrain'], {'sampling_period': '(period * pq.ms)', 'kernel': 'kernel'}), '(spiketrain, sampling_period=period * pq.ms,\n kernel=kernel)\n', (37020, 37083), False, 'from elephant import statistics\n'), ((36747, 36760), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (36755, 36760), True, 'import numpy as np\n'), ((37201, 37216), 'numpy.argmax', 'np.argmax', (['rate'], {}), '(rate)\n', (37210, 37216), True, 'import numpy as np\n')] |
import time
import numpy as np

from utils.misc_utils import create_testfiles

# Load the experiment configuration from disk.
# SECURITY NOTE: eval() executes arbitrary Python read from parameters.txt;
# if the file only ever contains a literal dict, ast.literal_eval is safer.
with open('parameters.txt', 'r') as inf:
    parameters = eval(inf.read())

# Parameter initialization
features_per_node = 9
tree_depth = 3

# Total node count of a quad-tree of depth `tree_depth`: sum of 4**i
# for i = 0..tree_depth (geometric series).
nodes = sum(4 ** i for i in range(tree_depth + 1))

state_size = features_per_node * nodes * 2  # flattened observation length
action_size = 5
action_dict = dict()
nr_trials_per_test = 100
test_idx = 0

# Generate one test file per configured test, each with the same trial count.
# Pass the named constant instead of a duplicated magic number so a single
# edit to nr_trials_per_test stays consistent.
for test_nr in parameters:
    current_parameters = parameters[test_nr]
    create_testfiles(current_parameters, test_nr,
                     nr_trials_per_test=nr_trials_per_test)
| [
"utils.misc_utils.create_testfiles",
"numpy.power"
] | [((275, 289), 'numpy.power', 'np.power', (['(4)', 'i'], {}), '(4, i)\n', (283, 289), True, 'import numpy as np\n'), ((485, 554), 'utils.misc_utils.create_testfiles', 'create_testfiles', (['current_parameters', 'test_nr'], {'nr_trials_per_test': '(100)'}), '(current_parameters, test_nr, nr_trials_per_test=100)\n', (501, 554), False, 'from utils.misc_utils import create_testfiles\n')] |
#%%
import os
import pandas as pd
import numpy as np
import copy
from tqdm import tqdm
from plot import plot
from utils.evaluator import evaluate, set_thresholds
from utils.evaluator_seg import compute_anomaly_scores, compute_metrics
# Univariate
from utils.data_loader import load_kpi, load_IoT_fridge
# Multivariate
from utils.data_loader import load_samsung, load_energy, load_unsw, load_IoT_modbus
def _elements(array):
return array.ndim and array.size
def train(AE_model, Temporal_AE_model, model_name, window_size, stride, lamda_t, wavelet_num, seed, dataset, temporal=False, decomposition=False, segmentation=False):
ts_scores = {'dataset': [], 'f1': [], 'precision': [], 'recall': [], 'pr_auc': [], 'roc_auc': [], 'th_index': [], 'predicted_index': []}
seg_scores = {'dataset': [], 'avg_f1': [], 'avg_p': [], 'avg_r': [], 'max_p': [], 'max_r': [], 'max_f1': [], 'correct_count': [], 'correct_ratio': []}
if temporal == True:
datasets_auxiliary = globals()[f'load_{dataset}'](window_size, stride, lamda_t, wavelet_num, temporal=temporal)
ax_trains, ax_tests = datasets_auxiliary['x_train'], datasets_auxiliary['x_test']
# There are eight cases #1-1~#1-4 & #2-1~#2-4
# 1) decomposition==True: Decompose time series and evaluate through traditional metrics (Temporal)
# 4) decomposition==False: Evaluate through traditional metrics with common methods
if segmentation == False:
datasets = globals()[f'load_{dataset}'](window_size, stride, lamda_t, wavelet_num, decomposition=decomposition, segmentation=segmentation)
x_trains, x_tests, y_tests = datasets['x_train'], datasets['x_test'], datasets['y_test']
test_seq, label_seq = datasets['test_seq'], datasets['label_seq']
if decomposition == True:
train_residual, test_residual = datasets['x_train_resid'], datasets['x_test_resid']
per_window_idx = []
for data_num in tqdm(range(len(x_trains))):
# 1) if decomposition == True
if decomposition == True:
X_test = x_tests[data_num]
residual_X_train = train_residual[data_num]
residual_X_test = test_residual[data_num]
# 1-1) temporal=True, decomposition=True, Segmentation=False
if temporal == True:
X_train_ax = ax_trains[data_num]
X_test_ax = ax_tests[data_num]
model = Temporal_AE_model(X_train_ax, residual_X_train)
rec_x = model.predict([X_test_ax, residual_X_test])
thresholds = set_thresholds(residual_X_test, rec_x, is_reconstructed=True)
test_scores = evaluate(thresholds, residual_X_test, rec_x, y_tests[data_num], is_reconstructed=True)
# 2-1) temporal=False, decomposition=True, Segmentation=False
else:
if model_name == "MS-RNN":
model = AE_model(residual_X_train)
rec_x = [np.flip(rec, axis=1) for rec in model.predict(residual_X_test)]
thresholds = set_thresholds(residual_X_test, rec_x, is_reconstructed=True, scoring='square_median')
test_scores = evaluate(thresholds, residual_X_test, rec_x, y_tests[data_num], is_reconstructed=True, scoring='square_median')
else:
model = AE_model(residual_X_train)
rec_x = model.predict(residual_X_test)
thresholds = set_thresholds(residual_X_test, rec_x, is_reconstructed=True)
test_scores = evaluate(thresholds, residual_X_test, rec_x, y_tests[data_num], is_reconstructed=True)
# 4) if decomposition == False
else:
X_train = x_trains[data_num]
X_test = x_tests[data_num]
# 1-4) temporal=True, decomposition=False, segmentation=False
if temporal == True:
X_train_ax = ax_trains[data_num]
X_test_ax = ax_tests[data_num]
model = Temporal_AE_model(X_train_ax, X_train)
rec_x = model.predict([X_test_ax, X_test])
thresholds = set_thresholds(X_test, rec_x, is_reconstructed=True)
test_scores = evaluate(thresholds, X_test, rec_x, y_tests[data_num], is_reconstructed=True)
# 2-4) temporal=False, decomposition=False, segmentation:False
else:
if model_name == "MS-RNN":
model = AE_model(X_train)
rec_x = [np.flip(rec, axis=1) for rec in model.predict(X_test)]
thresholds = set_thresholds(X_test, rec_x, is_reconstructed=True, scoring='square_median')
test_scores = evaluate(thresholds, X_test, rec_x, y_tests[data_num], is_reconstructed=True, scoring='square_median')
else:
model = AE_model(X_train)
rec_x = model.predict(X_test)
thresholds = set_thresholds(X_test, rec_x, is_reconstructed=True)
test_scores = evaluate(thresholds, X_test, rec_x, y_tests[data_num], is_reconstructed=True)
ts_scores['dataset'].append(f'Data{data_num+1}')
ts_scores['f1'].append(np.max(test_scores['f1']))
ts_scores['precision'].append(np.mean(test_scores['precision']))
ts_scores['recall'].append(np.mean(test_scores['recall']))
ts_scores['pr_auc'].append(test_scores['pr_auc'])
ts_scores['roc_auc'].append(test_scores['roc_auc'])
th_index = int(np.median(np.where(test_scores['f1']==np.max(test_scores['f1']))[0]))
ts_scores['th_index'].append(th_index)
print(f'{seed}th {model_name} Data{data_num+1}', np.max(test_scores['f1']), np.mean(test_scores['precision']), np.mean(test_scores['recall']), test_scores['pr_auc'], test_scores['roc_auc'])
pred_anomal_idx = []
for t in range(len(X_test)):
pred_anomalies = np.where(test_scores['rec_errors'][t] > thresholds[th_index])[0]
isEmpty = (_elements(pred_anomalies) == 0)
if isEmpty:
pass
else:
if pred_anomalies[0] == 0:
pred_anomal_idx.append(t)
per_window_idx.append(pred_anomal_idx)
ts_scores['predicted_index'].extend(per_window_idx)
scores_all = copy.deepcopy(ts_scores)
del ts_scores['th_index']
results_df = pd.DataFrame(ts_scores)
print("@"*5, f'{seed}th Seed {model_name} R{decomposition}_T{temporal}_Ts', "@"*5)
print(results_df.groupby('dataset').mean())
save_results_path = f'./results/{dataset}/Ts'
try:
if not(os.path.isdir(save_results_path)):
os.makedirs(os.path.join(save_results_path), exist_ok=True)
except OSError as e:
print("Failed to create directory!!!!!")
results_df.to_csv(f'{save_results_path}/{model_name}_R{decomposition}_T{temporal}_ts_seed{seed}.csv', index=False)
plot(model_name, ts_scores, test_seq, label_seq, seed, save_results_path, decomposition, temporal)
# 2) decomposition==True: Decompose time series and evalutate new metrics (Temporal+Seg_evaluation)
# 3) decomposition==False: Evaluate through new metrics with common methods (Seg_evaluation)
elif segmentation == True:
datasets = globals()[f'load_{dataset}'](window_size, stride, lamda_t, wavelet_num, decomposition=decomposition, segmentation=segmentation)
x_trains, x_tests = datasets['x_train'], datasets['x_test']
y_tests, y_segment_tests = datasets['y_test'], datasets['y_segment_test']
if decomposition == True:
train_residual, test_residual = datasets['x_train_resid'], datasets['x_test_resid']
per_window_idx = []
for data_num in tqdm(range(len(x_trains))):
# 2) if decomposition == True
if decomposition == True:
residual_X_train = train_residual[data_num]
residual_X_test = test_residual[data_num]
# 1-2) temporal=True, decomposition=True, segmentation=True
if temporal == True:
X_train_ax = ax_trains[data_num]
X_test_ax = ax_tests[data_num]
model = Temporal_AE_model(X_train_ax, residual_X_train)
scores = compute_anomaly_scores(residual_X_test, model.predict([X_test_ax, residual_X_test]))
test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
else:
# 2-2) temporal=False, decomposition=True, segmentation=True
if model_name == "MS-RNN":
model = AE_model(residual_X_train)
rec_x = np.mean([np.flip(rec, axis=1) for rec in model.predict(residual_X_test)], axis=0)
scores = compute_anomaly_scores(residual_X_test, rec_x, scoring='square_median')
test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
else:
model = AE_model(residual_X_train)
scores = compute_anomaly_scores(residual_X_test, model.predict(residual_X_test))
test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
# 3) if decomposition == False
else:
X_train = x_trains[data_num]
X_test = x_tests[data_num]
# 1-3) temporal=True, decomposition=False, segmentation=True
if temporal == True:
X_train_ax = ax_trains[data_num]
X_test_ax = ax_tests[data_num]
model = Temporal_AE_model(X_train_ax, X_train)
scores = compute_anomaly_scores(X_test, model.predict([X_test_ax, X_test]))
test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
# 2-3) temporal=False, decomposition=False, segmentation=True
else:
if model_name == "MS-RNN":
model = AE_model(X_train)
rec_x = np.mean([np.flip(rec, axis=1) for rec in model.predict(X_test)], axis=0)
scores = compute_anomaly_scores(X_test, rec_x, scoring='square_median')
test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
else:
model = AE_model(X_train)
scores = compute_anomaly_scores(X_test, model.predict(X_test))
test_scores = compute_metrics(scores, y_tests[data_num], y_segment_tests[data_num])
seg_scores['dataset'].append(f'Data{data_num+1}')
seg_scores['max_f1'].append(np.max(test_scores['f1']))
seg_scores['max_p'].append(np.max(test_scores['precision']))
seg_scores['max_r'].append(np.max(test_scores['recall']))
seg_scores['avg_f1'].append(np.average(test_scores['f1']))
seg_scores['avg_p'].append(np.average(test_scores['precision']))
seg_scores['avg_r'].append(np.average(test_scores['recall']))
seg_scores['correct_count'].append(np.average(test_scores['count']))
seg_scores['correct_ratio'].append(np.average(test_scores['ratio']))
print(f'{seed}th {model_name} Data{data_num+1}', np.max(test_scores['f1']), np.mean(test_scores['precision']), np.mean(test_scores['recall']), np.mean(test_scores['count']), np.mean(test_scores['ratio']))
results_df = pd.DataFrame(seg_scores)
print("@"*5, f'{seed}th Seed {model_name} R{decomposition}_T{temporal}_Seg', "@"*5)
print(results_df.groupby('dataset').mean())
save_results_path = f'./results/{dataset}/Seg'
try:
if not(os.path.isdir(save_results_path)):
os.makedirs(os.path.join(save_results_path), exist_ok=True)
except OSError as e:
print("Failed to create directory!!!!!")
results_df.to_csv(f'{save_results_path}/{model_name}_R{decomposition}_T{temporal}_seg_seed{seed}.csv', index=False)
# %%
| [
"numpy.mean",
"numpy.flip",
"numpy.average",
"numpy.where",
"utils.evaluator.evaluate",
"os.path.join",
"numpy.max",
"plot.plot",
"os.path.isdir",
"copy.deepcopy",
"pandas.DataFrame",
"utils.evaluator_seg.compute_anomaly_scores",
"utils.evaluator.set_thresholds",
"utils.evaluator_seg.compu... | [((6713, 6737), 'copy.deepcopy', 'copy.deepcopy', (['ts_scores'], {}), '(ts_scores)\n', (6726, 6737), False, 'import copy\n'), ((6793, 6816), 'pandas.DataFrame', 'pd.DataFrame', (['ts_scores'], {}), '(ts_scores)\n', (6805, 6816), True, 'import pandas as pd\n'), ((7380, 7482), 'plot.plot', 'plot', (['model_name', 'ts_scores', 'test_seq', 'label_seq', 'seed', 'save_results_path', 'decomposition', 'temporal'], {}), '(model_name, ts_scores, test_seq, label_seq, seed, save_results_path,\n decomposition, temporal)\n', (7384, 7482), False, 'from plot import plot\n'), ((12190, 12214), 'pandas.DataFrame', 'pd.DataFrame', (['seg_scores'], {}), '(seg_scores)\n', (12202, 12214), True, 'import pandas as pd\n'), ((5513, 5538), 'numpy.max', 'np.max', (["test_scores['f1']"], {}), "(test_scores['f1'])\n", (5519, 5538), True, 'import numpy as np\n'), ((5582, 5615), 'numpy.mean', 'np.mean', (["test_scores['precision']"], {}), "(test_scores['precision'])\n", (5589, 5615), True, 'import numpy as np\n'), ((5656, 5686), 'numpy.mean', 'np.mean', (["test_scores['recall']"], {}), "(test_scores['recall'])\n", (5663, 5686), True, 'import numpy as np\n'), ((6023, 6048), 'numpy.max', 'np.max', (["test_scores['f1']"], {}), "(test_scores['f1'])\n", (6029, 6048), True, 'import numpy as np\n'), ((6050, 6083), 'numpy.mean', 'np.mean', (["test_scores['precision']"], {}), "(test_scores['precision'])\n", (6057, 6083), True, 'import numpy as np\n'), ((6085, 6115), 'numpy.mean', 'np.mean', (["test_scores['recall']"], {}), "(test_scores['recall'])\n", (6092, 6115), True, 'import numpy as np\n'), ((7055, 7087), 'os.path.isdir', 'os.path.isdir', (['save_results_path'], {}), '(save_results_path)\n', (7068, 7087), False, 'import os\n'), ((2640, 2701), 'utils.evaluator.set_thresholds', 'set_thresholds', (['residual_X_test', 'rec_x'], {'is_reconstructed': '(True)'}), '(residual_X_test, rec_x, is_reconstructed=True)\n', (2654, 2701), False, 'from utils.evaluator import evaluate, 
set_thresholds\n'), ((2736, 2826), 'utils.evaluator.evaluate', 'evaluate', (['thresholds', 'residual_X_test', 'rec_x', 'y_tests[data_num]'], {'is_reconstructed': '(True)'}), '(thresholds, residual_X_test, rec_x, y_tests[data_num],\n is_reconstructed=True)\n', (2744, 2826), False, 'from utils.evaluator import evaluate, set_thresholds\n'), ((4355, 4407), 'utils.evaluator.set_thresholds', 'set_thresholds', (['X_test', 'rec_x'], {'is_reconstructed': '(True)'}), '(X_test, rec_x, is_reconstructed=True)\n', (4369, 4407), False, 'from utils.evaluator import evaluate, set_thresholds\n'), ((4442, 4519), 'utils.evaluator.evaluate', 'evaluate', (['thresholds', 'X_test', 'rec_x', 'y_tests[data_num]'], {'is_reconstructed': '(True)'}), '(thresholds, X_test, rec_x, y_tests[data_num], is_reconstructed=True)\n', (4450, 4519), False, 'from utils.evaluator import evaluate, set_thresholds\n'), ((6284, 6345), 'numpy.where', 'np.where', (["(test_scores['rec_errors'][t] > thresholds[th_index])"], {}), "(test_scores['rec_errors'][t] > thresholds[th_index])\n", (6292, 6345), True, 'import numpy as np\n'), ((7118, 7149), 'os.path.join', 'os.path.join', (['save_results_path'], {}), '(save_results_path)\n', (7130, 7149), False, 'import os\n'), ((11386, 11411), 'numpy.max', 'np.max', (["test_scores['f1']"], {}), "(test_scores['f1'])\n", (11392, 11411), True, 'import numpy as np\n'), ((11452, 11484), 'numpy.max', 'np.max', (["test_scores['precision']"], {}), "(test_scores['precision'])\n", (11458, 11484), True, 'import numpy as np\n'), ((11525, 11554), 'numpy.max', 'np.max', (["test_scores['recall']"], {}), "(test_scores['recall'])\n", (11531, 11554), True, 'import numpy as np\n'), ((11598, 11627), 'numpy.average', 'np.average', (["test_scores['f1']"], {}), "(test_scores['f1'])\n", (11608, 11627), True, 'import numpy as np\n'), ((11668, 11704), 'numpy.average', 'np.average', (["test_scores['precision']"], {}), "(test_scores['precision'])\n", (11678, 11704), True, 'import numpy as np\n'), 
((11745, 11778), 'numpy.average', 'np.average', (["test_scores['recall']"], {}), "(test_scores['recall'])\n", (11755, 11778), True, 'import numpy as np\n'), ((11827, 11859), 'numpy.average', 'np.average', (["test_scores['count']"], {}), "(test_scores['count'])\n", (11837, 11859), True, 'import numpy as np\n'), ((11908, 11940), 'numpy.average', 'np.average', (["test_scores['ratio']"], {}), "(test_scores['ratio'])\n", (11918, 11940), True, 'import numpy as np\n'), ((12004, 12029), 'numpy.max', 'np.max', (["test_scores['f1']"], {}), "(test_scores['f1'])\n", (12010, 12029), True, 'import numpy as np\n'), ((12031, 12064), 'numpy.mean', 'np.mean', (["test_scores['precision']"], {}), "(test_scores['precision'])\n", (12038, 12064), True, 'import numpy as np\n'), ((12066, 12096), 'numpy.mean', 'np.mean', (["test_scores['recall']"], {}), "(test_scores['recall'])\n", (12073, 12096), True, 'import numpy as np\n'), ((12098, 12127), 'numpy.mean', 'np.mean', (["test_scores['count']"], {}), "(test_scores['count'])\n", (12105, 12127), True, 'import numpy as np\n'), ((12129, 12158), 'numpy.mean', 'np.mean', (["test_scores['ratio']"], {}), "(test_scores['ratio'])\n", (12136, 12158), True, 'import numpy as np\n'), ((12447, 12479), 'os.path.isdir', 'os.path.isdir', (['save_results_path'], {}), '(save_results_path)\n', (12460, 12479), False, 'import os\n'), ((3163, 3254), 'utils.evaluator.set_thresholds', 'set_thresholds', (['residual_X_test', 'rec_x'], {'is_reconstructed': '(True)', 'scoring': '"""square_median"""'}), "(residual_X_test, rec_x, is_reconstructed=True, scoring=\n 'square_median')\n", (3177, 3254), False, 'from utils.evaluator import evaluate, set_thresholds\n'), ((3288, 3403), 'utils.evaluator.evaluate', 'evaluate', (['thresholds', 'residual_X_test', 'rec_x', 'y_tests[data_num]'], {'is_reconstructed': '(True)', 'scoring': '"""square_median"""'}), "(thresholds, residual_X_test, rec_x, y_tests[data_num],\n is_reconstructed=True, scoring='square_median')\n", (3296, 3403), 
False, 'from utils.evaluator import evaluate, set_thresholds\n'), ((3585, 3646), 'utils.evaluator.set_thresholds', 'set_thresholds', (['residual_X_test', 'rec_x'], {'is_reconstructed': '(True)'}), '(residual_X_test, rec_x, is_reconstructed=True)\n', (3599, 3646), False, 'from utils.evaluator import evaluate, set_thresholds\n'), ((3685, 3775), 'utils.evaluator.evaluate', 'evaluate', (['thresholds', 'residual_X_test', 'rec_x', 'y_tests[data_num]'], {'is_reconstructed': '(True)'}), '(thresholds, residual_X_test, rec_x, y_tests[data_num],\n is_reconstructed=True)\n', (3693, 3775), False, 'from utils.evaluator import evaluate, set_thresholds\n'), ((4861, 4938), 'utils.evaluator.set_thresholds', 'set_thresholds', (['X_test', 'rec_x'], {'is_reconstructed': '(True)', 'scoring': '"""square_median"""'}), "(X_test, rec_x, is_reconstructed=True, scoring='square_median')\n", (4875, 4938), False, 'from utils.evaluator import evaluate, set_thresholds\n'), ((4977, 5084), 'utils.evaluator.evaluate', 'evaluate', (['thresholds', 'X_test', 'rec_x', 'y_tests[data_num]'], {'is_reconstructed': '(True)', 'scoring': '"""square_median"""'}), "(thresholds, X_test, rec_x, y_tests[data_num], is_reconstructed=\n True, scoring='square_median')\n", (4985, 5084), False, 'from utils.evaluator import evaluate, set_thresholds\n'), ((5247, 5299), 'utils.evaluator.set_thresholds', 'set_thresholds', (['X_test', 'rec_x'], {'is_reconstructed': '(True)'}), '(X_test, rec_x, is_reconstructed=True)\n', (5261, 5299), False, 'from utils.evaluator import evaluate, set_thresholds\n'), ((5338, 5415), 'utils.evaluator.evaluate', 'evaluate', (['thresholds', 'X_test', 'rec_x', 'y_tests[data_num]'], {'is_reconstructed': '(True)'}), '(thresholds, X_test, rec_x, y_tests[data_num], is_reconstructed=True)\n', (5346, 5415), False, 'from utils.evaluator import evaluate, set_thresholds\n'), ((8888, 8957), 'utils.evaluator_seg.compute_metrics', 'compute_metrics', (['scores', 'y_tests[data_num]', 'y_segment_tests[data_num]'], 
{}), '(scores, y_tests[data_num], y_segment_tests[data_num])\n', (8903, 8957), False, 'from utils.evaluator_seg import compute_anomaly_scores, compute_metrics\n'), ((10403, 10472), 'utils.evaluator_seg.compute_metrics', 'compute_metrics', (['scores', 'y_tests[data_num]', 'y_segment_tests[data_num]'], {}), '(scores, y_tests[data_num], y_segment_tests[data_num])\n', (10418, 10472), False, 'from utils.evaluator_seg import compute_anomaly_scores, compute_metrics\n'), ((12510, 12541), 'os.path.join', 'os.path.join', (['save_results_path'], {}), '(save_results_path)\n', (12522, 12541), False, 'import os\n'), ((3062, 3082), 'numpy.flip', 'np.flip', (['rec'], {'axis': '(1)'}), '(rec, axis=1)\n', (3069, 3082), True, 'import numpy as np\n'), ((4769, 4789), 'numpy.flip', 'np.flip', (['rec'], {'axis': '(1)'}), '(rec, axis=1)\n', (4776, 4789), True, 'import numpy as np\n'), ((9310, 9381), 'utils.evaluator_seg.compute_anomaly_scores', 'compute_anomaly_scores', (['residual_X_test', 'rec_x'], {'scoring': '"""square_median"""'}), "(residual_X_test, rec_x, scoring='square_median')\n", (9332, 9381), False, 'from utils.evaluator_seg import compute_anomaly_scores, compute_metrics\n'), ((9420, 9489), 'utils.evaluator_seg.compute_metrics', 'compute_metrics', (['scores', 'y_tests[data_num]', 'y_segment_tests[data_num]'], {}), '(scores, y_tests[data_num], y_segment_tests[data_num])\n', (9435, 9489), False, 'from utils.evaluator_seg import compute_anomaly_scores, compute_metrics\n'), ((9734, 9803), 'utils.evaluator_seg.compute_metrics', 'compute_metrics', (['scores', 'y_tests[data_num]', 'y_segment_tests[data_num]'], {}), '(scores, y_tests[data_num], y_segment_tests[data_num])\n', (9749, 9803), False, 'from utils.evaluator_seg import compute_anomaly_scores, compute_metrics\n'), ((10808, 10870), 'utils.evaluator_seg.compute_anomaly_scores', 'compute_anomaly_scores', (['X_test', 'rec_x'], {'scoring': '"""square_median"""'}), "(X_test, rec_x, scoring='square_median')\n", (10830, 10870), False, 
'from utils.evaluator_seg import compute_anomaly_scores, compute_metrics\n'), ((10909, 10978), 'utils.evaluator_seg.compute_metrics', 'compute_metrics', (['scores', 'y_tests[data_num]', 'y_segment_tests[data_num]'], {}), '(scores, y_tests[data_num], y_segment_tests[data_num])\n', (10924, 10978), False, 'from utils.evaluator_seg import compute_anomaly_scores, compute_metrics\n'), ((11200, 11269), 'utils.evaluator_seg.compute_metrics', 'compute_metrics', (['scores', 'y_tests[data_num]', 'y_segment_tests[data_num]'], {}), '(scores, y_tests[data_num], y_segment_tests[data_num])\n', (11215, 11269), False, 'from utils.evaluator_seg import compute_anomaly_scores, compute_metrics\n'), ((5879, 5904), 'numpy.max', 'np.max', (["test_scores['f1']"], {}), "(test_scores['f1'])\n", (5885, 5904), True, 'import numpy as np\n'), ((9204, 9224), 'numpy.flip', 'np.flip', (['rec'], {'axis': '(1)'}), '(rec, axis=1)\n', (9211, 9224), True, 'import numpy as np\n'), ((10711, 10731), 'numpy.flip', 'np.flip', (['rec'], {'axis': '(1)'}), '(rec, axis=1)\n', (10718, 10731), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.