code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/python
import sys, time
import numpy as np
from math import *
import argparse
import general_scripts as gs
from distutils.version import LooseVersion
def read_file(fn, field, key="none"):
    """Read an xmgrace-like multi-column data file into per-plot lists.

    Plots are separated by lines starting with '&'. Header/comment lines
    ('#', '@', alphabetic, or synchrotron-style ';'/':'-terminated tokens)
    are skipped in the key-less mode.

    Args:
        fn:    path of the file to parse.
        field: column index (python-style, e.g. -1 for the last column)
               used as the y-value of each data line; column 0 is always
               the x-value.
        key:   if not "none", legends are taken from '@ ... <key> "txt"'
               lines instead of being auto-numbered from 1.

    Returns:
        (legs, xlist, ylist): legend strings, plus per-plot lists of x-
        and y-values kept as *strings* (callers convert to float).
    """
    legs = []
    nplots = 0
    nlines = []
    xlist = []
    ylist = []
    tmp = 0
    tmpx = []
    tmpy = []
    if key == "none":
        dumLeg = 1
        with open(fn) as fp:
            for line in fp:
                l = line.split()
                if not l:
                    # Bugfix: whitespace-only lines used to slip past the
                    # `line == '\n'` test and crash on l[0].
                    continue
                if line[0] == '#' or line[0] == '@':
                    continue
                if l[0].isalnum():
                    continue
                if line[0].isalpha():
                    # Catch Diamond q(Angs) header.
                    continue
                if l[0].endswith(';') or l[0].endswith(':'):
                    # Hamburg and Australian Synchrotron specific, to catch
                    # opening arguments that are not technically alphanumeric.
                    continue
                if line[0] == '&':
                    # End-of-set marker: close out the current plot.
                    nlines.append(tmp)
                    xlist.append(tmpx)
                    ylist.append(tmpy)
                    legs.append(str(dumLeg))
                    dumLeg += 1
                    tmpx = []
                    tmpy = []
                    tmp = 0
                    nplots += 1
                else:
                    tmpx.append(l[0])
                    tmpy.append(l[field])
                    tmp += 1
        if len(tmpx) > 0:
            # File did not end with '&': flush the trailing plot.
            nplots += 1
            legs.append(str(dumLeg))
            nlines.append(tmp)
            xlist.append(tmpx)
            ylist.append(tmpy)
    else:
        with open(fn) as fp:
            for line in fp:
                l = line.split()
                if not l:
                    # Skip blank and whitespace-only lines safely.
                    continue
                if line[0] == '#' or line[0] == '@':
                    if line[0] == '@' and key in line:
                        # e.g. '@ s0 legend "name"'  ->  legend 'name'.
                        legs.append(l[-1].strip('"'))
                        nplots += 1
                    continue
                elif line[0] == '&':
                    nlines.append(tmp)
                    xlist.append(tmpx)
                    ylist.append(tmpy)
                    tmpx = []
                    tmpy = []
                    tmp = 0
                else:
                    tmpx.append(l[0])
                    tmpy.append(l[field])
                    tmp += 1
        if len(tmpx) > 0:
            nlines.append(tmp)
            xlist.append(tmpx)
            ylist.append(tmpy)
    # Sanity check: one legend per plot.
    if len(legs) != len(nlines):
        print( "= = ERROR: number of legends(%d) is not equal to the number of plots (%d)!" % (len(legs), len(nlines)), file=sys.stderr )
        sys.exit(1)
    return legs, xlist, ylist
if __name__ == '__main__':
    # Parsing.  (Spaces added between the concatenated description parts,
    # which previously ran together as "dataeach", "acrossthese", etc.)
    parser = argparse.ArgumentParser(description='Takes a number of xmgrace-like files containing equivalent data '
                                        'each from a different replicate, and perform averaging across '
                                        'these files. '
                                        'More info: For each file containing sets of difference curves, '
                                        'e.g. s0, s1, s2... perform average across different files while '
                                        'preserving the set layout.',
                                        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('filelist', metavar='file', type=str, nargs='+',
                        help='List of file names of the data files to be averaged.')
    parser.add_argument('-o', '--outfile', type=str, dest='out_file', default='out',
                        help='Output file for the averaged data.')
    parser.add_argument('-s', '--search_key', type=str, dest='key', default='legend',
                        help='String to search to identify keys that delineate different graphs. '
                             'For example, "legend" will isolate xmgrace command: @s# legend "txt". '
                             'Put in "none" to have this not use keys.' )
    parser.add_argument('-f', '--field', type=str, dest='field', default='end',
                        help='Which field to use as a key. Defaults to the last field. This is 0-indexed like python standard.')

    time_start = time.time()
    args = parser.parse_args()
    out_filename = args.out_file

    # Filter out commented file arguments *before* counting, so the
    # preallocated (nfiles, nplot, ndat) arrays below match the number of
    # files actually read.  (Previously a skipped file left an all-zero row
    # in xlist/ylist, silently corrupting the mean and stderr.)
    fileList = []
    for fileName in args.filelist:
        if fileName[0] == '#':
            print( "= = NOTE: skipping file argument %s" % fileName, file=sys.stderr )
        else:
            fileList.append(fileName)
    nfiles = len(fileList)
    if nfiles < 2:
        print( "= = ERROR: this script averages data from multiple curves!", file=sys.stderr )
        sys.exit(-1)
    if args.field == "end":
        field = -1
    else:
        field = int(args.field)

    bFirst = True
    for i, fileName in enumerate(fileList):
        l, x, y = read_file(fileName, field, args.key)
        x = np.array(x, dtype=np.float64)
        y = np.array(y, dtype=np.float64)
        print( " ...plot %s read." % fileName, file=sys.stderr )
        nplot = len(l)
        ndat = len(x[0])
        if bFirst:
            # First file defines the reference layout (nplot, ndat) and
            # the reference X-grid used for interpolation below.
            bFirst = False
            leglist = []
            xlist = np.zeros((nfiles, nplot, ndat), dtype=np.float64)
            ylist = np.zeros((nfiles, nplot, ndat), dtype=np.float64)
            check = (nplot, ndat)
            leglist.append(l)
            xlist[i] = x
            ylist[i] = y
            continue
        # Sanity check: every file must contain the same number of plots.
        if check[0] != nplot:
            print( "= = ERROR: Input data files do not contain the same number of plots!", file=sys.stderr )
            sys.exit(2)
        # Check if X-values are identical!
        if not np.array_equal( xlist[0], x):
            # X-grids differ: interpolate this file's data onto the first
            # file's abscissa instead of aborting.
            print( "= = WARNING: The latest input data file %s does not contain identical X-values!" % fileName, file=sys.stderr )
            print( "= = ...will use interpolation.", file=sys.stderr )
            y2 = np.zeros(check)
            for j in range(nplot):
                # Bugfix: the interpolated values were previously written
                # to y2[0, :] for every j, clobbering plot 0 and leaving
                # the remaining plots as zeros.
                y2[j, :] = np.interp(xlist[0, j, :], x[j, :], y[j, :])
            leglist.append(l)
            xlist[i] = xlist[0]
            ylist[i] = y2
        else:
            leglist.append(l)
            xlist[i] = x
            ylist[i] = y

    print( " ...all plots read. Conducting averaging.", file=sys.stderr )
    yavg = ylist.mean(axis=0)
    # Standard error of the mean across the nfiles replicates.
    ystd = ylist.std(axis=0) / sqrt(nfiles - 1)
    print( " ...average finished.", file=sys.stderr )
    if LooseVersion(np.version.version) >= LooseVersion('1.10'):
        gs.print_sxylist(out_filename, leglist[0], x[0], np.stack((yavg, ystd), axis=-1) )
    else:
        # np.stack is unavailable before NumPy 1.10: build the (..., 2)
        # array by hand instead.
        shape = list(yavg.shape)
        shape.append(2)
        tmp = np.zeros( shape, dtype=yavg.dtype )
        tmp[..., 0] = yavg
        tmp[..., 1] = ystd
        gs.print_sxylist(out_filename, leglist[0], x[0], tmp )
| [
"argparse.ArgumentParser",
"general_scripts.print_sxylist",
"numpy.array",
"numpy.zeros",
"numpy.stack",
"numpy.array_equal",
"numpy.interp",
"sys.exit",
"distutils.version.LooseVersion",
"time.time"
] | [((2807, 3199), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Takes a number of xmgrace-like files containing equivalent dataeach from a different replicate, and perform averaging acrossthese files.More info: For each file containing sets of difference curves,e.g. s0, s1, s2... perform average across different files whilepreserving the set layout."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Takes a number of xmgrace-like files containing equivalent dataeach from a different replicate, and perform averaging acrossthese files.More info: For each file containing sets of difference curves,e.g. s0, s1, s2... perform average across different files whilepreserving the set layout.'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (2830, 3199), False, 'import argparse\n'), ((4385, 4396), 'time.time', 'time.time', ([], {}), '()\n', (4394, 4396), False, 'import sys, time\n'), ((2642, 2653), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2650, 2653), False, 'import sys, time\n'), ((4614, 4626), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (4622, 4626), False, 'import sys, time\n'), ((4999, 5028), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float64'}), '(x, dtype=np.float64)\n', (5007, 5028), True, 'import numpy as np\n'), ((5038, 5067), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.float64'}), '(y, dtype=np.float64)\n', (5046, 5067), True, 'import numpy as np\n'), ((6759, 6791), 'distutils.version.LooseVersion', 'LooseVersion', (['np.version.version'], {}), '(np.version.version)\n', (6771, 6791), False, 'from distutils.version import LooseVersion\n'), ((6795, 6815), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.10"""'], {}), "('1.10')\n", (6807, 6815), False, 'from distutils.version import LooseVersion\n'), ((6986, 7019), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'yavg.dtype'}), '(shape, dtype=yavg.dtype)\n', (6994, 7019), True, 'import numpy as 
np\n'), ((7082, 7135), 'general_scripts.print_sxylist', 'gs.print_sxylist', (['out_filename', 'leglist[0]', 'x[0]', 'tmp'], {}), '(out_filename, leglist[0], x[0], tmp)\n', (7098, 7135), True, 'import general_scripts as gs\n'), ((5268, 5317), 'numpy.zeros', 'np.zeros', (['(nfiles, nplot, ndat)'], {'dtype': 'np.float64'}), '((nfiles, nplot, ndat), dtype=np.float64)\n', (5276, 5317), True, 'import numpy as np\n'), ((5333, 5382), 'numpy.zeros', 'np.zeros', (['(nfiles, nplot, ndat)'], {'dtype': 'np.float64'}), '((nfiles, nplot, ndat), dtype=np.float64)\n', (5341, 5382), True, 'import numpy as np\n'), ((5681, 5692), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (5689, 5692), False, 'import sys, time\n'), ((5842, 5869), 'numpy.array_equal', 'np.array_equal', (['xlist[0]', 'x'], {}), '(xlist[0], x)\n', (5856, 5869), True, 'import numpy as np\n'), ((6131, 6146), 'numpy.zeros', 'np.zeros', (['check'], {}), '(check)\n', (6139, 6146), True, 'import numpy as np\n'), ((6874, 6905), 'numpy.stack', 'np.stack', (['(yavg, ystd)'], {'axis': '(-1)'}), '((yavg, ystd), axis=-1)\n', (6882, 6905), True, 'import numpy as np\n'), ((6271, 6314), 'numpy.interp', 'np.interp', (['xlist[0, j, :]', 'x[j, :]', 'y[j, :]'], {}), '(xlist[0, j, :], x[j, :], y[j, :])\n', (6280, 6314), True, 'import numpy as np\n')] |
"""
Evaluation Scripts
"""
from __future__ import absolute_import
from __future__ import division
from collections import namedtuple, OrderedDict
from network import mynn
import argparse
import logging
import os
import torch
import time
import numpy as np
from config import cfg, assert_and_infer_cfg
import network
import optimizer
from ood_metrics import fpr_at_95_tpr
from tqdm import tqdm
from PIL import Image
from sklearn.metrics import roc_auc_score, roc_curve, auc, precision_recall_curve, average_precision_score, plot_roc_curve
import torchvision.transforms as standard_transforms
import tensorflow as tf
import tensorflow_datasets as tfds
from torchvision.transforms.functional import to_pil_image
import matplotlib.pyplot as plt
# Resolve the bundled pretrained snapshot relative to this file.
dirname = os.path.dirname(__file__)
pretrained_model_path = os.path.join(dirname, 'pretrained/r101_os8_base_cty.pth')
# Argument Parser
parser = argparse.ArgumentParser(description='Semantic Segmentation')
parser.add_argument('--lr', type=float, default=0.01)
parser.add_argument('--arch', type=str, default='network.deepv3.DeepR101V3PlusD_OS8',
                    help='Network architecture. We have DeepSRNX50V3PlusD (backbone: ResNeXt50) \
                    and deepWV3Plus (backbone: WideResNet38).')
parser.add_argument('--dataset', type=str, default='cityscapes',
                    help='possible datasets for statistics; cityscapes')
parser.add_argument('--fp16', action='store_true', default=False,
                    help='Use Nvidia Apex AMP')
parser.add_argument('--local_rank', default=0, type=int,
                    help='parameter used by apex library')
parser.add_argument('--trunk', type=str, default='resnet101',
                    help='trunk model, can be: resnet101 (default), resnet50')
parser.add_argument('--bs_mult', type=int, default=2,
                    help='Batch size for training per gpu')
parser.add_argument('--bs_mult_val', type=int, default=1,
                    help='Batch size for Validation per gpu')
parser.add_argument('--class_uniform_pct', type=float, default=0,
                    help='What fraction of images is uniformly sampled')
parser.add_argument('--class_uniform_tile', type=int, default=1024,
                    help='tile size for class uniform sampling')
parser.add_argument('--batch_weighting', action='store_true', default=False,
                    help='Batch weighting for class (use nll class weighting using batch stats')
parser.add_argument('--jointwtborder', action='store_true', default=False,
                    help='Enable boundary label relaxation')
parser.add_argument('--snapshot', type=str, default=pretrained_model_path)
parser.add_argument('--restore_optimizer', action='store_true', default=False)
parser.add_argument('--date', type=str, default='default',
                    help='experiment directory date name')
parser.add_argument('--exp', type=str, default='default',
                    help='experiment directory name')
parser.add_argument('--tb_tag', type=str, default='',
                    help='add tag to tb dir')
parser.add_argument('--ckpt', type=str, default='logs/ckpt',
                    help='Save Checkpoint Point')
parser.add_argument('--tb_path', type=str, default='logs/tb',
                    help='Save Tensorboard Path')
parser.add_argument('--syncbn', action='store_true', default=True,
                    help='Use Synchronized BN')
parser.add_argument('--dist_url', default='tcp://127.0.0.1:', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--backbone_lr', type=float, default=0.0,
                    help='different learning rate on backbone network')
parser.add_argument('--pooling', type=str, default='mean',
                    help='pooling methods, average is better than max')
parser.add_argument('--ood_dataset_path', type=str,
                    default='/home/nas1_userB/dataset/ood_segmentation/fishyscapes',
                    help='OoD dataset path')
# Anomaly score mode - msp, max_logit, standardized_max_logit
parser.add_argument('--score_mode', type=str, default='standardized_max_logit', # TODO(review): a dev note said "change to fssd" -- confirm the intended default
                    help='score mode for anomaly [msp, max_logit, standardized_max_logit, fssd, standardized_fssd]')
# Boundary suppression configs
parser.add_argument('--enable_boundary_suppression', type=bool, default=False,
                    help='enable boundary suppression')
parser.add_argument('--boundary_width', type=int, default=4,
                    help='initial boundary suppression width')
parser.add_argument('--boundary_iteration', type=int, default=4,
                    help='the number of boundary iterations')
# Dilated smoothing configs
parser.add_argument('--enable_dilated_smoothing', type=bool, default=False,
                    help='enable dilated smoothing')
parser.add_argument('--smoothing_kernel_size', type=int, default=7,
                    help='kernel size of dilated smoothing')
parser.add_argument('--smoothing_kernel_dilation', type=int, default=6,
                    help='kernel dilation rate of dilated smoothing')
args = parser.parse_args()
# Enable CUDNN Benchmarking optimization
#torch.backends.cudnn.benchmark = True
# Seed every RNG (CPU, CUDA, NumPy) for reproducible evaluation; cuDNN is
# forced into deterministic mode at the cost of benchmark autotuning.
random_seed = cfg.RANDOM_SEED
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.cuda.manual_seed_all(random_seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
args.world_size = 1
print(f'World Size: {args.world_size}')
# WORLD_SIZE is set by the distributed launcher (e.g. torch.distributed.launch).
if 'WORLD_SIZE' in os.environ:
    # args.apex = int(os.environ['WORLD_SIZE']) > 1
    args.world_size = int(os.environ['WORLD_SIZE'])
    print("Total world size: ", int(os.environ['WORLD_SIZE']))
torch.cuda.set_device(args.local_rank)
print('My Rank:', args.local_rank)
# Initialize distributed communication
# NOTE(review): the port is derived from wall-clock time, so all ranks must
# start within the same ~10 s window to agree on it -- fragile by design.
args.dist_url = args.dist_url + str(8000 + (int(time.time()%1000))//10)
torch.distributed.init_process_group(backend='nccl',
                                     init_method=args.dist_url,
                                     world_size=args.world_size,
                                     rank=args.local_rank)
def get_net():
    """
    Build the segmentation network, load the pretrained snapshot plus the
    precomputed class / FSSD statistics, and return it in eval mode.
    """
    # Infer derived config values from the parsed arguments, then build the
    # distributed, sync-BN-converted network.
    assert_and_infer_cfg(args)
    net = network.get_net(args, criterion=None, criterion_aux=None)
    net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)
    net = network.warp_network_in_dataparallel(net, args.local_rank)
    # A snapshot is mandatory for evaluation: bail out early when missing.
    if not args.snapshot:
        raise ValueError(f"snapshot argument is not set!")
    epoch, mean_iu = optimizer.load_weights(net, None, None,
                                           args.snapshot, args.restore_optimizer)
    print(f"Loading completed. Epoch {epoch} and mIoU {mean_iu}")
    # Statistics precomputed over the in-distribution training set; the
    # dataset name selects the matching .npy files.
    prefix = f'stats/{args.dataset}'
    class_mean = np.load(f'{prefix}_mean.npy', allow_pickle=True)
    class_var = np.load(f'{prefix}_var.npy', allow_pickle=True)
    fss = np.load('stats/fss_init_softmax.npy', allow_pickle=True)
    fssd_mean = np.load(f'{prefix}_fssd_mean.npy', allow_pickle=True)
    fssd_var = np.load(f'{prefix}_fssd_var.npy', allow_pickle=True)
    net.module.set_statistics(mean=class_mean.item(),
                              var=class_var.item(),
                              fss=fss.tolist(),
                              fssd_mean=fssd_mean.item(),
                              fssd_var=fssd_var.item())
    torch.cuda.empty_cache()
    net.eval()
    return net
def preprocess_image(x, mean_std):
    """Turn an HxWx3 uint8 array into a normalized, batched CUDA tensor.

    Args:
        x: numpy image array (as produced by PIL / np.array).
        mean_std: (mean, std) channel statistics for normalization.
    Returns:
        A float tensor on the GPU with an explicit batch dimension.
    """
    tensor = standard_transforms.ToTensor()(Image.fromarray(x))
    tensor = standard_transforms.Normalize(*mean_std)(tensor)
    tensor = tensor.cuda()
    # Add the batch axis when given a single (C, H, W) image.
    return tensor.unsqueeze(0) if tensor.dim() == 3 else tensor
if __name__ == '__main__':
    net = get_net()
    # ImageNet (mean, std) per RGB channel, matching the training pipeline.
    mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ood_data_root = args.ood_dataset_path
    # Fishyscapes follows the Cityscapes layout: image and ground-truth
    # trees are parallel and share file names.
    image_root_path = os.path.join(ood_data_root, 'leftImg8bit_trainvaltest/leftImg8bit/val')
    mask_root_path = os.path.join(ood_data_root, 'gtFine_trainvaltest/gtFine/val')
    if not os.path.exists(image_root_path):
        raise ValueError(f"Dataset directory {image_root_path} doesn't exist!")
    anomaly_score_list = []
    ood_gts_list = []
    for image_file in tqdm(os.listdir(image_root_path)):
        image_path = os.path.join(image_root_path, image_file)
        mask_path = os.path.join(mask_root_path, image_file)
        if os.path.isfile(image_path):
            # 3 x H x W
            image = np.array(Image.open(image_path).convert('RGB')).astype('uint8')
            # NOTE(review): assumes mask pixels use 1 = OoD and 0 = in-
            # distribution (other values are dropped below) -- confirm.
            mask = Image.open(mask_path)
            ood_gts = np.array(mask)
            ood_gts_list.append(np.expand_dims(ood_gts, 0))
            with torch.no_grad():
                image = preprocess_image(image, mean_std)
                main_out, anomaly_score = net(image)
                # The segmentation logits are not needed for OoD scoring.
                del main_out
            ### save output image ###
            # image = torch.clamp(-anomaly_score.cpu(), 0, 255)
            # plt.imshow(to_pil_image(image), cmap='gray')
            # plt.imsave('img/sml'+str(image_file),to_pil_image(image))
            # image = np.array(image, dtype=np.uint8)
            anomaly_score_list.append(anomaly_score.cpu().numpy())
    ood_gts = np.array(ood_gts_list)
    anomaly_scores = np.array(anomaly_score_list)
    # drop void pixels
    ood_mask = (ood_gts == 1)
    ind_mask = (ood_gts == 0)
    # Scores are negated so that the positive (OoD, label 1) class gets the
    # larger values expected by the threshold-sweeping metrics below.
    ood_out = -anomaly_scores[ood_mask]
    ind_out = -anomaly_scores[ind_mask]
    ood_label = np.ones(len(ood_out))
    ind_label = np.zeros(len(ind_out))
    val_out = np.concatenate((ind_out, ood_out))
    val_label = np.concatenate((ind_label, ood_label))
    print('Measuring metrics...')
    #AUROC
    fpr, tpr, _ = roc_curve(val_label, val_out)
    roc_auc = auc(fpr, tpr)
    #AUPRC
    precision, recall, _ = precision_recall_curve(val_label, val_out)
    prc_auc = average_precision_score(val_label, val_out)
    #FPR at 95 TPR
    # NOTE: this rebinds `fpr` (previously the ROC curve's FPR array);
    # roc_auc was already computed above, so nothing is lost.
    fpr = fpr_at_95_tpr(val_out, val_label)
    print(f'AUROC score: {roc_auc}')
    print(f'AUPRC score: {prc_auc}')
    print(f'FPR@TPR95: {fpr}')
    ### plot curve ###
    # plt.plot(fpr, tpr)
    # plt.ylabel("True Positive Rate")
    # plt.xlabel("False Positive Rate")
    # plt.savefig("curve/sml_roc_curve.png")
    # plt.cla()
    # plt.plot(precision, recall)
    # plt.ylabel("recall")
    # plt.xlabel("precision")
    # plt.savefig("curve/sml_precision_recall_curve.png")
# plt.savefig("curve/sml_precision_recall_curve.png") | [
"network.get_net",
"sklearn.metrics.auc",
"config.assert_and_infer_cfg",
"numpy.array",
"sklearn.metrics.roc_curve",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"numpy.random.seed",
"numpy.concatenate",
"torchvision.transforms.ToTensor",
"torch.nn.SyncBatchNorm.convert_sync_batc... | [((756, 781), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (771, 781), False, 'import os\n'), ((806, 863), 'os.path.join', 'os.path.join', (['dirname', '"""pretrained/r101_os8_base_cty.pth"""'], {}), "(dirname, 'pretrained/r101_os8_base_cty.pth')\n", (818, 863), False, 'import os\n'), ((892, 952), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Semantic Segmentation"""'}), "(description='Semantic Segmentation')\n", (915, 952), False, 'import argparse\n'), ((5257, 5287), 'torch.manual_seed', 'torch.manual_seed', (['random_seed'], {}), '(random_seed)\n', (5274, 5287), False, 'import torch\n'), ((5288, 5323), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['random_seed'], {}), '(random_seed)\n', (5310, 5323), False, 'import torch\n'), ((5324, 5363), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['random_seed'], {}), '(random_seed)\n', (5350, 5363), False, 'import torch\n'), ((5464, 5491), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (5478, 5491), True, 'import numpy as np\n'), ((5753, 5791), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.local_rank'], {}), '(args.local_rank)\n', (5774, 5791), False, 'import torch\n'), ((5939, 6073), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': 'args.dist_url', 'world_size': 'args.world_size', 'rank': 'args.local_rank'}), "(backend='nccl', init_method=args.\n dist_url, world_size=args.world_size, rank=args.local_rank)\n", (5975, 6073), False, 'import torch\n'), ((6313, 6339), 'config.assert_and_infer_cfg', 'assert_and_infer_cfg', (['args'], {}), '(args)\n', (6333, 6339), False, 'from config import cfg, assert_and_infer_cfg\n'), ((6351, 6408), 'network.get_net', 'network.get_net', (['args'], {'criterion': 'None', 'criterion_aux': 'None'}), '(args, criterion=None, 
criterion_aux=None)\n', (6366, 6408), False, 'import network\n'), ((6420, 6470), 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', 'torch.nn.SyncBatchNorm.convert_sync_batchnorm', (['net'], {}), '(net)\n', (6465, 6470), False, 'import torch\n'), ((6481, 6539), 'network.warp_network_in_dataparallel', 'network.warp_network_in_dataparallel', (['net', 'args.local_rank'], {}), '(net, args.local_rank)\n', (6517, 6539), False, 'import network\n'), ((6852, 6912), 'numpy.load', 'np.load', (['f"""stats/{args.dataset}_mean.npy"""'], {'allow_pickle': '(True)'}), "(f'stats/{args.dataset}_mean.npy', allow_pickle=True)\n", (6859, 6912), True, 'import numpy as np\n'), ((6929, 6988), 'numpy.load', 'np.load', (['f"""stats/{args.dataset}_var.npy"""'], {'allow_pickle': '(True)'}), "(f'stats/{args.dataset}_var.npy', allow_pickle=True)\n", (6936, 6988), True, 'import numpy as np\n'), ((6999, 7056), 'numpy.load', 'np.load', (['f"""stats/fss_init_softmax.npy"""'], {'allow_pickle': '(True)'}), "(f'stats/fss_init_softmax.npy', allow_pickle=True)\n", (7006, 7056), True, 'import numpy as np\n'), ((7073, 7138), 'numpy.load', 'np.load', (['f"""stats/{args.dataset}_fssd_mean.npy"""'], {'allow_pickle': '(True)'}), "(f'stats/{args.dataset}_fssd_mean.npy', allow_pickle=True)\n", (7080, 7138), True, 'import numpy as np\n'), ((7154, 7218), 'numpy.load', 'np.load', (['f"""stats/{args.dataset}_fssd_var.npy"""'], {'allow_pickle': '(True)'}), "(f'stats/{args.dataset}_fssd_var.npy', allow_pickle=True)\n", (7161, 7218), True, 'import numpy as np\n'), ((7502, 7526), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7524, 7526), False, 'import torch\n'), ((7602, 7620), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (7617, 7620), False, 'from PIL import Image\n'), ((7982, 8053), 'os.path.join', 'os.path.join', (['ood_data_root', '"""leftImg8bit_trainvaltest/leftImg8bit/val"""'], {}), "(ood_data_root, 'leftImg8bit_trainvaltest/leftImg8bit/val')\n", (7994, 8053), False, 
'import os\n'), ((8075, 8136), 'os.path.join', 'os.path.join', (['ood_data_root', '"""gtFine_trainvaltest/gtFine/val"""'], {}), "(ood_data_root, 'gtFine_trainvaltest/gtFine/val')\n", (8087, 8136), False, 'import os\n'), ((9400, 9422), 'numpy.array', 'np.array', (['ood_gts_list'], {}), '(ood_gts_list)\n', (9408, 9422), True, 'import numpy as np\n'), ((9444, 9472), 'numpy.array', 'np.array', (['anomaly_score_list'], {}), '(anomaly_score_list)\n', (9452, 9472), True, 'import numpy as np\n'), ((9731, 9765), 'numpy.concatenate', 'np.concatenate', (['(ind_out, ood_out)'], {}), '((ind_out, ood_out))\n', (9745, 9765), True, 'import numpy as np\n'), ((9782, 9820), 'numpy.concatenate', 'np.concatenate', (['(ind_label, ood_label)'], {}), '((ind_label, ood_label))\n', (9796, 9820), True, 'import numpy as np\n'), ((9885, 9914), 'sklearn.metrics.roc_curve', 'roc_curve', (['val_label', 'val_out'], {}), '(val_label, val_out)\n', (9894, 9914), False, 'from sklearn.metrics import roc_auc_score, roc_curve, auc, precision_recall_curve, average_precision_score, plot_roc_curve\n'), ((9929, 9942), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (9932, 9942), False, 'from sklearn.metrics import roc_auc_score, roc_curve, auc, precision_recall_curve, average_precision_score, plot_roc_curve\n'), ((9981, 10023), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['val_label', 'val_out'], {}), '(val_label, val_out)\n', (10003, 10023), False, 'from sklearn.metrics import roc_auc_score, roc_curve, auc, precision_recall_curve, average_precision_score, plot_roc_curve\n'), ((10038, 10081), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['val_label', 'val_out'], {}), '(val_label, val_out)\n', (10061, 10081), False, 'from sklearn.metrics import roc_auc_score, roc_curve, auc, precision_recall_curve, average_precision_score, plot_roc_curve\n'), ((10111, 10144), 'ood_metrics.fpr_at_95_tpr', 'fpr_at_95_tpr', (['val_out', 'val_label'], {}), 
'(val_out, val_label)\n', (10124, 10144), False, 'from ood_metrics import fpr_at_95_tpr\n'), ((6588, 6666), 'optimizer.load_weights', 'optimizer.load_weights', (['net', 'None', 'None', 'args.snapshot', 'args.restore_optimizer'], {}), '(net, None, None, args.snapshot, args.restore_optimizer)\n', (6610, 6666), False, 'import optimizer\n'), ((7629, 7659), 'torchvision.transforms.ToTensor', 'standard_transforms.ToTensor', ([], {}), '()\n', (7657, 7659), True, 'import torchvision.transforms as standard_transforms\n'), ((7671, 7711), 'torchvision.transforms.Normalize', 'standard_transforms.Normalize', (['*mean_std'], {}), '(*mean_std)\n', (7700, 7711), True, 'import torchvision.transforms as standard_transforms\n'), ((8149, 8180), 'os.path.exists', 'os.path.exists', (['image_root_path'], {}), '(image_root_path)\n', (8163, 8180), False, 'import os\n'), ((8342, 8369), 'os.listdir', 'os.listdir', (['image_root_path'], {}), '(image_root_path)\n', (8352, 8369), False, 'import os\n'), ((8393, 8434), 'os.path.join', 'os.path.join', (['image_root_path', 'image_file'], {}), '(image_root_path, image_file)\n', (8405, 8434), False, 'import os\n'), ((8455, 8495), 'os.path.join', 'os.path.join', (['mask_root_path', 'image_file'], {}), '(mask_root_path, image_file)\n', (8467, 8495), False, 'import os\n'), ((8508, 8534), 'os.path.isfile', 'os.path.isfile', (['image_path'], {}), '(image_path)\n', (8522, 8534), False, 'import os\n'), ((8677, 8698), 'PIL.Image.open', 'Image.open', (['mask_path'], {}), '(mask_path)\n', (8687, 8698), False, 'from PIL import Image\n'), ((8721, 8735), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (8729, 8735), True, 'import numpy as np\n'), ((8769, 8795), 'numpy.expand_dims', 'np.expand_dims', (['ood_gts', '(0)'], {}), '(ood_gts, 0)\n', (8783, 8795), True, 'import numpy as np\n'), ((8815, 8830), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8828, 8830), False, 'import torch\n'), ((5914, 5925), 'time.time', 'time.time', ([], {}), '()\n', (5923, 
5925), False, 'import time\n'), ((8602, 8624), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (8612, 8624), False, 'from PIL import Image\n')] |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for official.nlp.data.tagging_data_loader."""
import os
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.nlp.data import tagging_dataloader
def _create_fake_dataset(output_path, seq_length, include_sentence_id):
  """Creates a fake dataset."""

  def _int64_feature(values):
    # Wrap a sequence of ints as a TF Example int64 feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))

  writer = tf.io.TFRecordWriter(output_path)
  for example_id in range(100):
    input_ids = np.random.randint(100, size=(seq_length))
    features = {
        'input_ids': _int64_feature(input_ids),
        'input_mask': _int64_feature(np.ones_like(input_ids)),
        'segment_ids': _int64_feature(np.ones_like(input_ids)),
        'label_ids': _int64_feature(np.random.randint(10, size=(seq_length))),
    }
    if include_sentence_id:
      features['sentence_id'] = _int64_feature([example_id])
      features['sub_sentence_id'] = _int64_feature([0])
    example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(example.SerializeToString())
  writer.close()
class TaggingDataLoaderTest(tf.test.TestCase, parameterized.TestCase):
  """Smoke tests for the tagging data loader pipeline."""

  # Run once with and once without the sentence-id features.
  @parameterized.parameters(True, False)
  def test_load_dataset(self, include_sentence_id):
    """Checks feature keys and tensor shapes of one loaded batch."""
    seq_length = 16
    batch_size = 10
    train_data_path = os.path.join(self.get_temp_dir(), 'train.tf_record')
    _create_fake_dataset(train_data_path, seq_length, include_sentence_id)
    data_config = tagging_dataloader.TaggingDataConfig(
        input_path=train_data_path,
        seq_length=seq_length,
        global_batch_size=batch_size,
        include_sentence_id=include_sentence_id)
    dataset = tagging_dataloader.TaggingDataLoader(data_config).load()
    features, labels = next(iter(dataset))
    # The loader renames the raw record keys to the model's input names.
    expected_keys = ['input_word_ids', 'input_mask', 'input_type_ids']
    if include_sentence_id:
      expected_keys.extend(['sentence_id', 'sub_sentence_id'])
    self.assertCountEqual(expected_keys, features.keys())
    self.assertEqual(features['input_word_ids'].shape, (batch_size, seq_length))
    self.assertEqual(features['input_mask'].shape, (batch_size, seq_length))
    self.assertEqual(features['input_type_ids'].shape, (batch_size, seq_length))
    self.assertEqual(labels.shape, (batch_size, seq_length))
    if include_sentence_id:
      self.assertEqual(features['sentence_id'].shape, (batch_size,))
      self.assertEqual(features['sub_sentence_id'].shape, (batch_size,))
# Allow running this test module directly.
if __name__ == '__main__':
  tf.test.main()
| [
"numpy.ones_like",
"official.nlp.data.tagging_dataloader.TaggingDataConfig",
"absl.testing.parameterized.parameters",
"tensorflow.io.TFRecordWriter",
"tensorflow.test.main",
"numpy.random.randint",
"tensorflow.train.Features",
"official.nlp.data.tagging_dataloader.TaggingDataLoader"
] | [((924, 957), 'tensorflow.io.TFRecordWriter', 'tf.io.TFRecordWriter', (['output_path'], {}), '(output_path)\n', (944, 957), True, 'import tensorflow as tf\n'), ((1853, 1890), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(True)', '(False)'], {}), '(True, False)\n', (1877, 1890), False, 'from absl.testing import parameterized\n'), ((3181, 3195), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3193, 3195), True, 'import tensorflow as tf\n'), ((1140, 1179), 'numpy.random.randint', 'np.random.randint', (['(100)'], {'size': 'seq_length'}), '(100, size=seq_length)\n', (1157, 1179), True, 'import numpy as np\n'), ((2151, 2319), 'official.nlp.data.tagging_dataloader.TaggingDataConfig', 'tagging_dataloader.TaggingDataConfig', ([], {'input_path': 'train_data_path', 'seq_length': 'seq_length', 'global_batch_size': 'batch_size', 'include_sentence_id': 'include_sentence_id'}), '(input_path=train_data_path, seq_length\n =seq_length, global_batch_size=batch_size, include_sentence_id=\n include_sentence_id)\n', (2187, 2319), False, 'from official.nlp.data import tagging_dataloader\n'), ((1288, 1311), 'numpy.ones_like', 'np.ones_like', (['input_ids'], {}), '(input_ids)\n', (1300, 1311), True, 'import numpy as np\n'), ((1362, 1385), 'numpy.ones_like', 'np.ones_like', (['input_ids'], {}), '(input_ids)\n', (1374, 1385), True, 'import numpy as np\n'), ((1443, 1481), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': 'seq_length'}), '(10, size=seq_length)\n', (1460, 1481), True, 'import numpy as np\n'), ((1673, 1708), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'features'}), '(feature=features)\n', (1690, 1708), True, 'import tensorflow as tf\n'), ((2358, 2407), 'official.nlp.data.tagging_dataloader.TaggingDataLoader', 'tagging_dataloader.TaggingDataLoader', (['data_config'], {}), '(data_config)\n', (2394, 2407), False, 'from official.nlp.data import tagging_dataloader\n')] |
# -*- coding:utf-8 -*-
import cv2
import numpy as np
def cvcar(videoUrl, pic):
    """Detect objects in one frame grabbed from a video source using YOLOv3.

    Grabs a single frame from ``videoUrl``, writes it to ``pic``, runs
    YOLOv3 detection on it (drawing boxes/labels onto the image in memory)
    and returns the scaled center coordinates of the surviving boxes.

    Parameters
    ----------
    videoUrl : str
        Video source (file path or stream URL) accepted by cv2.VideoCapture.
    pic : str
        Path where the captured frame is written, then re-read.

    Returns
    -------
    numpy.ndarray
        Flat array [x0, y0, x1, y1, ...] of box centers, scaled by 3/5.
        Empty when nothing survives non-maximum suppression.
    """
    weightsPath = 'yolov3.weights'  # model weights file
    configPath = 'yolov3.cfg'       # model configuration file
    labelsPath = 'yolov3.txt'       # class label names, one per line
    # Initialize accumulators for the raw detections.
    LABELS = open(labelsPath).read().strip().split("\n")
    boxes = []
    confidences = []
    classIDs = []
    # NOTE: the original `global str` declaration was removed — it shadowed
    # the builtin and was only used by commented-out code.
    # Build the network from the Darknet config/weights pair.
    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
    # Grab one frame, persist it, then reload it for processing.
    ret, frame = cv2.VideoCapture(videoUrl).read()
    cv2.imwrite(pic, frame)
    image = cv2.imread(pic)
    # Image height and width.
    (H, W) = image.shape[0: 2]
    # Resolve the names of the YOLO output layers.
    ln = net.getLayerNames()
    out = net.getUnconnectedOutLayers()  # indices of unconnected (output) layers
    x = []
    for i in out:
        # Older OpenCV returns nested [[200], ...]; newer returns a flat
        # array of ints — handle both so the lookup never breaks.
        layer_idx = i[0] if isinstance(i, (list, tuple, np.ndarray)) else i
        x.append(ln[layer_idx - 1])
    ln = x
    # Normalize the image into a blob matching the network's training input.
    # blobFromImage(image, scalefactor, size, mean, swapRB, crop, ddepth)
    blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(ln)  # forward pass through the output layers
    for output in layerOutputs:       # one entry per output layer
        for detection in output:      # one entry per candidate box
            # detection = [x, y, w, h, objectness, class1, class2, ...]
            scores = detection[5:]
            classID = np.argmax(scores)      # index of the best class
            confidence = scores[classID]
            if confidence > 0.5:              # drop low-confidence detections
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                # Top-left corner of the box.
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)
    # Non-maximum suppression merges overlapping boxes.
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.2, 0.3)
    a = []
    if len(idxs) > 0:
        # BUG FIX: flatten() previously ran before the emptiness check and
        # crashed whenever NMSBoxes returned an empty result.
        for seq in idxs.flatten():
            (x, y) = (boxes[seq][0], boxes[seq][1])  # box top-left corner
            (w, h) = (boxes[seq][2], boxes[seq][3])  # box width / height
            if classIDs[seq] == 0:  # color chosen by class
                color = [0, 0, 255]
            else:
                color = [0, 255, 0]
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)  # draw box
            text = "{}: {:.4f}".format(LABELS[classIDs[seq]], confidences[seq])
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.3, color, 1)  # draw label
            a.append(x + w / 2)
            a.append(y + h / 2)
    b = np.array(a)
    b = 3 * b / 5
    return b
| [
"cv2.dnn.blobFromImage",
"cv2.imwrite",
"cv2.rectangle",
"numpy.argmax",
"cv2.putText",
"numpy.array",
"cv2.VideoCapture",
"cv2.dnn.NMSBoxes",
"cv2.imread",
"cv2.dnn.readNetFromDarknet"
] | [((387, 438), 'cv2.dnn.readNetFromDarknet', 'cv2.dnn.readNetFromDarknet', (['configPath', 'weightsPath'], {}), '(configPath, weightsPath)\n', (413, 438), False, 'import cv2\n'), ((509, 532), 'cv2.imwrite', 'cv2.imwrite', (['pic', 'frame'], {}), '(pic, frame)\n', (520, 532), False, 'import cv2\n'), ((545, 560), 'cv2.imread', 'cv2.imread', (['pic'], {}), '(pic)\n', (555, 560), False, 'import cv2\n'), ((1037, 1113), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image', '(1 / 255.0)', '(416, 416)'], {'swapRB': '(True)', 'crop': '(False)'}), '(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n', (1058, 1113), False, 'import cv2\n'), ((2063, 2109), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', '(0.2)', '(0.3)'], {}), '(boxes, confidences, 0.2, 0.3)\n', (2079, 2109), False, 'import cv2\n'), ((2781, 2792), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (2789, 2792), True, 'import numpy as np\n'), ((471, 497), 'cv2.VideoCapture', 'cv2.VideoCapture', (['videoUrl'], {}), '(videoUrl)\n', (487, 497), False, 'import cv2\n'), ((1483, 1500), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (1492, 1500), True, 'import numpy as np\n'), ((2471, 2525), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(image, (x, y), (x + w, y + h), color, 2)\n', (2484, 2525), False, 'import cv2\n'), ((2624, 2701), 'cv2.putText', 'cv2.putText', (['image', 'text', '(x, y - 5)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.3)', 'color', '(1)'], {}), '(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.3, color, 1)\n', (2635, 2701), False, 'import cv2\n'), ((1654, 1676), 'numpy.array', 'np.array', (['[W, H, W, H]'], {}), '([W, H, W, H])\n', (1662, 1676), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 16 15:45:10 2021
@author: trite
"""
from MIDISynth import midi2piece
import numpy as np
from pathlib import Path
# Input piece: the MIDI path is assembled portably with pathlib.
file_name = 'tempest'
# BUG FIX: the extension separator was missing ('tempestmid'); use '.mid'.
file_path = Path('..') / Path('data') / Path('midi') \
            / Path(file_name + '.mid')
piece = midi2piece(file_name, file_path, 1.)
print(piece)  # `piece.__str__()` alone discarded the string it built

# Frequency parameters
f_min = 27.5  # A0, the lowest piano note
bins_per_octave = 12
n_bins = int(bins_per_octave * (7 + 1 / 3))  # number of bins of a piano

# Time parameters
time_resolution = 0.001  # 1 ms resolution

# Plot the piano roll over the full duration of the piece.
frequency_vector = f_min * 2 ** (np.arange(n_bins) / bins_per_octave)
time_vector = np.arange(0, piece.duration(), time_resolution)
piece.piano_roll(frequency_vector, time_vector,
                 bins_per_octave=bins_per_octave, semitone_width=1)
| [
"pathlib.Path",
"numpy.arange",
"MIDISynth.midi2piece"
] | [((289, 326), 'MIDISynth.midi2piece', 'midi2piece', (['file_name', 'file_path', '(1.0)'], {}), '(file_name, file_path, 1.0)\n', (299, 326), False, 'from MIDISynth import midi2piece\n'), ((257, 280), 'pathlib.Path', 'Path', (["(file_name + 'mid')"], {}), "(file_name + 'mid')\n", (261, 280), False, 'from pathlib import Path\n'), ((226, 238), 'pathlib.Path', 'Path', (['"""midi"""'], {}), "('midi')\n", (230, 238), False, 'from pathlib import Path\n'), ((198, 208), 'pathlib.Path', 'Path', (['""".."""'], {}), "('..')\n", (202, 208), False, 'from pathlib import Path\n'), ((211, 223), 'pathlib.Path', 'Path', (['"""data"""'], {}), "('data')\n", (215, 223), False, 'from pathlib import Path\n'), ((583, 600), 'numpy.arange', 'np.arange', (['n_bins'], {}), '(n_bins)\n', (592, 600), True, 'import numpy as np\n')] |
from collections import namedtuple, deque
from random import sample, random, randint
from math import exp
from numpy import array, zeros, argmax
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense
from tensorflow.keras.optimizers import Adam
import stockMarketSimulator as sim
# One recorded environment step: the state observed, the action taken, the
# reward received, the successor state, and a terminal flag.
State_Transition = namedtuple(
    'Transition',
    ('Current_State', 'Action', 'Reward', 'Next_State', 'done'),
)
class Experience:
    """Bounded replay buffer of environment transitions.

    Holds at most ``capacity`` transitions, silently discarding the oldest
    when full.  ``sample`` draws a uniform random mini-batch that the agent
    trains on to maximize its expected reward.
    """

    def __init__(self, capacity):
        # deque with maxlen gives O(1) append and automatic eviction.
        self.memory = deque([], maxlen=capacity)

    def see(self, transition):
        """Record one transition."""
        self.memory.append(transition)

    def sample(self, batch_size):
        """Return ``batch_size`` transitions drawn without replacement."""
        return sample(self.memory, batch_size)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory)
class DQN:
    """Factory for the deep Q-network Keras model.

    Parameters
    ----------
    input_shape : tuple
        Shape of the input vector/matrix fed to the first layer.
    n_output : int
        Number of neurons in the (linear) output layer.
    mode : int
        0 builds a 1-D convolutional network; any other value (default 1)
        builds an LSTM-based network.  Both end in dense layers.
    """

    def __init__(self, input_shape, n_output, mode=1):
        if mode == 0:
            stack = [
                Conv1D(16, 1, input_shape=input_shape, padding='same', activation='relu'),
                Conv1D(32, 1, padding='same', activation='relu'),
                Conv1D(64, 1, padding='same', activation='relu'),
                Flatten(),
                Dense(input_shape[0] * 64, activation='relu'),
                Dropout(0.2),
                Dense(16, activation='relu'),
                Dropout(0.2),
                Dense(n_output),
            ]
        else:
            stack = [
                LSTM(8, dropout=0.2, input_shape=input_shape, return_sequences=True),
                LSTM(16, dropout=0.2, return_sequences=True),
                LSTM(32, dropout=0.2, return_sequences=True),
                Flatten(),
                Dense(16, activation='relu'),
                Dropout(0.2),
                Dense(n_output),
            ]
        self.model = Sequential(stack)
        self.model.compile(optimizer=Adam(learning_rate=1e-3), loss='huber', metrics=['mse'])

    def getModelInstance(self):
        """Return the compiled Keras model."""
        return self.model
class Agent:
    """DQN agent with epsilon-greedy exploration, experience replay and a
    periodically synchronized target network.

    Parameters
    ----------
    input_shape : tuple
        Shape of the state representation fed to the networks.
    n_output : int
        Size of the action space (output neurons).
    id :
        Caller-supplied identifier for this agent.
    """

    def __init__(self, input_shape, n_output, id):
        self.id = id
        self.GAMMA = 0.999       # discount factor for future rewards
        self.BATCH_SIZE = 128    # transitions sampled per training step
        # ---------- epsilon-greedy exploration schedule ------------#
        self.EPSILON_START = 0.9
        self.EPSILON_END = 0.05
        self.EPSILON_DECAY = 200
        # -----------------------------------------------------------#
        self.TARGET_UPDATE = 10
        self.action_space = n_output
        '''
        TARGET_UPDATE is the number of episodes after which the target
        network's weights are copied from the policy network.  This provides
        stability to the model as suggested in the original DQN paper.
        '''
        self.MEMORY_SIZE = 10000  # experience replay memory capacity
        # BUG FIX: previously a single DQN instance was queried twice, so
        # policy_net and target_net referenced the SAME Keras model and the
        # "frozen" target updated on every step.  Build two independent
        # models and copy the weights once.
        self.policy_net = DQN(input_shape, n_output).getModelInstance()
        self.target_net = DQN(input_shape, n_output).getModelInstance()
        self.target_net.set_weights(self.policy_net.get_weights())
        self.memory = Experience(self.MEMORY_SIZE)
        self.time_step = 0

    def selectAction(self, state):
        """Choose an action epsilon-greedily.

        Returns the policy network's Q-value row for ``state`` when
        exploiting, or a random one-hot action array when exploring.
        """
        EPSILON_THRESHOLD = self.EPSILON_END + (self.EPSILON_START - self.EPSILON_END) * exp(-1 * self.time_step / self.EPSILON_DECAY)
        self.time_step += 1
        if random() > EPSILON_THRESHOLD:
            # Exploitation
            return self.policy_net.predict(state)
        else:
            # Exploration: uniform random one-hot action.
            temp = zeros(self.action_space)
            # BUG FIX: randint(0, n) - 1 produced -1..n-1, so index -1
            # aliased the last action, making it twice as likely.  randint
            # is inclusive at both ends, so n-1 is the correct upper bound.
            temp[randint(0, self.action_space - 1)] = 1
            return array([temp])

    def optimize(self, episode_number, done):
        """Fit the policy network on one replayed mini-batch.

        Computes Bellman-updated Q-targets using the target network and, at
        episode boundaries every TARGET_UPDATE episodes, re-syncs the target
        network's weights from the policy network.
        """
        if len(self.memory) < self.BATCH_SIZE:
            return  # not enough stored experience to train yet
        transitions = self.memory.sample(self.BATCH_SIZE)
        current_states = array([transition[0] for transition in transitions])
        current_q_values = self.policy_net.predict(current_states)
        new_states = array([transition[3] for transition in transitions])
        future_q_values = self.target_net.predict(new_states)
        X = []
        Y = []
        # BUG FIX: the unpacked flag was previously named `done`, shadowing
        # the method argument, so the sync condition below saw the last
        # transition's terminal flag instead of the caller's.
        for index, (Current_State, Action, Reward, Next_State, step_done) in enumerate(transitions):
            if not step_done:
                max_future_q = np.max(future_q_values[index])
                new_q = Reward + self.GAMMA * max_future_q
            else:
                new_q = Reward  # terminal state: no bootstrapped future value
            current_q_value = current_q_values[index]
            current_q_value[argmax(Action[0])] = new_q
            X.append(Current_State)
            Y.append(current_q_value)
        self.policy_net.fit(array(X), array(Y), batch_size=self.BATCH_SIZE, verbose=0, shuffle=False)
        if episode_number % self.TARGET_UPDATE == 0 and done:
            self.target_net.set_weights(self.policy_net.get_weights())

    def update_memory(self, state, action, reward, next_state, done):
        """Append one transition to the replay memory."""
        self.memory.see((state, action, reward, next_state, done))
| [
"random.sample",
"collections.namedtuple",
"collections.deque",
"tensorflow.keras.layers.Dropout",
"numpy.argmax",
"numpy.max",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"numpy.zeros",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Conv1D",
... | [((548, 637), 'collections.namedtuple', 'namedtuple', (['"""Transition"""', "('Current_State', 'Action', 'Reward', 'Next_State', 'done')"], {}), "('Transition', ('Current_State', 'Action', 'Reward', 'Next_State',\n 'done'))\n", (558, 637), False, 'from collections import namedtuple, deque\n'), ((1070, 1096), 'collections.deque', 'deque', (['[]'], {'maxlen': 'capacity'}), '([], maxlen=capacity)\n', (1075, 1096), False, 'from collections import namedtuple, deque\n'), ((1213, 1244), 'random.sample', 'sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (1219, 1244), False, 'from random import sample, random, randint\n'), ((1890, 1902), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1900, 1902), False, 'from tensorflow.keras.models import Sequential\n'), ((5215, 5267), 'numpy.array', 'array', (['[transition[0] for transition in transitions]'], {}), '([transition[0] for transition in transitions])\n', (5220, 5267), False, 'from numpy import array, zeros, argmax\n'), ((5352, 5404), 'numpy.array', 'array', (['[transition[3] for transition in transitions]'], {}), '([transition[3] for transition in transitions])\n', (5357, 5404), False, 'from numpy import array, zeros, argmax\n'), ((4626, 4634), 'random.random', 'random', ([], {}), '()\n', (4632, 4634), False, 'from random import sample, random, randint\n'), ((4852, 4876), 'numpy.zeros', 'zeros', (['self.action_space'], {}), '(self.action_space)\n', (4857, 4876), False, 'from numpy import array, zeros, argmax\n'), ((4947, 4960), 'numpy.array', 'array', (['[temp]'], {}), '([temp])\n', (4952, 4960), False, 'from numpy import array, zeros, argmax\n'), ((5978, 5986), 'numpy.array', 'array', (['X'], {}), '(X)\n', (5983, 5986), False, 'from numpy import array, zeros, argmax\n'), ((5987, 5995), 'numpy.array', 'array', (['Y'], {}), '(Y)\n', (5992, 5995), False, 'from numpy import array, zeros, argmax\n'), ((1950, 2023), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(16)', 
'(1)'], {'input_shape': 'input_shape', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(16, 1, input_shape=input_shape, padding='same', activation='relu')\n", (1956, 2023), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2048, 2096), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(32)', '(1)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(32, 1, padding='same', activation='relu')\n", (2054, 2096), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2122, 2170), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(64)', '(1)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, 1, padding='same', activation='relu')\n", (2128, 2170), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2196, 2205), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2203, 2205), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2234, 2279), 'tensorflow.keras.layers.Dense', 'Dense', (['(input_shape[0] * 64)'], {'activation': '"""relu"""'}), "(input_shape[0] * 64, activation='relu')\n", (2239, 2279), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2307, 2319), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2314, 2319), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2546, 2574), 'tensorflow.keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (2551, 2574), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2602, 2614), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2609, 2614), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2643, 2658), 
'tensorflow.keras.layers.Dense', 'Dense', (['n_output'], {}), '(n_output)\n', (2648, 2658), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2701, 2769), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(8)'], {'dropout': '(0.2)', 'input_shape': 'input_shape', 'return_sequences': '(True)'}), '(8, dropout=0.2, input_shape=input_shape, return_sequences=True)\n', (2705, 2769), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2795, 2839), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(16)'], {'dropout': '(0.2)', 'return_sequences': '(True)'}), '(16, dropout=0.2, return_sequences=True)\n', (2799, 2839), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2866, 2910), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(32)'], {'dropout': '(0.2)', 'return_sequences': '(True)'}), '(32, dropout=0.2, return_sequences=True)\n', (2870, 2910), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2937, 2946), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2944, 2946), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((2975, 3003), 'tensorflow.keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (2980, 3003), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((3031, 3043), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (3038, 3043), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((3072, 3087), 'tensorflow.keras.layers.Dense', 'Dense', (['n_output'], {}), '(n_output)\n', (3077, 3087), False, 'from tensorflow.keras.layers import Conv1D, MaxPool1D, LSTM, Dropout, Flatten, Dense\n'), ((3126, 3151), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 
'(0.001)'}), '(learning_rate=0.001)\n', (3130, 3151), False, 'from tensorflow.keras.optimizers import Adam\n'), ((4543, 4588), 'math.exp', 'exp', (['(-1 * self.time_step / self.EPSILON_DECAY)'], {}), '(-1 * self.time_step / self.EPSILON_DECAY)\n', (4546, 4588), False, 'from math import exp\n'), ((5636, 5666), 'numpy.max', 'np.max', (['future_q_values[index]'], {}), '(future_q_values[index])\n', (5642, 5666), True, 'import numpy as np\n'), ((5851, 5868), 'numpy.argmax', 'argmax', (['Action[0]'], {}), '(Action[0])\n', (5857, 5868), False, 'from numpy import array, zeros, argmax\n'), ((4894, 4923), 'random.randint', 'randint', (['(0)', 'self.action_space'], {}), '(0, self.action_space)\n', (4901, 4923), False, 'from random import sample, random, randint\n')] |
import csv
from datetime import datetime, timezone
import numpy as np
from exetera.core.session import Session
from exetera.core.persistence import DataStore
from exetera.core import utils, dataframe, dataset
from exetera.core import persistence as prst
from exeteracovid.algorithms.test_type_from_mechanism import test_type_from_mechanism_v2
from exeteracovid.algorithms.covid_test_date import covid_test_date_v1
from exeteracovid.algorithms.test_type_from_mechanism import pcr_standard_summarize
def save_df_to_csv(df, csv_name, chunk=200000):  # chunk=100k ~ 20M/s
    """Stream a dataframe-like mapping to CSV in fixed-size row chunks.

    ``df`` maps column names to field objects exposing their values through
    a ``.data`` sequence; rows are written chunk-by-chunk so the whole table
    never has to be materialized at once.
    """
    with open(csv_name, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        columns = list(df.keys())
        writer.writerow(columns)
        n_rows = len(df[columns[0]].data)
        for start in range(0, n_rows, chunk):
            stop = min(start + chunk, n_rows)
            # Slice every column for this chunk, then transpose to rows.
            col_slices = [df[k].data[start:stop] for k in columns]
            writer.writerows(zip(*col_slices))
# Module-level setup: an ExeTera data store, a UTC creation timestamp reused
# for fields written later, and the working data directory.
ds = DataStore()
ts = str(datetime.now(timezone.utc))
path = '/home/jd21/data'
# Assessment symptom field names summarized and filtered later in the
# pipeline (see the symptom-sum and symptom-filter sections below).
list_symptoms =['abdominal_pain', 'altered_smell', 'blisters_on_feet', 'brain_fog',
                'chest_pain', 'chills_or_shivers','delirium', 'diarrhoea',
                'diarrhoea_frequency', 'dizzy_light_headed', 'ear_ringing', 'earache',
                'eye_soreness', 'fatigue', 'feeling_down', 'fever', 'hair_loss',
                'headache', 'headache_frequency','hoarse_voice',
                'irregular_heartbeat', 'loss_of_smell', 'nausea','persistent_cough', 'rash', 'red_welts_on_face_or_lips', 'runny_nose',
                'shortness_of_breath', 'skin_burning', 'skipped_meals', 'sneezing',
                'sore_throat', 'swollen_glands', 'typical_hayfever', 'unusual_muscle_pains']
with Session() as s:
source = s.open_dataset('/home/jd21/data/processed_May17_processed.hdf5', 'r', 'src')
output = s.open_dataset('/home/jd21/data/May17_processed_mrslt.hdf5', 'w', 'out')
ds = DataStore()
ts = str(datetime.now(timezone.utc))
# # Same but for test
src_test = source['tests']
list_testid = src_test['patient_id']
list_testcreate = src_test['created_at']
out_test = output.create_dataframe('tests')
with utils.Timer('applying sort'):
for k in src_test.keys():
dataframe.copy(src_test[k], out_test, k)
# convert test date
covid_test_date_v1(s, out_test, out_test, 'date_effective_test')
# Filtering only definite results
results_raw = out_test['result'].data[:]
results_filt = np.where(np.logical_or(results_raw == 4, results_raw == 3), True, False)
for k in out_test.keys():
out_test[k].apply_filter(results_filt, in_place=True)
# Filter check
# sanity_filter = (date_fin == 0)
# print(np.sum(sanity_filter))
# Creating clean mechanism
reader_mec = out_test['mechanism'].data
s_reader_mec = s.get(out_test['mechanism'])
print(len(reader_mec), len(out_test['patient_id'].data))
reader_ftmec = out_test['mechanism_freetext'].data
s_reader_ftmec = s.get(out_test['mechanism_freetext'])
# pcr_standard_answers = out_test.create_numeric('pcr_standard_answers', 'bool')
# pcr_strong_inferred = out_test.create_numeric('pcr_strong_inferred', 'bool')
# pcr_weak_inferred = out_test.create_numeric('pcr_weak_inferred', 'bool')
# antibody_standard_answers = out_test.create_numeric('antibody_standard_answers', 'bool')
# antibody_strong_inferred = out_test.create_numeric('antibody_strong_inferred', 'bool')
# antibody_weak_inferred = out_test.create_numeric('antibody_weak_inferred', 'bool')
#
# t_pids = s.get(out_test['patient_id'])
# with utils.Timer('getting test mechanism filter for pcr and antibody', new_line=True):
# pcr_standard_answers = np.zeros(len(t_pids), dtype=np.bool)
# pcr_strong_inferred = np.zeros(len(t_pids), dtype=np.bool)
# pcr_weak_inferred = np.zeros(len(t_pids), dtype=np.bool)
# antibody_standard_answers = np.zeros(len(t_pids), dtype=np.bool)
# antibody_strong_inferred = np.zeros(len(t_pids), dtype=np.bool)
# antibody_weak_inferred = np.zeros(len(t_pids), dtype=np.bool)
#
# test_type_from_mechanism_v1(ds, s_reader_mec, s_reader_ftmec,
# pcr_standard_answers, pcr_strong_inferred, pcr_weak_inferred,
# antibody_standard_answers, antibody_strong_inferred, antibody_weak_inferred)
test_type_from_mechanism_v2(ds, out_test)
# reader_pcr_sa = s.get(out_test['pcr_standard_answers'])
# reader_pcr_si = s.get(out_test['pcr_strong_inferred'])
# reader_pcr_wi = s.get(out_test['pcr_weak_inferred'])
#
# pcr_standard = pcr_strong_inferred + pcr_standard_answers + pcr_weak_inferred
# pcr_standard = np.where(pcr_standard > 0, np.ones_like(pcr_standard), np.zeros_like(pcr_standard))
#
# #writer = ds.get_numeric_writer(out_test, 'pcr_standard', dtype='bool', timestamp=ts, writemode='overwrite')
# writer = out_test.create_numeric('pcr_standard', 'bool')
# writer.data.write(pcr_standard)
pcr_standard_summarize(s, out_test)
out_test_fin = output.create_dataframe('tests_fin')
writers_dict = {}
# other fields
for k in ('patient_id', 'date_effective_test', 'result', 'pcr_standard'):
values = out_test[k].data[:]
if k == 'result':
values -= 3
writers_dict[k] = out_test[k].create_like(out_test_fin, k, ts).data
print(len(values), k)
writers_dict[k].write_part(values)
# converted_test
values = np.zeros(len(out_test_fin['patient_id'].data), dtype='bool')
writers_dict['converted_test'] = out_test_fin.create_numeric('converted_test', 'bool', timestamp=ts).data
writers_dict['converted_test'].write_part(values)
# Taking care of the old test
src_asmt = source['assessments']
print(src_asmt.keys())
# Remap had_covid_test to 0/1 2 to binary 0,1
tcp_flat = np.where(src_asmt['tested_covid_positive'].data[:] < 1, 0, 1)
spans = src_asmt['patient_id'].get_spans()
# Get the first index at which the hct field is maximum
firstnz_tcp_ind = ds.apply_spans_index_of_max(spans, tcp_flat)
# Get the index of first element of patient_id when sorted
first_hct_ind = spans[:-1]
filt_tl = first_hct_ind != firstnz_tcp_ind
# Get the indices for which hct changed value (indicating that test happened after the first input)
sel_max_ind = ds.apply_filter(filter_to_apply=filt_tl, reader=firstnz_tcp_ind)
# Get the index at which test is maximum and for which that hct is possible
max_tcp_ind = ds.apply_spans_index_of_max(spans, src_asmt['tested_covid_positive'].data[:])
# filt_max_test = ds.apply_indices(filt_tl, max_tcp )
sel_max_tcp = ds.apply_indices(filt_tl, max_tcp_ind)
sel_maxtcp_ind = ds.apply_filter(filter_to_apply=filt_tl, reader=max_tcp_ind)
# Define usable assessments with correct test based on previous filter on indices
usable_asmt_tests = output.create_group('usable_asmt_tests')
# ====
# usable_asmt_tests 1 patients w/ multiple test and first ok
# ====
for k in ('id', 'patient_id', 'created_at', 'had_covid_test'):
src_asmt[k].create_like(usable_asmt_tests, k)
src_asmt[k].apply_index(sel_max_ind, target=usable_asmt_tests[k])
print(usable_asmt_tests[k].data[0])
src_asmt['created_at'].create_like(usable_asmt_tests, 'eff_result_time')
src_asmt['created_at'].apply_index(sel_maxtcp_ind, target=usable_asmt_tests['eff_result_time'])
src_asmt['tested_covid_positive'].create_like(usable_asmt_tests, 'eff_result')
src_asmt['tested_covid_positive'].apply_index(sel_maxtcp_ind, target=usable_asmt_tests['eff_result'])
src_asmt['tested_covid_positive'].create_like(usable_asmt_tests, 'tested_covid_positive')
src_asmt['tested_covid_positive'].apply_index(sel_max_tcp, target=usable_asmt_tests['tested_covid_positive'])
# ====
# usable_asmt_tests 2 patients w/ multiple test and first ok ; and only positive
# ====
# Making sure that the test is definite (either positive or negative)
filt_deftest = usable_asmt_tests['tested_covid_positive'].data[:] > 1
# print(len(ds.get_reader(usable_asmt_tests['patient_id'])))
for k in (
'id', 'patient_id', 'created_at', 'had_covid_test', 'tested_covid_positive', 'eff_result_time',
'eff_result'):
usable_asmt_tests[k].apply_filter(filt_deftest, in_place=True)
# ====
# usable_asmt_tests 3 delta_days_test date_final_test pcr_standard
# ====
# Getting difference between created at (max of hct date) and max of test result (eff_result_time)
reader_hct = usable_asmt_tests['created_at'].data[:]
reader_tcp = usable_asmt_tests['eff_result_time'].data[:]
with utils.Timer('doing delta time'):
delta_time = reader_tcp - reader_hct
delta_days = delta_time / 86400
print(delta_days[:10], delta_time[:10])
writer = usable_asmt_tests.create_numeric('delta_days_test', 'float32')
writer.data.write(delta_days)
# Final day of test
date_final_test = np.where(delta_days < 7, reader_hct, reader_tcp - 2 * 86400)
writer = usable_asmt_tests.create_timestamp('date_final_test')
writer.data.write(date_final_test)
# print(ds.get_reader(usable_asmt_tests['date_final_test'])[:10], date_final_test[:10])
pcr_standard = np.ones(len(usable_asmt_tests['patient_id'].data))
writer = usable_asmt_tests.create_numeric('pcr_standard', 'int')
writer.data.write(pcr_standard)
# ====
# out_test_fin copy from usable_asmt_tests
# ====
list_init = ('patient_id', 'date_final_test', 'tested_covid_positive', 'pcr_standard')
list_final = ('patient_id', 'date_effective_test', 'result', 'pcr_standard')
# Join
for (i, f) in zip(list_init, list_final):
reader = usable_asmt_tests[i].data
values = reader[:]
if f == 'result':
values -= 2
# writers_dict[f] = reader.get_writer(out_test_fin, f, ts)
print(len(values), f)
writers_dict[f].data.write(values)
writers_dict['converted_test'].data.write(np.ones(len(usable_asmt_tests['patient_id'].data), dtype='bool'))
converted_fin = out_test_fin['converted_test'].data
result_fin = out_test_fin['result'].data[:]
pat_id_fin = out_test_fin['patient_id'].data[:]
filt_pos = result_fin >= 0
# ====
# out_pos 1 copy from out_test_fin with valid result >=0
# ====
out_pos = output.create_dataframe('out_pos')
for k in out_test_fin.keys():
out_test_fin[k].create_like(out_pos, k)
out_test_fin[k].apply_filter(filt_pos, target=out_pos[k])
print(k, len(out_test_fin[k].data), len(filt_pos))
dataset.copy(out_pos, output, 'out_pos_copy')
# dict_test = {}
# for k in out_pos.keys():
# dict_test[k] = out_pos[k].data[:]
# df_test = pd.DataFrame.from_dict(dict_test)
# df_test.to_csv(path + '/TestedPositiveTestDetails.csv')
# del dict_test
# del df_test
#pat_id_all = src_asmt['patient_id'].data[:]
# ====
# out_pos 2 filter patient that has assessment
# ====
with utils.Timer('Mapping index asmt to pos only'):
test2pat = prst.foreign_key_is_in_primary_key(out_pos['patient_id'].data[:],
foreign_key=src_asmt['patient_id'].data[:])
for f in ['created_at', 'patient_id', 'treatment', 'other_symptoms', 'country_code', 'location',
'updated_at'] + list_symptoms:
print(f)
if(f in list(out_pos.keys())):
out_pos[f].data.clear()
src_asmt[f].apply_filter(test2pat, target=out_pos[f])
else:
src_asmt[f].create_like(out_pos, f)
src_asmt[f].apply_filter(test2pat, target=out_pos[f])
# reader = ds.get_reader(src_asmt[f])
# writer = reader.get_writer(out_pos,f,ts, write_mode='overwrite')
# ds.apply_filter(test2pat, reader, writer)
# print(len(np.unique(ds.get_reader(out_pos['patient_id'])[:])), len(np.unique(pat_pos[:])))
print("skip unique")
# this is duplicated with 265-273
# for k in list_symptoms:
# print(k)
# if k in list(out_pos.keys()):
# src_asmt[k].apply_filter(test2pat, target=out_pos[k])
# else:
# src_asmt[k].create_like(out_pos, k)
# src_asmt[k].apply_filter(test2pat, target=out_pos[k])
# reader = ds.get_reader(src_asmt[k])
# writer = reader.get_writer(out_pos, k,ts,write_mode='overwrite')
# ds.apply_filter(test2pat, reader,writer)
# ====
# summarize the symptoms TODO use exeteracovid.algorithm
# ====
    # Build a per-row symptom score: each listed symptom contributes 1 when
    # its coded answer exceeds the per-symptom threshold (fatigue and
    # shortness_of_breath use > 2; every other symptom uses > 1).
    # NOTE(review): out_pos, ds, prst, output, source and list_symptoms are
    # defined earlier in this script -- assumed valid here.
    sum_symp = np.zeros(len(out_pos['patient_id'].data))
    for k in list_symptoms:
        values = out_pos[k].data[:]
        if k == 'fatigue' or k == 'shortness_of_breath':
            values = np.where(values > 2, np.ones_like(values), np.zeros_like(values))
        else:
            values = np.where(values > 1, np.ones_like(values), np.zeros_like(values))
        sum_symp += values
    out_pos.create_numeric('sum_symp', 'int').data.write(sum_symp)
    # writer = ds.get_numeric_writer(out_pos, 'sum_symp', dtype='int', timestamp=ts, writemode='overwrite')
    # writer.write(sum_symp)
    # ====
    # filter the symptoms
    # ====
    # 0/1 flag per assessment row: did the row report any symptom at all
    symp_flat = np.where(out_pos['sum_symp'].data[:] < 1, 0, 1)
    # spans delimit the contiguous rows belonging to each patient_id
    spans = out_pos['patient_id'].get_spans()
    # Get the first index at which the hct field is maximum
    # (here: the first symptomatic row within each patient's span)
    firstnz_symp_ind = ds.apply_spans_index_of_max(spans, symp_flat)
    max_symp_check = symp_flat[firstnz_symp_ind]
    # Get the index of first element of patient_id when sorted
    first_symp_ind = spans[:-1]
    # a patient whose maximum symptom flag is 0 never reported symptoms
    filt_asymptomatic = max_symp_check == 0
    # keep patients whose first assessment precedes symptom onset
    # (i.e. they were observed healthy before becoming symptomatic)
    filt_firsthh_symp = first_symp_ind != firstnz_symp_ind
    print('Number asymptomatic is ', len(spans) - 1 - np.sum(max_symp_check), np.sum(filt_asymptomatic))
    print('Number not healthy first is ', len(spans) - 1 - np.sum(filt_firsthh_symp))
    print('Number definitie positive is', len(spans) - 1)
    spans_valid = ds.apply_filter(filt_firsthh_symp, first_symp_ind)
    pat_sel = ds.apply_indices(spans_valid, out_pos['patient_id'].data[:])
    # row filter over out_pos: rows whose patient is in the selected set
    filt_sel = prst.foreign_key_is_in_primary_key(pat_sel, out_pos['patient_id'].data[:])
    spans_asymp = ds.apply_filter(filt_asymptomatic, first_symp_ind)
    # ====
    # out_pos re index asymptomatic
    # ====
    pat_asymp = out_pos['patient_id'].apply_index(spans_asymp)
    #pat_asymp = ds.apply_indices(spans_asymp, ds.get_reader(out_pos['patient_id']))
    filt_pata = prst.foreign_key_is_in_primary_key(pat_asymp.data[:], out_pos['patient_id'].data[:])
    # ====
    # out_pos_hs 1 not healthy first
    # ====
    # out_pos rows restricted to "healthy first, then symptomatic" patients
    out_pos_hs = output.create_dataframe('out_pos_hs')
    for k in list_symptoms + ['created_at', 'patient_id', 'sum_symp', 'country_code', 'location', 'treatment',
                              'updated_at']:
        print(k)
        out_pos[k].create_like(out_pos_hs, k)
        out_pos[k].apply_filter(filt_sel, target=out_pos_hs[k])
        # reader = ds.get_reader(out_pos[k])
        # writer = reader.get_writer(out_pos_hs, k, ts)
        # ds.apply_filter(filt_sel, reader, writer)
    # dict_final = {}
    # for k in out_pos_hs.keys():
    #     dict_final[k] = out_pos_hs[k].data[:]
    #
    # df_final = pd.DataFrame.from_dict(dict_final)
    # df_final.to_csv(path + '/PositiveSympStartHealthyAllSymptoms.csv')
    # del dict_final
    # del df_final
    print('out_pos_asymp')
    # ====
    # out_pos_as 1 out_pos filter asymptomatic
    # ====
    # out_pos rows restricted to asymptomatic (never-symptomatic) patients
    out_pos_as = output.create_dataframe('out_pos_asymp')
    for k in list_symptoms + ['created_at', 'patient_id', 'sum_symp', 'country_code', 'location',
                              'treatment']:
        out_pos[k].create_like(out_pos_as, k)
        out_pos[k].apply_filter(filt_pata, target=out_pos_as[k])
        # reader = ds.get_reader(out_pos[k])
        # writer = reader.get_writer(out_pos_as, k, ts)
        # ds.apply_filter(filt_pata, reader, writer)
    # dict_finala = {}
    # for k in out_pos_as.keys():
    #     dict_finala[k] = out_pos_as[k].data[:]
    #
    # df_finala = pd.DataFrame.from_dict(dict_finala)
    # df_finala.to_csv(path + '/PositiveAsympAllSymptoms.csv')
    # del dict_finala
    # del df_finala
    # Based on the final selected patient_id, select the appropriate rows of the patient_table
    src_pat = source['patients']
    filt_pat = prst.foreign_key_is_in_primary_key(out_pos_hs['patient_id'].data[:], src_pat['id'].data[:])
    list_interest = ['has_cancer', 'has_diabetes', 'has_lung_disease', 'has_heart_disease', 'has_kidney_disease',
                     'has_asthma',
                     'race_is_other', 'race_is_prefer_not_to_say', 'race_is_uk_asian', 'race_is_uk_black',
                     'race_is_uk_chinese', 'race_is_uk_middle_eastern', 'race_is_uk_mixed_other',
                     'race_is_uk_mixed_white_black', 'race_is_uk_white', 'race_is_us_asian', 'race_is_us_black',
                     'race_is_us_hawaiian_pacific', 'race_is_us_indian_native', 'race_is_us_white', 'race_other',
                     'year_of_birth', 'is_smoker', 'smoker_status', 'bmi_clean', 'is_in_uk_twins',
                     'healthcare_professional', 'gender', 'id', 'blood_group', 'lsoa11cd', 'already_had_covid']
    # Demographic/comorbidity details for the symptomatic cohort
    out_pat = output.create_dataframe('patient_pos')
    print('patient_pos')
    for k in list_interest:
        src_pat[k].create_like(out_pat, k)
        src_pat[k].apply_filter(filt_pat, target=out_pat[k])
        # reader = ds.get_reader(src_pat[k])
        # writer = reader.get_writer(out_pat, k, ts)
        # ds.apply_filter(filt_pat, reader, writer)
    # dict_pat = {}
    # for k in list_interest:
    #     values = out_pat[k].data[:]
    #     dict_pat[k] = values
    #
    # df_pat = pd.DataFrame.from_dict(dict_pat)
    # df_pat.to_csv(path + '/PositiveSympStartHealthy_PatDetails.csv')
    # del dict_pat
    # del df_pat
    # NOTE(review): the next two lines recompute spans_asymp / pat_asymp
    # exactly as above -- redundant but harmless.
    spans_asymp = ds.apply_filter(filt_asymptomatic, first_symp_ind)
    #pat_asymp = ds.apply_indices(spans_asymp, ds.get_reader(out_pos['patient_id']))
    pat_asymp = out_pos['patient_id'].apply_index(spans_asymp)
    filt_asymp = prst.foreign_key_is_in_primary_key(pat_asymp.data[:], src_pat['id'].data[:])
    # Demographic/comorbidity details for the asymptomatic cohort
    out_pat_asymp = output.create_dataframe('patient_asymp')
    for k in list_interest:
        src_pat[k].create_like(out_pat_asymp, k)
        src_pat[k].apply_filter(filt_asymp, target=out_pat_asymp[k])
        # reader = ds.get_reader(src_pat[k])
        # writer = reader.get_writer(out_pat_asymp, k, ts)
        # ds.apply_filter(filt_asymp, reader, writer)
    # dict_pata = {}
    # for k in list_interest:
    #     values = out_pat_asymp[k].data[:]
    #     dict_pata[k] = values
    #
    # df_pata = pd.DataFrame.from_dict(dict_pata)
    # df_pata.to_csv(path + '/PositiveAsymp_PatDetails.csv')
| [
"exeteracovid.algorithms.test_type_from_mechanism.pcr_standard_summarize",
"numpy.ones_like",
"exetera.core.session.Session",
"exetera.core.persistence.foreign_key_is_in_primary_key",
"numpy.where",
"csv.writer",
"exeteracovid.algorithms.test_type_from_mechanism.test_type_from_mechanism_v2",
"numpy.lo... | [((1109, 1120), 'exetera.core.persistence.DataStore', 'DataStore', ([], {}), '()\n', (1118, 1120), False, 'from exetera.core.persistence import DataStore\n'), ((1130, 1156), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (1142, 1156), False, 'from datetime import datetime, timezone\n'), ((1832, 1841), 'exetera.core.session.Session', 'Session', ([], {}), '()\n', (1839, 1841), False, 'from exetera.core.session import Session\n'), ((2034, 2045), 'exetera.core.persistence.DataStore', 'DataStore', ([], {}), '()\n', (2043, 2045), False, 'from exetera.core.persistence import DataStore\n'), ((2434, 2498), 'exeteracovid.algorithms.covid_test_date.covid_test_date_v1', 'covid_test_date_v1', (['s', 'out_test', 'out_test', '"""date_effective_test"""'], {}), "(s, out_test, out_test, 'date_effective_test')\n", (2452, 2498), False, 'from exeteracovid.algorithms.covid_test_date import covid_test_date_v1\n'), ((4543, 4584), 'exeteracovid.algorithms.test_type_from_mechanism.test_type_from_mechanism_v2', 'test_type_from_mechanism_v2', (['ds', 'out_test'], {}), '(ds, out_test)\n', (4570, 4584), False, 'from exeteracovid.algorithms.test_type_from_mechanism import test_type_from_mechanism_v2\n'), ((5189, 5224), 'exeteracovid.algorithms.test_type_from_mechanism.pcr_standard_summarize', 'pcr_standard_summarize', (['s', 'out_test'], {}), '(s, out_test)\n', (5211, 5224), False, 'from exeteracovid.algorithms.test_type_from_mechanism import pcr_standard_summarize\n'), ((6061, 6122), 'numpy.where', 'np.where', (["(src_asmt['tested_covid_positive'].data[:] < 1)", '(0)', '(1)'], {}), "(src_asmt['tested_covid_positive'].data[:] < 1, 0, 1)\n", (6069, 6122), True, 'import numpy as np\n'), ((9242, 9302), 'numpy.where', 'np.where', (['(delta_days < 7)', 'reader_hct', '(reader_tcp - 2 * 86400)'], {}), '(delta_days < 7, reader_hct, reader_tcp - 2 * 86400)\n', (9250, 9302), True, 'import numpy as np\n'), ((10881, 10926), 'exetera.core.dataset.copy', 
'dataset.copy', (['out_pos', 'output', '"""out_pos_copy"""'], {}), "(out_pos, output, 'out_pos_copy')\n", (10893, 10926), False, 'from exetera.core import utils, dataframe, dataset\n'), ((13517, 13564), 'numpy.where', 'np.where', (["(out_pos['sum_symp'].data[:] < 1)", '(0)', '(1)'], {}), "(out_pos['sum_symp'].data[:] < 1, 0, 1)\n", (13525, 13564), True, 'import numpy as np\n'), ((14395, 14469), 'exetera.core.persistence.foreign_key_is_in_primary_key', 'prst.foreign_key_is_in_primary_key', (['pat_sel', "out_pos['patient_id'].data[:]"], {}), "(pat_sel, out_pos['patient_id'].data[:])\n", (14429, 14469), True, 'from exetera.core import persistence as prst\n'), ((14762, 14851), 'exetera.core.persistence.foreign_key_is_in_primary_key', 'prst.foreign_key_is_in_primary_key', (['pat_asymp.data[:]', "out_pos['patient_id'].data[:]"], {}), "(pat_asymp.data[:], out_pos['patient_id']\n .data[:])\n", (14796, 14851), True, 'from exetera.core import persistence as prst\n'), ((16652, 16747), 'exetera.core.persistence.foreign_key_is_in_primary_key', 'prst.foreign_key_is_in_primary_key', (["out_pos_hs['patient_id'].data[:]", "src_pat['id'].data[:]"], {}), "(out_pos_hs['patient_id'].data[:],\n src_pat['id'].data[:])\n", (16686, 16747), True, 'from exetera.core import persistence as prst\n'), ((18412, 18488), 'exetera.core.persistence.foreign_key_is_in_primary_key', 'prst.foreign_key_is_in_primary_key', (['pat_asymp.data[:]', "src_pat['id'].data[:]"], {}), "(pat_asymp.data[:], src_pat['id'].data[:])\n", (18446, 18488), True, 'from exetera.core import persistence as prst\n'), ((676, 695), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (686, 695), False, 'import csv\n'), ((2059, 2085), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (2071, 2085), False, 'from datetime import datetime, timezone\n'), ((2288, 2316), 'exetera.core.utils.Timer', 'utils.Timer', (['"""applying sort"""'], {}), "('applying sort')\n", (2299, 2316), False, 'from 
exetera.core import utils, dataframe, dataset\n'), ((2612, 2661), 'numpy.logical_or', 'np.logical_or', (['(results_raw == 4)', '(results_raw == 3)'], {}), '(results_raw == 4, results_raw == 3)\n', (2625, 2661), True, 'import numpy as np\n'), ((8923, 8954), 'exetera.core.utils.Timer', 'utils.Timer', (['"""doing delta time"""'], {}), "('doing delta time')\n", (8934, 8954), False, 'from exetera.core import utils, dataframe, dataset\n'), ((11306, 11351), 'exetera.core.utils.Timer', 'utils.Timer', (['"""Mapping index asmt to pos only"""'], {}), "('Mapping index asmt to pos only')\n", (11317, 11351), False, 'from exetera.core import utils, dataframe, dataset\n'), ((11372, 11485), 'exetera.core.persistence.foreign_key_is_in_primary_key', 'prst.foreign_key_is_in_primary_key', (["out_pos['patient_id'].data[:]"], {'foreign_key': "src_asmt['patient_id'].data[:]"}), "(out_pos['patient_id'].data[:],\n foreign_key=src_asmt['patient_id'].data[:])\n", (11406, 11485), True, 'from exetera.core import persistence as prst\n'), ((14065, 14090), 'numpy.sum', 'np.sum', (['filt_asymptomatic'], {}), '(filt_asymptomatic)\n', (14071, 14090), True, 'import numpy as np\n'), ((2364, 2404), 'exetera.core.dataframe.copy', 'dataframe.copy', (['src_test[k]', 'out_test', 'k'], {}), '(src_test[k], out_test, k)\n', (2378, 2404), False, 'from exetera.core import utils, dataframe, dataset\n'), ((14041, 14063), 'numpy.sum', 'np.sum', (['max_symp_check'], {}), '(max_symp_check)\n', (14047, 14063), True, 'import numpy as np\n'), ((14151, 14176), 'numpy.sum', 'np.sum', (['filt_firsthh_symp'], {}), '(filt_firsthh_symp)\n', (14157, 14176), True, 'import numpy as np\n'), ((13074, 13094), 'numpy.ones_like', 'np.ones_like', (['values'], {}), '(values)\n', (13086, 13094), True, 'import numpy as np\n'), ((13096, 13117), 'numpy.zeros_like', 'np.zeros_like', (['values'], {}), '(values)\n', (13109, 13117), True, 'import numpy as np\n'), ((13175, 13195), 'numpy.ones_like', 'np.ones_like', (['values'], {}), 
'(values)\n', (13187, 13195), True, 'import numpy as np\n'), ((13197, 13218), 'numpy.zeros_like', 'np.zeros_like', (['values'], {}), '(values)\n', (13210, 13218), True, 'import numpy as np\n')] |
import operator
import logging
import numpy as np
import pandas as pd
from .coordinates import Coordinates
from .visual import VisualAttributes
from .visual import COLORS
from .exceptions import IncompatibleAttribute
from .component_link import (ComponentLink, CoordinateComponentLink,
BinaryComponentLink)
from .subset import Subset, InequalitySubsetState, SubsetState
from .hub import Hub
from .util import (split_component_view, view_shape,
coerce_numeric, check_sorted)
from .message import (DataUpdateMessage,
DataAddComponentMessage,
SubsetCreateMessage, ComponentsChangedMessage)
from .odict import OrderedDict
__all__ = ['Data', 'ComponentID', 'Component', 'DerivedComponent',
'CategoricalComponent', 'CoordinateComponent']
# access to ComponentIDs via .item[name]
class ComponentIDDict(object):
    """Dictionary-style lookup of :class:`ComponentID` objects by label.

    Accessed as ``data.id[label]``; raises ``KeyError`` when the label
    does not resolve to exactly one component.
    """

    def __init__(self, data, **kwargs):
        self.data = data

    def __getitem__(self, key):
        cid = self.data.find_component_id(key)
        if cid is not None:
            return cid
        raise KeyError("ComponentID not found or not unique: %s"
                       % key)
class ComponentID(object):
    """A key identifying a :class:`Component` within a :class:`Data` object.

    Instances behave like dictionary keys::

        cid = data.id[name]
        data[cid]  # -> numpy array

    Comparison operators build :class:`InequalitySubsetState` objects and
    arithmetic operators build :class:`BinaryComponentLink` objects, so an
    expression such as ``cid > 3`` or ``cid_a + cid_b`` describes a derived
    quantity rather than being evaluated eagerly.
    """

    def __init__(self, label, hidden=False):
        """
        :param label: Name for the ID
        :type label: str
        :param hidden: If True, hide the component by default
        """
        self._label = label
        self._hidden = hidden

    @property
    def label(self):
        """The name of this ID."""
        return self._label

    @label.setter
    def label(self, value):
        """Change label.

        .. warning::
            Label changes are not currently tracked by client
            classes. Labels should only be changed before creating other
            client objects
        """
        self._label = value

    @property
    def hidden(self):
        """Whether to hide the component by default"""
        return self._hidden

    def __str__(self):
        return str(self._label)

    def __repr__(self):
        return str(self._label)

    # -- comparisons yield subset states --------------------------------
    def __gt__(self, other):
        return InequalitySubsetState(self, other, operator.gt)

    def __ge__(self, other):
        return InequalitySubsetState(self, other, operator.ge)

    def __lt__(self, other):
        return InequalitySubsetState(self, other, operator.lt)

    def __le__(self, other):
        return InequalitySubsetState(self, other, operator.le)

    # -- arithmetic yields component links ------------------------------
    def __add__(self, other):
        return BinaryComponentLink(self, other, operator.add)

    def __radd__(self, other):
        return BinaryComponentLink(other, self, operator.add)

    def __sub__(self, other):
        return BinaryComponentLink(self, other, operator.sub)

    def __rsub__(self, other):
        return BinaryComponentLink(other, self, operator.sub)

    def __mul__(self, other):
        return BinaryComponentLink(self, other, operator.mul)

    def __rmul__(self, other):
        return BinaryComponentLink(other, self, operator.mul)

    # NOTE: __div__/__rdiv__ and operator.div are the Python 2 division
    # hooks; they are never invoked on Python 3.
    def __div__(self, other):
        return BinaryComponentLink(self, other, operator.div)

    def __rdiv__(self, other):
        return BinaryComponentLink(other, self, operator.div)

    def __pow__(self, other):
        return BinaryComponentLink(self, other, operator.pow)

    def __rpow__(self, other):
        return BinaryComponentLink(other, self, operator.pow)
class Component(object):
    """ Stores the actual, numerical information for a particular quantity

    Data objects hold one or more components, accessed via
    ComponentIDs. All Components in a data set must have the same
    shape and number of dimensions

    Note
    ----
    Instead of instantiating Components directly, consider using
    :meth:`Component.autotyped`, which chooses a subclass most appropriate
    for the data type.
    """

    def __init__(self, data, units=None):
        """
        :param data: The data to store
        :type data: :class:`numpy.ndarray`
        :param units: Optional unit label
        :type units: str
        """
        # The physical units of the data
        self.units = units
        # The actual data
        # subclasses may pass non-arrays here as placeholders.
        if isinstance(data, np.ndarray):
            data = coerce_numeric(data)
            data.setflags(write=False)  # data is read-only
        self._data = data

    @property
    def hidden(self):
        """Whether the Component is hidden by default"""
        return False

    @property
    def data(self):
        """ The underlying :class:`numpy.ndarray` """
        return self._data

    @property
    def shape(self):
        """ Tuple of array dimensions """
        return self._data.shape

    @property
    def ndim(self):
        """ The number of dimensions """
        return len(self._data.shape)

    def __getitem__(self, key):
        logging.debug("Using %s to index data of shape %s", key, self.shape)
        return self._data[key]

    @property
    def numeric(self):
        """
        Whether or not the datatype is numeric
        """
        # BUGFIX: use the builtin `complex` rather than `np.complex`,
        # a deprecated alias removed in NumPy 1.24 (both name the same type).
        return np.can_cast(self.data[0], complex)

    def __str__(self):
        return "Component with shape %s" % (self.shape,)

    def jitter(self, method=None):
        # jittering only makes sense for categorical data; see subclasses
        raise NotImplementedError

    def to_series(self, **kwargs):
        """ Convert into a pandas.Series object.

        :param kwargs: All kwargs are passed to the Series constructor.
        :return: pandas.Series
        """
        return pd.Series(self.data.ravel(), **kwargs)

    @classmethod
    def autotyped(cls, data, units=None):
        """
        Automatically choose between Component and CategoricalComponent,
        based on the input data type.

        :param data: The data to pack into a Component
        :type data: Array-like
        :param units: Optional units
        :type units: str

        :returns: A Component (or subclass)
        """
        data = np.asarray(data)
        n = coerce_numeric(data)
        # treat the column as numeric when most entries coerced cleanly
        thresh = 0.5
        if np.isfinite(n).mean() > thresh:
            return Component(n, units=units)
        elif np.issubdtype(data.dtype, np.character):
            return CategoricalComponent(data, units=units)
        return Component(data, units=units)
class DerivedComponent(Component):
    """A component whose values are computed on demand by a ComponentLink."""

    def __init__(self, data, link, units=None):
        """
        :param data: The data object to use for calculation
        :type data: :class:`~glue.core.data.Data`
        :param link: The link that carries out the function
        :type link: :class:`~glue.core.component_link.ComponentLink`
        :param units: Optional unit description
        """
        super(DerivedComponent, self).__init__(data, units=units)
        self._link = link

    def set_parent(self, data):
        """ Reassign the Data object that this DerivedComponent operates on """
        self._data = data

    @property
    def link(self):
        """ Return the component link """
        return self._link

    @property
    def hidden(self):
        # visibility is delegated to the underlying link
        return self._link.hidden

    @property
    def data(self):
        """ Return the numerical data as a numpy array """
        return self._link.compute(self._data)

    def __getitem__(self, key):
        # compute only the requested view rather than the full array
        return self._link.compute(self._data, key)
class CoordinateComponent(Component):
    """
    Components associated with pixel or world coordinates

    The numerical values are computed on the fly.
    """
    def __init__(self, data, axis, world=False):
        # data: the parent Data object (supplies shape and coords);
        # axis: which coordinate axis this component describes;
        # world: if True, values are world coordinates, else pixel indices.
        # Component.__init__ receives None -- values are derived on demand.
        super(CoordinateComponent, self).__init__(None, None)
        self.world = world
        self._data = data
        self.axis = axis
    @property
    def data(self):
        # full coordinate array, computed lazily
        return self._calculate()
    def _calculate(self, view=None):
        # Build open index grids covering the parent's shape, then broadcast
        # them to full arrays; optionally restrict to `view` first.
        slices = [slice(0, s, 1) for s in self.shape]
        grids = np.broadcast_arrays(*np.ogrid[slices])
        if view is not None:
            grids = [g[view] for g in grids]
        if self.world:
            # axis order is reversed going into and out of pixel2world
            # (presumably numpy vs. WCS axis ordering -- see Coordinates)
            world = self._data.coords.pixel2world(*grids[::-1])[::-1]
            return world[self.axis]
        else:
            return grids[self.axis]
    @property
    def shape(self):
        """ Tuple of array dimensions. """
        return self._data.shape
    @property
    def ndim(self):
        """ Number of dimensions """
        return len(self._data.shape)
    def __getitem__(self, key):
        # compute only the requested slice, never the full array
        return self._calculate(key)
    def __lt__(self, other):
        # order by axis; when types differ, world components sort first
        if self.world == other.world:
            return self.axis < other.axis
        return self.world
    def __gluestate__(self, context):
        # serialization: only axis/world are stored; the parent Data is
        # re-attached on restore
        return dict(axis=self.axis, world=self.world)
    @classmethod
    def __setgluestate__(cls, rec, context):
        return cls(None, rec['axis'], rec['world'])
class CategoricalComponent(Component):
    """
    Container for categorical data.

    The raw category values are kept in ``_categorical_data``; a parallel
    float array ``_data`` holds each value's index within the sorted
    category list (NaN for values not in the list), optionally jittered.
    """

    def __init__(self, categorical_data, categories=None, jitter=None, units=None):
        """
        :param categorical_data: The underlying :class:`numpy.ndarray`
        :param categories: List of unique values in the data
        :jitter: Strategy for jittering the data
        """
        super(CategoricalComponent, self).__init__(None, units)
        # builtin `object` dtype keeps arbitrary category values intact.
        # BUGFIX: `np.object` was a deprecated alias removed in NumPy 1.24.
        self._categorical_data = np.asarray(categorical_data, dtype=object)
        self._categorical_data.setflags(write=False)  # read-only
        self._categories = categories
        self._jitter_method = jitter
        self._is_jittered = False
        self._data = None
        if self._categories is None:
            self._update_categories()
        else:
            self._update_data()

    def _update_categories(self, categories=None):
        """
        :param categories: A sorted array of categories to find in the dataset.
            If None the categories are the unique items in the data.
        :return: None
        """
        if categories is None:
            categories, inv = np.unique(self._categorical_data,
                                        return_inverse=True)
            self._categories = categories
            # BUGFIX: `np.float` alias removed in NumPy 1.24; builtin float
            # is the identical type.
            self._data = inv.astype(float)
            self._data.setflags(write=False)
            self.jitter(method=self._jitter_method)
        else:
            if check_sorted(categories):
                self._categories = categories
                self._update_data()
            else:
                raise ValueError("Provided categories must be Sorted")

    def _update_data(self):
        """ Converts the categorical data into the numeric representations
        given self._categories
        """
        self._is_jittered = False
        # Complicated because of the case of items not in
        # self._categories may be on either side of the sorted list
        left = np.searchsorted(self._categories,
                               self._categorical_data,
                               side='left')
        right = np.searchsorted(self._categories,
                                self._categorical_data,
                                side='right')
        self._data = left.astype(float)
        # values absent from the category list map to NaN
        self._data[(left == 0) & (right == 0)] = np.nan
        self._data[left == len(self._categories)] = np.nan
        self._data[self._data == len(self._categories)] = np.nan
        self.jitter(method=self._jitter_method)
        self._data.setflags(write=False)

    def jitter(self, method=None):
        """
        Jitter the data so the density of points can be easily seen in a
        scatter plot.

        :param method: None | 'uniform':
            * None: No jittering is done (or any jittering is undone).
            * uniform: A uniformly distributed random variable (-0.5, 0.5)
              is applied to each point.
        :return: None
        """
        if method not in {'uniform', None}:
            raise ValueError('%s jitter not supported' % method)
        self._jitter_method = method
        # fixed seed so jitter is reproducible across calls
        seed = 1234567890
        rand_state = np.random.RandomState(seed)
        if (self._jitter_method is None) and self._is_jittered:
            self._update_data()
        # BUGFIX: compare strings with ==, not `is` -- identity of string
        # literals is an implementation detail (SyntaxWarning on 3.8+).
        elif (self._jitter_method == 'uniform') and not self._is_jittered:
            iswrite = self._data.flags['WRITEABLE']
            self._data.setflags(write=True)
            self._data += rand_state.uniform(-0.5, 0.5, size=self._data.shape)
            self._is_jittered = True
            self._data.setflags(write=iswrite)

    def to_series(self, **kwargs):
        """ Convert into a pandas.Series object.

        This will be converted with dtype=object!

        :param kwargs: All kwargs are passed to the Series constructor.
        :return: pandas.Series
        """
        return pd.Series(self._categorical_data.ravel(),
                         dtype=object, **kwargs)
class Data(object):
    """The basic data container in Glue.

    The data object stores data as a collection of
    :class:`~glue.core.data.Component` objects. Each component stored in a
    dataset must have the same shape.

    Catalog data sets are stored such that each column is a distinct
    1-dimensional :class:`~glue.core.data.Component`.

    There are several ways to extract the actual numerical data stored in a
    :class:`~glue.core.data.Data` object::

       data = Data(x=[1, 2, 3], label='data')
       xid = data.id['x']

       data[xid]
       data.get_component(xid).data
       data['x']  # if 'x' is a unique component name

    Likewise, datasets support :ref:`fancy indexing <numpy:basics.indexing>`::

        data[xid, 0:2]
        data[xid, [True, False, True]]

    See also: :ref:`data_tutorial`
    """
    def __init__(self, label="", **kwargs):
        """
        :param label: label for data
        :type label: str

        Extra array-like keyword arguments are wrapped in Components and
        added to the data set under the keyword's name.
        """
        # Coordinate conversion object (identity transform by default)
        self.coords = Coordinates()
        self._shape = ()
        # Components, keyed by ComponentID (insertion order preserved)
        self._components = OrderedDict()
        self._pixel_component_ids = []
        self._world_component_ids = []
        # lookup helper: data.id[label] -> ComponentID
        self.id = ComponentIDDict(self)
        # Tree description of the data
        # (Deprecated)
        self.tree = None
        # Subsets of the data
        self._subsets = []
        # Hub that the data is attached to (see register_to_hub)
        self.hub = None
        self.style = VisualAttributes(parent=self)
        # cache for the coordinate_links property
        self._coordinate_links = None
        # self-reference: `.data` resolves to this object
        self.data = self
        self.label = label
        self.edit_subset = None
        # wrap any extra keywords into components
        for lbl, data in kwargs.items():
            self.add_component(data, lbl)
@property
def subsets(self):
"""
Tuple of subsets attached to this dataset
"""
return tuple(self._subsets)
@property
def ndim(self):
"""
Dimensionality of the dataset
"""
return len(self.shape)
@property
def shape(self):
"""
Tuple of array dimensions, like :attr:`numpy.ndarray.shape`
"""
return self._shape
@property
def label(self):
""" Convenience access to data set's label """
return self._label
@label.setter
def label(self, value):
""" Set the label to value
"""
self._label = value
self.broadcast(attribute='label')
@property
def size(self):
"""
Total number of elements in the dataset.
"""
return np.product(self.shape)
def _check_can_add(self, component):
if isinstance(component, DerivedComponent):
return component._data is self
else:
if len(self._components) == 0:
return True
return component.shape == self.shape
def dtype(self, cid):
"""Lookup the dtype for the data associated with a ComponentID"""
# grab a small piece of data
ind = tuple([slice(0, 1)] * self.ndim)
arr = self[cid, ind]
return arr.dtype
def remove_component(self, component_id):
""" Remove a component from a data set
:param component_id: the component to remove
:type component_id: :class:`~glue.core.data.ComponentID`
"""
if component_id in self._components:
self._components.pop(component_id)
    def add_component(self, component, label, hidden=False):
        """ Add a new component to this data set.

        :param component: object to add; non-Component values are wrapped
            via :meth:`Component.autotyped`
        :param label:
            The label. If this is a string,
            a new :class:`ComponentID` with this label will be
            created and associated with the Component
        :type component: :class:`~glue.core.data.Component` or array-like
        :type label: :class:`str` or :class:`~glue.core.data.ComponentID`

        :raises:
           TypeError, if label is invalid
           ValueError if the component has an incompatible shape

        :returns:
           The ComponentID associated with the newly-added component
        """
        if not isinstance(component, Component):
            component = Component.autotyped(component)
        # derived components compute values off this data object
        if isinstance(component, DerivedComponent):
            component.set_parent(self)
        if not(self._check_can_add(component)):
            raise ValueError("The dimensions of component %s are "
                             "incompatible with the dimensions of this data: "
                             "%r vs %r" % (label, component.shape, self.shape))
        # NOTE(review): `basestring` implies this module targets Python 2
        if isinstance(label, ComponentID):
            component_id = label
        elif isinstance(label, basestring):
            component_id = ComponentID(label, hidden=hidden)
        else:
            raise TypeError("label must be a ComponentID or string")
        is_present = component_id in self._components
        self._components[component_id] = component
        # the first real component fixes the data's shape and triggers
        # creation of the hidden pixel/world coordinate components
        first_component = len(self._components) == 1
        if first_component:
            if isinstance(component, DerivedComponent):
                raise TypeError("Cannot add a derived component as "
                                "first component")
            self._shape = component.shape
            self._create_pixel_and_world_components()
        # notify clients, but only for genuinely new ComponentIDs
        if self.hub and (not is_present):
            msg = DataAddComponentMessage(self, component_id)
            self.hub.broadcast(msg)
            msg = ComponentsChangedMessage(self)
            self.hub.broadcast(msg)
        return component_id
    def add_component_link(self, link, cid=None):
        """ Shortcut method for generating a new :class:`DerivedComponent`
        from a ComponentLink object, and adding it to a data set.

        :param link: :class:`~glue.core.component_link.ComponentLink`
        :param cid: optional ComponentID (or label string) to assign the
            link's output to; overrides the link's own 'to' ID

        :returns:
            The :class:`DerivedComponent` that was added
        """
        if cid is not None:
            # accept a plain label; wrap it in a fresh ComponentID
            if isinstance(cid, basestring):
                cid = ComponentID(cid)
            link.set_to_id(cid)
        if link.get_to_id() is None:
            raise TypeError("Cannot add component_link: "
                            "has no 'to' ComponentID")
        dc = DerivedComponent(self, link)
        to_ = link.get_to_id()
        self.add_component(dc, to_)
        return dc
def _create_pixel_and_world_components(self):
for i in range(self.ndim):
comp = CoordinateComponent(self, i)
label = pixel_label(i, self.ndim)
cid = self.add_component(comp, "Pixel %s" % label, hidden=True)
self._pixel_component_ids.append(cid)
if self.coords:
for i in range(self.ndim):
comp = CoordinateComponent(self, i, world=True)
label = self.coords.axis_label(i)
cid = self.add_component(comp, label, hidden=True)
self._world_component_ids.append(cid)
@property
def components(self):
""" All :class:`ComponentIDs <ComponentID>` in the Data
:rtype: list
"""
return sorted(self._components.keys(), key=lambda x: x.label)
@property
def visible_components(self):
""" :class:`ComponentIDs <ComponentID>` for all non-hidden components.
:rtype: list
"""
return [cid for cid, comp in self._components.items()
if not cid.hidden and not comp.hidden]
@property
def primary_components(self):
"""The ComponentIDs not associated with a :class:`DerivedComponent`
:rtype: list
"""
return [c for c in self.component_ids() if
not isinstance(self._components[c], DerivedComponent)]
@property
def derived_components(self):
"""The ComponentIDs for each :class:`DerivedComponent`
:rtype: list
"""
return [c for c in self.component_ids() if
isinstance(self._components[c], DerivedComponent)]
@property
def pixel_component_ids(self):
"""
The :class:`ComponentIDs <ComponentID>` for each pixel coordinate.
"""
return self._pixel_component_ids
@property
def world_component_ids(self):
"""
The :class:`ComponentIDs <ComponentID>` for each world coordinate.
"""
return self._world_component_ids
def find_component_id(self, label):
""" Retrieve component_ids associated by label name.
:param label: string to search for
:returns:
The associated ComponentID if label is found and unique, else None
"""
result = [cid for cid in self.component_ids() if
cid.label == label]
if len(result) == 1:
return result[0]
    @property
    def coordinate_links(self):
        """A list of the ComponentLinks that connect pixel and
        world. If no coordinate transformation object is present,
        return an empty list.

        The result is computed once and cached in ``_coordinate_links``.
        """
        if self._coordinate_links:
            return self._coordinate_links
        if not self.coords:
            return []
        if self.ndim != len(self._pixel_component_ids) or \
                self.ndim != len(self._world_component_ids):
            # haven't populated pixel, world coordinates yet
            return []
        # NOTE(review): the two closure factories below are never used in
        # this method -- CoordinateComponentLink performs the transforms
        # itself. Presumably leftovers from an older implementation.
        def make_toworld_func(i):
            def pix2world(*args):
                return self.coords.pixel2world(*args[::-1])[::-1][i]
            return pix2world
        def make_topixel_func(i):
            def world2pix(*args):
                return self.coords.world2pixel(*args[::-1])[::-1][i]
            return world2pix
        # one pixel->world and one world->pixel link per axis
        result = []
        for i in range(self.ndim):
            link = CoordinateComponentLink(self._pixel_component_ids,
                                           self._world_component_ids[i],
                                           self.coords, i)
            result.append(link)
            link = CoordinateComponentLink(self._world_component_ids,
                                           self._pixel_component_ids[i],
                                           self.coords, i, pixel2world=False)
            result.append(link)
        self._coordinate_links = result
        return result
def get_pixel_component_id(self, axis):
"""Return the pixel :class:`ComponentID` associated with a given axis
"""
return self._pixel_component_ids[axis]
def get_world_component_id(self, axis):
"""Return the world :class:`ComponentID` associated with a given axis
"""
return self._world_component_ids[axis]
def component_ids(self):
"""
Equivalent to :attr:`Data.components`
"""
return list(self._components.keys())
def new_subset(self, subset=None, color=None, label=None, **kwargs):
"""
Create a new subset, and attach to self.
.. note:: The preferred way for creating subsets is via
:meth:`~glue.core.data_collection.DataCollection.new_subset_group`.
Manually-instantiated subsets will **not** be
represented properly by the UI
:param subset: optional, reference subset or subset state.
If provided, the new subset will copy the logic of
this subset.
:returns: The new subset object
"""
nsub = len(self.subsets)
color = color or COLORS[nsub % len(COLORS)]
label = label or "%s.%i" % (self.label, nsub + 1)
new_subset = Subset(self, color=color, label=label, **kwargs)
if subset is not None:
new_subset.subset_state = subset.subset_state.copy()
self.add_subset(new_subset)
return new_subset
    def add_subset(self, subset):
        """Assign a pre-existing subset to this data object.

        :param subset: A :class:`~glue.core.subset.Subset` or
            :class:`~glue.core.subset.SubsetState` object

        If input is a :class:`~glue.core.subset.SubsetState`,
        it will be wrapped in a new Subset automatically

        .. note:: The preferred way for creating subsets is via
            :meth:`~glue.core.data_collection.DataCollection.new_subset_group`.
            Manually-instantiated subsets will **not** be
            represented properly by the UI
        """
        if subset in self.subsets:
            return  # prevents infinite recursion
        if isinstance(subset, SubsetState):
            # auto-wrap state in subset
            state = subset
            subset = Subset(None)
            subset.subset_state = state
        self._subsets.append(subset)
        # adopt subsets created against another (or no) data object
        if subset.data is not self:
            subset.do_broadcast(False)
            subset.data = self
            subset.label = subset.label  # hacky. disambiguates name if needed
        # announce the new subset to any attached hub
        if self.hub is not None:
            msg = SubsetCreateMessage(subset)
            self.hub.broadcast(msg)
        subset.do_broadcast(True)
def register_to_hub(self, hub):
""" Connect to a hub.
This method usually doesn't have to be called directly, as
DataCollections manage the registration of data objects
"""
if not isinstance(hub, Hub):
raise TypeError("input is not a Hub object: %s" % type(hub))
self.hub = hub
def broadcast(self, attribute=None):
"""
Send a :class:`~glue.core.message.DataUpdateMessage` to the hub
:param attribute: Name of an attribute that has changed
:type attribute: str
"""
if not self.hub:
return
msg = DataUpdateMessage(self, attribute=attribute)
self.hub.broadcast(msg)
    def update_id(self, old, new):
        """Reassign a component to a different :class:`ComponentID`

        :param old: The old :class:`ComponentID`.
        :param new: The new :class:`ComponentID`.
        """
        changed = False
        if old in self._components:
            # re-key the component under the new ID
            self._components[new] = self._components.pop(old)
            changed = True
        # list.index raises ValueError when `old` is absent, hence the
        # try/except around each coordinate-ID list
        try:
            index = self._pixel_component_ids.index(old)
            self._pixel_component_ids[index] = new
            changed = True
        except ValueError:
            pass
        try:
            index = self._world_component_ids.index(old)
            self._world_component_ids[index] = new
            changed = True
        except ValueError:
            pass
        # only notify clients if something was actually renamed
        if changed and self.hub is not None:
            self.hub.broadcast(ComponentsChangedMessage(self))
def __str__(self):
s = "Data Set: %s" % self.label
s += "Number of dimensions: %i\n" % self.ndim
s += "Shape: %s\n" % ' x '.join([str(x) for x in self.shape])
s += "Components:\n"
for i, component in enumerate(self._components):
s += " %i) %s\n" % (i, component)
return s[:-1]
def __repr__(self):
return 'Data (label: %s)' % self.label
    def __setattr__(self, name, value):
        # Guard against silently re-homing this object: once a hub has been
        # assigned, it may only be re-set to the same hub (or from None).
        if name == "hub" and hasattr(self, 'hub') \
                and self.hub is not value and self.hub is not None:
            raise AttributeError("Data has already been assigned "
                                 "to a different hub")
        # all other attributes use the default assignment behavior
        object.__setattr__(self, name, value)
    def __getitem__(self, key):
        """ Shortcut syntax to access the numerical data in a component.
        Equivalent to:
        ``component = data.get_component(component_id).data``
        :param key:
          The component to fetch data from
        :type key: :class:`~glue.core.data.ComponentID`
        :returns: :class:`~numpy.ndarray`
        """
        # key may carry an optional numpy-style view, e.g. data[cid, 0:5]
        key, view = split_component_view(key)
        if isinstance(key, basestring):
            _k = key
            key = self.find_component_id(key)
            if key is None:
                # report the original string, not the failed lookup result
                raise IncompatibleAttribute(_k)
        if isinstance(key, ComponentLink):
            # links are derived quantities -- compute them on the fly
            return key.compute(self, view)
        try:
            comp = self._components[key]
        except KeyError:
            raise IncompatibleAttribute(key)
        # expected result shape once the view is applied
        shp = view_shape(self.shape, view)
        if view is not None:
            result = comp[view]
        else:
            # no view requested: return the full underlying array
            result = comp.data
        assert result.shape == shp, \
            "Component view returned bad shape: %s %s" % (result.shape, shp)
        return result
def get_component(self, component_id):
"""Fetch the component corresponding to component_id.
:param component_id: the component_id to retrieve
"""
if component_id is None:
raise IncompatibleAttribute()
if isinstance(component_id, basestring):
component_id = self.id[component_id]
try:
return self._components[component_id]
except KeyError:
raise IncompatibleAttribute(component_id)
def to_dataframe(self, index=None):
""" Convert the Data object into a pandas.DataFrame object
:param index: Any 'index-like' object that can be passed to the
pandas.Series constructor
:return: pandas.DataFrame
"""
h = lambda comp: self.get_component(comp).to_series(index=index)
df = pd.DataFrame({comp.label: h(comp) for comp in self.components})
order = [comp.label for comp in self.components]
return df[order]
def pixel_label(i, ndim):
    """Return a human-readable name for pixel axis *i* of an *ndim*-d grid.

    2-d grids use ('y', 'x'), 3-d grids use ('z', 'y', 'x'); anything else
    falls back to a generic "Axis <i>" label.
    """
    named_axes = {2: ('y', 'x'), 3: ('z', 'y', 'x')}
    if ndim in named_axes:
        return named_axes[ndim][i]
    return "Axis %s" % i
| [
"numpy.product",
"logging.debug",
"numpy.unique",
"numpy.searchsorted",
"numpy.asarray",
"numpy.can_cast",
"numpy.issubdtype",
"numpy.isfinite",
"numpy.broadcast_arrays",
"numpy.random.RandomState"
] | [((5012, 5080), 'logging.debug', 'logging.debug', (['"""Using %s to index data of shape %s"""', 'key', 'self.shape'], {}), "('Using %s to index data of shape %s', key, self.shape)\n", (5025, 5080), False, 'import logging\n'), ((5236, 5273), 'numpy.can_cast', 'np.can_cast', (['self.data[0]', 'np.complex'], {}), '(self.data[0], np.complex)\n', (5247, 5273), True, 'import numpy as np\n'), ((6085, 6101), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (6095, 6101), True, 'import numpy as np\n'), ((8025, 8063), 'numpy.broadcast_arrays', 'np.broadcast_arrays', (['*np.ogrid[slices]'], {}), '(*np.ogrid[slices])\n', (8044, 8063), True, 'import numpy as np\n'), ((9432, 9477), 'numpy.asarray', 'np.asarray', (['categorical_data'], {'dtype': 'np.object'}), '(categorical_data, dtype=np.object)\n', (9442, 9477), True, 'import numpy as np\n'), ((10920, 10990), 'numpy.searchsorted', 'np.searchsorted', (['self._categories', 'self._categorical_data'], {'side': '"""left"""'}), "(self._categories, self._categorical_data, side='left')\n", (10935, 10990), True, 'import numpy as np\n'), ((11069, 11140), 'numpy.searchsorted', 'np.searchsorted', (['self._categories', 'self._categorical_data'], {'side': '"""right"""'}), "(self._categories, self._categorical_data, side='right')\n", (11084, 11140), True, 'import numpy as np\n'), ((12105, 12132), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (12126, 12132), True, 'import numpy as np\n'), ((15561, 15583), 'numpy.product', 'np.product', (['self.shape'], {}), '(self.shape)\n', (15571, 15583), True, 'import numpy as np\n'), ((6257, 6296), 'numpy.issubdtype', 'np.issubdtype', (['data.dtype', 'np.character'], {}), '(data.dtype, np.character)\n', (6270, 6296), True, 'import numpy as np\n'), ((10092, 10146), 'numpy.unique', 'np.unique', (['self._categorical_data'], {'return_inverse': '(True)'}), '(self._categorical_data, return_inverse=True)\n', (10101, 10146), True, 'import numpy as np\n'), ((6167, 
6181), 'numpy.isfinite', 'np.isfinite', (['n'], {}), '(n)\n', (6178, 6181), True, 'import numpy as np\n')] |
import sys
from timeit import timeit
import matplotlib.pyplot as plt
import numpy as np
# from numba import jit, njit
# from numba.typed import List
from statistics import mean
import ray
from multiprocessing import Pool
# Silences Numba warnings about a Python list being passed into a numba function
import warnings
warnings.filterwarnings('ignore')
#### OPTIONS ####
testCppFunctions = False #- set up for PyBind11 - only installed on one of my computers
testJuliaFunctions = True # - excessively slow - only installed on one of my computers
testCython = False
nTests=50
maxArraySize=1000
step=50
arrayLengths = list(range(1, maxArraySize, step))
#### END OPTIONS ####
#### Functions to test ####
def createPythonList(length):
    """Return [0.0, 1.0, ..., length-1.0] as a plain Python list of floats."""
    return [float(i) for i in range(length)]
def createNumpyArray(length):
    """Return the same 0..length-1 sequence as a float64 NumPy array."""
    values = createPythonList(length)
    return np.array(values, dtype=np.float64)
# def createNumbaTypedList(length):
# pyList = createPythonList(length)
# typedList = List()
# [ typedList.append(x) for x in pyList ]
# return typedList
def addFive_Python(array):
    """Return a new list where 5.0 has been added to every element."""
    result = []
    for value in array:
        result.append(value + 5.0)
    return result
@ray.remote
def addFive_Python_RaySub(array):
    # Remote-executable worker: the same pure-Python add as addFive_Python,
    # but runnable inside a Ray worker process via .remote().
    return [ x+5.0 for x in array ]
def addFive_Python_Ray(array):
    """Add 5.0 to every element by splitting the array across 2 Ray tasks."""
    chunk = round(len(array)/2)
    # launch both halves asynchronously, then block on the results in order
    future1 = addFive_Python_RaySub.remote(array[:chunk])
    future2 = addFive_Python_RaySub.remote(array[chunk:])
    return ray.get(future1) + ray.get(future2)
def addFive_Python_Multiprocessing(array):
    """Add 5.0 to every element by splitting the work across 2 processes."""
    half = round(len(array)/2)
    halves = [array[:half], array[half:]]
    with Pool(2) as workers:
        mapped = workers.map(addFive_Python, halves)
    return mapped[0] + mapped[1]
# @njit()
def addFive_Numba(array):
    """Add 5.0 to every element of *array* in place and return it.

    Written with an explicit index loop so it stays numba-compilable if
    the @njit decorator above is re-enabled.
    """
    n = len(array)
    for idx in range(n):
        array[idx] = array[idx] + 5.0
    return array
def addFive_Numpy(array):
    """Add 5.0 to a NumPy array in place (vectorized) and return it."""
    np.add(array, 5.0, out=array)
    return array
# Assume the Python function will be imported from this file
pythonBenchmarkSetupString = """
from addScalarToArray import {}, {}
pyList = {}({})
"""
# Assume Comparison function list generator will be imported from this file
# Comparison function can be imported from any module
comparisonBenchmarkSetupString = """
from addScalarToArray import {}
from {} import {}
comparisonArray = {}({})
# Call function once to pre-compile it for JIT methods like numba
{}(comparisonArray)
"""
#### Functions that do the testing / result plotting ####
def getSpeedup(length, comparisonFunction, comparisonFunctionListGenerator, comparisonFnModule, pythonFunction, pythonListGenerator):
    """Return (pure-Python time) / (comparison-function time) for one length.

    Both timings run ``nTests`` iterations via timeit. The setup strings
    import the named functions from this module by name, so everything
    passed in must be importable from addScalarToArray.
    """
    # build the timeit setup snippets from the module-level templates
    setupPython = pythonBenchmarkSetupString.format(pythonFunction, pythonListGenerator, pythonListGenerator, length)
    setupComparison = comparisonBenchmarkSetupString.format(comparisonFunctionListGenerator, comparisonFnModule, comparisonFunction.__name__, comparisonFunctionListGenerator, length, comparisonFunction.__name__)
    pythonTime = timeit("{}(pyList)".format(pythonFunction), setup=setupPython, number=nTests)
    fnTime = timeit("{}(comparisonArray)".format(comparisonFunction.__name__), setup=setupComparison, number=nTests)
    # ratio > 1 means the comparison implementation is faster than pure Python
    return pythonTime/fnTime
def plotSpeedupForEachArrayLength(function, label="Unlabelled", comparisonFunctionListGenerator="createNumpyArray", comparisonFnModule="addScalarToArray", pythonFunction="addFive_Python", pythonListGenerator="createPythonList"):
    """Benchmark *function* against pure Python at every length in
    arrayLengths, print min/max/mean speedups, and add a line to the
    current matplotlib figure (shown later by the caller)."""
    ratios = [ getSpeedup(l, function, comparisonFunctionListGenerator, comparisonFnModule, pythonFunction, pythonListGenerator) for l in arrayLengths ]
    print("Speedup {:<40}: {:>6.2f}, {:>6.2f}, {:>6.2f}".format("("+label+")", min(ratios), max(ratios), mean(ratios)))
    # Plot result, with different line styles depending on which data type is being operated on
    if "ndarray" in label:
        plt.plot(arrayLengths, ratios, linestyle="dashed", label=label)
    elif "numba.typed.List" in label:
        plt.plot(arrayLengths, ratios, linestyle="dotted", label=label)
    else:
        plt.plot(arrayLengths, ratios, label=label)
if testJuliaFunctions:
import julia
from julia import Main
Main.include("addScalar.jl")
# function to test is Main.addFive - seems very slow, must be converting types or not being compiled
addFive_Julia = Main.addFive_Julia
#### Main ####
if __name__ == "__main__":
print("Each operation performed {} times".format(nTests))
print("Speedup {:<40}: {:>6}, {:>6}, {:>6}".format("", "Min", "Max", "Mean"))
plotSpeedupForEachArrayLength(addFive_Python, label="Python loop: ndarray")
plotSpeedupForEachArrayLength(addFive_Python, label="Python loop: list", comparisonFunctionListGenerator="createPythonList")
plotSpeedupForEachArrayLength(addFive_Numpy, label="numpy +=: ndarray")
# plotSpeedupForEachArrayLength(addFive_Numba, label="numba: list", comparisonFunctionListGenerator="createPythonList")
# plotSpeedupForEachArrayLength(addFive_Numba, label="numba: numba.typed.List", comparisonFunctionListGenerator="createNumbaTypedList")
# plotSpeedupForEachArrayLength(addFive_Numba, label="numba: ndarray")
if testCython:
# Must have compiled the 'addScalarCython.pyx' file on your machine using Cython to make this work
# Run `cythonize addScalar.pyx`
# https://cython.readthedocs.io/en/latest/MAPLEAF/tutorial/cython_tutorial.html
import addScalarCython
plotSpeedupForEachArrayLength(addScalarCython.addFive_Numpy, comparisonFnModule="addScalarCython", label="Cython - strongly-typed: ndarray")
plotSpeedupForEachArrayLength(addScalarCython.addFive_Plain, comparisonFnModule="addScalarCython", label="Cython - Plain Python: list", comparisonFunctionListGenerator="createPythonList")
plotSpeedupForEachArrayLength(addScalarCython.addFive_Plain, comparisonFnModule="addScalarCython", label="Cython - Plain Python: ndarray")
# Too slow
# plotSpeedupForEachArrayLength(addFive_Python_Multiprocessing, label="Multiprocessing: 2 cores")
# Also very slow, but less so because processes are only launched once
# ray.init()
# plotSpeedupForEachArrayLength(addFive_Python_Ray, label="Ray: 2 cores")
if testCppFunctions:
import example
#Functions to test are:
example.addToList_Cpp # - (converts to std::vector and back)
# example.addFive(nV1) # - (no conversion, loops in C++)
example.vectorizedAddFive #- (C++ function wrapped with py::vectorize to work on an array)
if testJuliaFunctions:
plotSpeedupForEachArrayLength(Main.addFive_Julia, label="Julia, ndarray")
plotSpeedupForEachArrayLength(Main.addFive_Julia, label="Julia, list", comparisonFunctionListGenerator="createPythonList")
plt.xlabel("Array size")
plt.ylabel("Speedup")
plt.title("Elementwise adding a scalar to an array of float64")
plt.autoscale(tight=True, axis="y")
plt.xlim([0, maxArraySize])
# plt.yscale("log")
plt.legend()
plt.show()
| [
"statistics.mean",
"ray.get",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"julia.Main.include",
"numpy.array",
"multiprocessing.Pool",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"warnings.filterwarnings",
"matplot... | [((320, 353), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (343, 353), False, 'import warnings\n'), ((878, 912), 'numpy.array', 'np.array', (['pyList'], {'dtype': 'np.float64'}), '(pyList, dtype=np.float64)\n', (886, 912), True, 'import numpy as np\n'), ((4029, 4057), 'julia.Main.include', 'Main.include', (['"""addScalar.jl"""'], {}), "('addScalar.jl')\n", (4041, 4057), False, 'from julia import Main\n'), ((6644, 6668), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Array size"""'], {}), "('Array size')\n", (6654, 6668), True, 'import matplotlib.pyplot as plt\n'), ((6673, 6694), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Speedup"""'], {}), "('Speedup')\n", (6683, 6694), True, 'import matplotlib.pyplot as plt\n'), ((6699, 6762), 'matplotlib.pyplot.title', 'plt.title', (['"""Elementwise adding a scalar to an array of float64"""'], {}), "('Elementwise adding a scalar to an array of float64')\n", (6708, 6762), True, 'import matplotlib.pyplot as plt\n'), ((6772, 6807), 'matplotlib.pyplot.autoscale', 'plt.autoscale', ([], {'tight': '(True)', 'axis': '"""y"""'}), "(tight=True, axis='y')\n", (6785, 6807), True, 'import matplotlib.pyplot as plt\n'), ((6812, 6839), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, maxArraySize]'], {}), '([0, maxArraySize])\n', (6820, 6839), True, 'import matplotlib.pyplot as plt\n'), ((6869, 6881), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6879, 6881), True, 'import matplotlib.pyplot as plt\n'), ((6886, 6896), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6894, 6896), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1438), 'ray.get', 'ray.get', (['future1'], {}), '(future1)\n', (1429, 1438), False, 'import ray\n'), ((1441, 1457), 'ray.get', 'ray.get', (['future2'], {}), '(future2)\n', (1448, 1457), False, 'import ray\n'), ((1543, 1550), 'multiprocessing.Pool', 'Pool', (['(2)'], {}), '(2)\n', (1547, 1550), False, 'from multiprocessing import 
Pool\n'), ((3721, 3784), 'matplotlib.pyplot.plot', 'plt.plot', (['arrayLengths', 'ratios'], {'linestyle': '"""dashed"""', 'label': 'label'}), "(arrayLengths, ratios, linestyle='dashed', label=label)\n", (3729, 3784), True, 'import matplotlib.pyplot as plt\n'), ((3574, 3586), 'statistics.mean', 'mean', (['ratios'], {}), '(ratios)\n', (3578, 3586), False, 'from statistics import mean\n'), ((3831, 3894), 'matplotlib.pyplot.plot', 'plt.plot', (['arrayLengths', 'ratios'], {'linestyle': '"""dotted"""', 'label': 'label'}), "(arrayLengths, ratios, linestyle='dotted', label=label)\n", (3839, 3894), True, 'import matplotlib.pyplot as plt\n'), ((3913, 3956), 'matplotlib.pyplot.plot', 'plt.plot', (['arrayLengths', 'ratios'], {'label': 'label'}), '(arrayLengths, ratios, label=label)\n', (3921, 3956), True, 'import matplotlib.pyplot as plt\n')] |
#! /usr/bin/env python
#<NAME>
#<EMAIL>
import sys
import os
import argparse
import numpy as np
from osgeo import gdal
from pygeotools.lib import iolib
#Can use ASP image_calc for multithreaded ndv replacement of huge images
#image_calc -o ${1%.*}_ndv.tif -c 'var_0' --output-nodata-value $2 $1
def getparser():
    """Build the command-line parser for the NoData replacement tool."""
    p = argparse.ArgumentParser(description="Replace raster NoData value")
    p.add_argument('-overwrite', action='store_true', help='Overwrite original file')
    p.add_argument('src_fn', type=str, help='Input raster filename')
    p.add_argument('new_ndv', type=str, help='New NoData value (e.g., -9999)')
    return p
def main():
    """Parse arguments, then rewrite the raster's NoData value.

    Reads src_fn with GDAL, replaces the old NoData value with new_ndv,
    and writes either in place (-overwrite) or to <src>_ndv.tif.
    """
    parser = getparser()
    args = parser.parse_args()
    src_fn = args.src_fn
    new_ndv = args.new_ndv
    #Input argument is a string, which is not recognized by set_fill_value
    #Must use np.nan object
    if new_ndv == 'nan' or new_ndv == 'np.nan':
        new_ndv = np.nan
    else:
        new_ndv = float(new_ndv)
    #Output filename will have ndv appended
    if args.overwrite:
        out_fn = src_fn
    else:
        out_fn = os.path.splitext(src_fn)[0]+'_ndv.tif'
    ds = gdal.Open(src_fn)
    b = ds.GetRasterBand(1)
    #Extract old ndv
    old_ndv = iolib.get_ndv_b(b)
    print(src_fn)
    print("Replacing old ndv %s with new ndv %s" % (old_ndv, new_ndv))
    #Load masked array
    bma = iolib.ds_getma(ds)
    #Handle cases with input ndv of nan
    #if old_ndv == np.nan:
    # fix_invalid masks NaN/Inf so a NaN old ndv is handled like any mask
    bma = np.ma.fix_invalid(bma)
    #Set new fill value
    bma.set_fill_value(new_ndv)
    #Fill ma with new value and write out
    iolib.writeGTiff(bma.filled(), out_fn, ds, ndv=new_ndv)
if __name__ == '__main__':
main()
| [
"osgeo.gdal.Open",
"pygeotools.lib.iolib.ds_getma",
"argparse.ArgumentParser",
"numpy.ma.fix_invalid",
"pygeotools.lib.iolib.get_ndv_b",
"os.path.splitext"
] | [((331, 397), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Replace raster NoData value"""'}), "(description='Replace raster NoData value')\n", (354, 397), False, 'import argparse\n'), ((1175, 1192), 'osgeo.gdal.Open', 'gdal.Open', (['src_fn'], {}), '(src_fn)\n', (1184, 1192), False, 'from osgeo import gdal\n'), ((1256, 1274), 'pygeotools.lib.iolib.get_ndv_b', 'iolib.get_ndv_b', (['b'], {}), '(b)\n', (1271, 1274), False, 'from pygeotools.lib import iolib\n'), ((1399, 1417), 'pygeotools.lib.iolib.ds_getma', 'iolib.ds_getma', (['ds'], {}), '(ds)\n', (1413, 1417), False, 'from pygeotools.lib import iolib\n'), ((1496, 1518), 'numpy.ma.fix_invalid', 'np.ma.fix_invalid', (['bma'], {}), '(bma)\n', (1513, 1518), True, 'import numpy as np\n'), ((1126, 1150), 'os.path.splitext', 'os.path.splitext', (['src_fn'], {}), '(src_fn)\n', (1142, 1150), False, 'import os\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 13:38:59 2019
@author: ryancompton
"""
import os
import pandas as pd
import numpy as np
print("CONVERTING ACCOUNTS TO AMLSIM FORMATTED ACCOUNTS")
file_path = os.path.join("output_datasets")
files = [os.path.join(file_path,x) for x in os.listdir(file_path) if "account" in x]
latest_account_csv = max(files,key=os.path.getctime)
data = pd.read_csv(latest_account_csv)
def random_min(row):
    """Draw a random minimum balance in [100, 400]; *row* is unused."""
    low, high = 100, 401
    return np.random.randint(low, high)
def random_max(row):
    """Return the row's min_balance plus a random offset in [100, 400]."""
    offset = np.random.randint(100, 401)
    return row["min_balance"] + offset
data["min_balance"] = data.apply(lambda row : random_min(row),axis=1)
data["max_balance"] = data.apply(lambda row : random_max(row),axis=1)
data.to_csv(os.path.join("outputs","accounts.csv"))
data = pd.read_csv(latest_account_csv)
cust_ids = set(data["primary_cust_id"])
def find_rand_cust(row):
    """Pick a random beneficiary customer id different from the row's own.

    Bug fix: the original computed ``set(row["primary_cust_id"])``, which
    iterates the id value itself -- a TypeError for integer ids, and a set
    of individual characters for string ids -- instead of excluding the
    single id. Wrapping the value in a one-element set gives the intended
    "everyone but me" sample.
    """
    global cust_ids
    sample = list(cust_ids - {row["primary_cust_id"]})
    return np.random.choice(sample, 1)[0]
data["ben_cust_id"] = data.apply(lambda row : find_rand_cust(row),axis=1)
data.to_csv(latest_account_csv)
print("DONE")
| [
"os.listdir",
"pandas.read_csv",
"numpy.random.choice",
"os.path.join",
"numpy.random.randint"
] | [((236, 267), 'os.path.join', 'os.path.join', (['"""output_datasets"""'], {}), "('output_datasets')\n", (248, 267), False, 'import os\n'), ((416, 447), 'pandas.read_csv', 'pd.read_csv', (['latest_account_csv'], {}), '(latest_account_csv)\n', (427, 447), True, 'import pandas as pd\n'), ((792, 823), 'pandas.read_csv', 'pd.read_csv', (['latest_account_csv'], {}), '(latest_account_csv)\n', (803, 823), True, 'import pandas as pd\n'), ((278, 304), 'os.path.join', 'os.path.join', (['file_path', 'x'], {}), '(file_path, x)\n', (290, 304), False, 'import os\n'), ((481, 508), 'numpy.random.randint', 'np.random.randint', (['(100)', '(401)'], {}), '(100, 401)\n', (498, 508), True, 'import numpy as np\n'), ((744, 783), 'os.path.join', 'os.path.join', (['"""outputs"""', '"""accounts.csv"""'], {}), "('outputs', 'accounts.csv')\n", (756, 783), False, 'import os\n'), ((313, 334), 'os.listdir', 'os.listdir', (['file_path'], {}), '(file_path)\n', (323, 334), False, 'import os\n'), ((562, 589), 'numpy.random.randint', 'np.random.randint', (['(100)', '(401)'], {}), '(100, 401)\n', (579, 589), True, 'import numpy as np\n'), ((980, 1007), 'numpy.random.choice', 'np.random.choice', (['sample', '(1)'], {}), '(sample, 1)\n', (996, 1007), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 12 19:07:50 2016
@author: ngordon
"""
#==============================================================================
# 3d animation 0 stackoverflow
#==============================================================================
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def data_gen(num):
    """Render animation frame *num*: a vector rotating about the z axis.

    Redraws on the module-level 3-D axes ``ax``.
    """
    theta = num * np.pi / 36
    vx = np.cos(theta)
    vy = np.sin(theta)
    vz = 1
    ax.cla()
    # the 3-D vector plus its dashed projection onto the x-y plane
    ax.quiver(0, 0, 0, vx, vy, vz, pivot="tail", color="black")
    ax.quiver(0, 0, 0, vx, vy, 0, pivot="tail", color="black",
              linestyle="dashed")
    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)
    ax.set_zlim(-1, 1)
    ax.view_init(elev=30, azim=60)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
data_gen(0)
ani = animation.FuncAnimation(fig, data_gen, range(72), blit=False)
plt.show()
| [
"numpy.sin",
"matplotlib.pyplot.figure",
"numpy.cos",
"matplotlib.pyplot.show"
] | [((855, 867), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (865, 867), True, 'import matplotlib.pyplot as plt\n'), ((995, 1005), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1003, 1005), True, 'import matplotlib.pyplot as plt\n'), ((526, 539), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (532, 539), True, 'import numpy as np\n'), ((541, 554), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (547, 554), True, 'import numpy as np\n')] |
import numpy as np
from numpy import abs, cos, exp, mean, pi, prod, sin, sqrt, sum
from autotune import TuningProblem
from autotune.space import *
# problem space
task_space = None
input_space = Space([
Real(-15, 15, name=f'x_{i}') for i in range(10)
])
output_space = Space([
Real(-inf, inf, name='y')
])
def myobj(point: dict):
    """Evaluate the Ackley function at the point encoded as {'x_0': ..., ...}."""
    def ackley(x, a=20, b=0.2, c=2*pi):
        # ValueError if any NaN or Inf
        x = np.asarray_chkfinite(x)
        n = len(x)
        sum_sq = sum(x**2)
        sum_cos = sum(cos(c * x))
        return -a*exp(-b*sqrt(sum_sq / n)) - exp(sum_cos / n) + a + exp(1)
    coords = np.array([point[f'x_{i}'] for i in range(len(point))])
    return ackley(coords)
Problem = TuningProblem(
task_space=None,
input_space=input_space,
output_space=output_space,
objective=myobj,
constraints=None,
model=None
) | [
"numpy.sqrt",
"autotune.TuningProblem",
"numpy.exp",
"numpy.sum",
"numpy.cos",
"numpy.asarray_chkfinite"
] | [((721, 855), 'autotune.TuningProblem', 'TuningProblem', ([], {'task_space': 'None', 'input_space': 'input_space', 'output_space': 'output_space', 'objective': 'myobj', 'constraints': 'None', 'model': 'None'}), '(task_space=None, input_space=input_space, output_space=\n output_space, objective=myobj, constraints=None, model=None)\n', (734, 855), False, 'from autotune import TuningProblem\n'), ((396, 419), 'numpy.asarray_chkfinite', 'np.asarray_chkfinite', (['x'], {}), '(x)\n', (416, 419), True, 'import numpy as np\n'), ((484, 495), 'numpy.sum', 'sum', (['(x ** 2)'], {}), '(x ** 2)\n', (487, 495), False, 'from numpy import abs, cos, exp, mean, pi, prod, sin, sqrt, sum\n'), ((513, 523), 'numpy.cos', 'cos', (['(c * x)'], {}), '(c * x)\n', (516, 523), False, 'from numpy import abs, cos, exp, mean, pi, prod, sin, sqrt, sum\n'), ((591, 597), 'numpy.exp', 'exp', (['(1)'], {}), '(1)\n', (594, 597), False, 'from numpy import abs, cos, exp, mean, pi, prod, sin, sqrt, sum\n'), ((571, 582), 'numpy.exp', 'exp', (['(s2 / n)'], {}), '(s2 / n)\n', (574, 582), False, 'from numpy import abs, cos, exp, mean, pi, prod, sin, sqrt, sum\n'), ((553, 565), 'numpy.sqrt', 'sqrt', (['(s1 / n)'], {}), '(s1 / n)\n', (557, 565), False, 'from numpy import abs, cos, exp, mean, pi, prod, sin, sqrt, sum\n')] |
import numpy as np
import pandas as pd
MAXEPOCH = 1000
file = open("Train.txt")
lines = file.readlines()
numClass, numFeature, datasetLen = 0, 0, 0
dataset = []
count = 0
for line in lines:
if count == 0:
var = line.split()
numFeature = int(var[0])
numClass = int(var[1])
datasetLen = int(var[2])
else:
var = line.split()
data = []
for i in range(numFeature):
data.append(float(var[i]))
data.append(int(var[numFeature]))
dataset.append(data)
count += 1
file = open("Test.txt")
lines = file.readlines()
test_dataset = []
np.random.seed(41)
w = np.random.uniform(-10,10,numFeature+1)
for line in lines:
var = line.split()
data = []
for i in range(numFeature):
data.append(float(var[i]))
data.append(int(var[numFeature]))
test_dataset.append(data)
def test(dataset,w):
    """Score weight vector *w* on *dataset*.

    Each row is [features..., class_label]; the label slot is replaced by
    the bias term 1 before taking the dot product. A non-negative score
    predicts class 1, otherwise class 2. Prints the accuracy percentage
    and returns the accuracy as a fraction. Uses the module-level
    numFeature.
    """
    correct = 0
    for row in dataset:
        vec = np.array(row)
        label = vec[numFeature]
        vec[numFeature] = 1
        vec = vec.reshape(numFeature+1, 1)
        score = np.dot(w, vec)[0]
        predicted = 1 if score >= 0 else 2
        if predicted == label:
            correct += 1
    print("Accuracy :", float(correct / len(dataset) * 100))
    return float(correct / len(dataset))
def train_basic_perceptron():
    """Train a two-class batch perceptron on the module-level dataset.

    Runs up to MAXEPOCH passes. Each pass collects all currently
    misclassified samples, then applies one batch update to the global
    weight vector w with learning rate 0.025. Stops early once an epoch
    produces no misclassifications.
    """
    global w, dataset, MAXEPOCH, datasetLen
    learning_rate = 0.025
    t = 0  # NOTE(review): unused -- presumably a leftover counter
    for i in range(MAXEPOCH):
        Y = []       # misclassified samples (feature vectors with bias slot)
        arr_dx = []  # sign of each misclassified sample's contribution
        for j in range(datasetLen):
            x = np.array(dataset[j])
            group = x[numFeature]
            # replace the class-label slot with the bias term
            x[numFeature] = 1
            x = x.reshape(numFeature+1,1)
            dot_product = np.dot(w,x)[0]
            if(group == 2 and dot_product>0):
                # class-2 sample on the positive side: misclassified
                Y.append(x)
                arr_dx.append(1)
            elif(group ==1 and dot_product<0):
                # class-1 sample on the negative side: misclassified
                Y.append(x)
                arr_dx.append(-1)
            else:
                pass
        # batch perceptron update over all misclassified samples
        # (note: 'sum' shadows the builtin of the same name)
        sum = np.zeros(numFeature+1)
        for j in range(len(Y)):
            sum += arr_dx[j]*Y[j].transpose()[0]
        w = w - learning_rate*sum
        print("Iter {} => {}".format(i,"---"))
        if len(Y) == 0:
            # converged: nothing misclassified this epoch
            break
if __name__ == "__main__":
train_basic_perceptron()
print('Final Weight : ', w)
test(test_dataset,w)
| [
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.random.seed",
"numpy.random.uniform"
] | [((625, 643), 'numpy.random.seed', 'np.random.seed', (['(41)'], {}), '(41)\n', (639, 643), True, 'import numpy as np\n'), ((648, 690), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)', '(numFeature + 1)'], {}), '(-10, 10, numFeature + 1)\n', (665, 690), True, 'import numpy as np\n'), ((961, 975), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (969, 975), True, 'import numpy as np\n'), ((1044, 1055), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1052, 1055), True, 'import numpy as np\n'), ((2083, 2107), 'numpy.zeros', 'np.zeros', (['(numFeature + 1)'], {}), '(numFeature + 1)\n', (2091, 2107), True, 'import numpy as np\n'), ((1116, 1128), 'numpy.dot', 'np.dot', (['w', 'x'], {}), '(w, x)\n', (1122, 1128), True, 'import numpy as np\n'), ((1665, 1685), 'numpy.array', 'np.array', (['dataset[j]'], {}), '(dataset[j])\n', (1673, 1685), True, 'import numpy as np\n'), ((1810, 1822), 'numpy.dot', 'np.dot', (['w', 'x'], {}), '(w, x)\n', (1816, 1822), True, 'import numpy as np\n')] |
import numpy as np
from .binarygrid_util import MfGrdFile
def get_structured_faceflows(
    flowja, grb_file=None, ia=None, ja=None, verbose=False
):
    """
    Get the face flows for the flow right face, flow front face, and
    flow lower face from the MODFLOW 6 flowja flows. This method can
    be useful for building face flow arrays for MT3DMS, MT3D-USGS, and
    RT3D. This method only works for a structured MODFLOW 6 model.
    Parameters
    ----------
    flowja : ndarray
        flowja array for a structured MODFLOW 6 model
    grb_file : str
        MODFLOW 6 binary grid file path
    ia : list or ndarray
        CRS row pointers. Only required if grb_file is not provided.
    ja : list or ndarray
        CRS column pointers. Only required if grb_file is not provided.
    verbose: bool
        Write information to standard output
    Returns
    -------
    frf : ndarray
        right face flows
    fff : ndarray
        front face flows
    flf : ndarray
        lower face flows
    """
    if grb_file is not None:
        grb = MfGrdFile(grb_file, verbose=verbose)
        if grb.grid_type != "DIS":
            raise ValueError(
                "get_structured_faceflows method "
                "is only for structured DIS grids"
            )
        ia, ja = grb.ia, grb.ja
    else:
        if ia is None or ja is None:
            # NOTE(review): these two string fragments concatenate without a
            # space ("MODFLOW 6binary") -- message text left as-is here.
            raise ValueError(
                "ia and ja arrays must be specified if the MODFLOW 6"
                "binary grid file name is not specified."
            )
    # flatten flowja, if necessary
    if len(flowja.shape) > 0:
        flowja = flowja.flatten()
    # evaluate size of flowja relative to ja
    __check_flowja_size(flowja, ja)
    # create face flow arrays
    # NOTE(review): grb is referenced here (and at grb.nodes below) even
    # when grb_file is None, so the ia/ja-only path raises NameError; the
    # grid dimensions are not recoverable from ia/ja alone, so a fix needs
    # an interface decision (e.g. a shape argument) -- flagged, not changed.
    shape = (grb.nlay, grb.nrow, grb.ncol)
    frf = np.zeros(shape, dtype=float).flatten()
    fff = np.zeros(shape, dtype=float).flatten()
    flf = np.zeros(shape, dtype=float).flatten()
    # fill flow terms
    vmult = [-1.0, -1.0, -1.0]
    flows = [frf, fff, flf]
    for n in range(grb.nodes):
        # skip the diagonal entry (ia[n]) -- it holds the cell residual
        i0, i1 = ia[n] + 1, ia[n + 1]
        ipos = 0
        for j in range(i0, i1):
            jcol = ja[j]
            # only forward connections (higher node number) are face flows
            if jcol > n:
                flows[ipos][n] = vmult[ipos] * flowja[j]
            ipos += 1
    # reshape flow terms
    frf = frf.reshape(shape)
    fff = fff.reshape(shape)
    flf = flf.reshape(shape)
    return frf, fff, flf
def get_residuals(
    flowja, grb_file=None, ia=None, ja=None, shape=None, verbose=False
):
    """
    Get the residual from the MODFLOW 6 flowja flows. The residual is stored
    in the diagonal position of the flowja vector.
    Parameters
    ----------
    flowja : ndarray
        flowja array for a structured MODFLOW 6 model
    grb_file : str
        MODFLOW 6 binary grid file path
    ia : list or ndarray
        CRS row pointers. Only required if grb_file is not provided.
    ja : list or ndarray
        CRS column pointers. Only required if grb_file is not provided.
    shape : tuple
        shape of returned residual. A flat array is returned if shape is None
        and grb_file is None.
    verbose: bool
        Write information to standard output
    Returns
    -------
    residual : ndarray
        Residual for each cell
    """
    if grb_file is not None:
        grb = MfGrdFile(grb_file, verbose=verbose)
        shape = grb.shape
        ia, ja = grb.ia, grb.ja
    else:
        if ia is None or ja is None:
            raise ValueError(
                "ia and ja arrays must be specified if the MODFLOW 6 "
                "binary grid file name is not specified."
            )
    # flatten flowja, if necessary
    if len(flowja.shape) > 0:
        flowja = flowja.flatten()
    # evaluate size of flowja relative to ja
    __check_flowja_size(flowja, ja)
    # Bug fix: the cell count was read from grb.nodes, which raised
    # NameError when only ia/ja were supplied (grb_file=None). The CRS
    # row-pointer convention gives the same count as len(ia) - 1.
    nodes = len(ia) - 1
    residual = np.zeros(nodes, dtype=float)
    # fill flow terms -- the diagonal (first entry of each CRS row) holds
    # the cell residual
    for n in range(nodes):
        i0, i1 = ia[n], ia[n + 1]
        if i0 < i1:
            residual[n] = flowja[i0]
        else:
            # no entries stored for this cell
            residual[n] = np.nan
    # reshape residual terms
    if shape is not None:
        residual = residual.reshape(shape)
    return residual
# internal
def __check_flowja_size(flowja, ja):
    """
    Raise ValueError when flowja and ja do not have matching shapes.
    """
    if flowja.shape == ja.shape:
        return
    raise ValueError(
        f"size of flowja ({flowja.shape}) not equal to {ja.shape}"
    )
| [
"numpy.zeros"
] | [((3865, 3893), 'numpy.zeros', 'np.zeros', (['nodes'], {'dtype': 'float'}), '(nodes, dtype=float)\n', (3873, 3893), True, 'import numpy as np\n'), ((1796, 1824), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (1804, 1824), True, 'import numpy as np\n'), ((1845, 1873), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (1853, 1873), True, 'import numpy as np\n'), ((1894, 1922), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'float'}), '(shape, dtype=float)\n', (1902, 1922), True, 'import numpy as np\n')] |
#!/usr/bin/python3
''' Summary: Script to process images and update the database '''
import datetime
import os
import sqlite3
from pathlib import Path
from sqlite3 import Error
import cv2
import numpy as np
from numpy import array
from shapely.geometry import Polygon, asPoint
import mrcnn.config
from mrcnn.model import MaskRCNN
DB_PATH = os.path.join(os.path.dirname(__file__), 'db/park.db')
try:
# Open database connection
conn = sqlite3.connect(DB_PATH)
conn.row_factory = sqlite3.Row
# prepare a cursor object using cursor() method
cursor = conn.cursor()
except Error as ex:
print("Error in connection: {}".format(ex))
exit()
# Configuration that will be used by the Mask-RCNN library
class MaskRCNNConfig(mrcnn.config.Config):
    """Inference-time configuration for the pre-trained COCO Mask R-CNN."""
    NAME = "coco_pretrained_model_config"
    # process one image at a time on a single GPU
    IMAGES_PER_GPU = 1
    GPU_COUNT = 1
    NUM_CLASSES = 1 + 80 # COCO dataset has 80 classes + one background class
    # discard detections below 50% confidence
    DETECTION_MIN_CONFIDENCE = 0.5
# Filter a list of Mask R-CNN detection results to get only the detected cars / trucks
def get_car_boxes(boxes, class_ids):
    """Filter Mask R-CNN detections down to vehicles.

    Keeps only boxes whose COCO class id is car (3), bus (6) or truck (8)
    and returns them as a single ndarray.
    """
    vehicle_ids = (3, 6, 8)
    kept = [box for i, box in enumerate(boxes) if class_ids[i] in vehicle_ids]
    return np.array(kept)
def query_database(sql):
    """Execute *sql* on the module-level connection and return the cursor.

    Any statement that is not a SELECT is committed. Bug fix: the original
    wrote ``conn.commit`` without parentheses -- a no-op attribute access
    that never called commit (the surrounding ``with conn:`` block was the
    only thing saving the writes).
    """
    with conn:
        # execute SQL query using execute() method.
        cursor.execute(sql)
        # Commit on CREATE, INSERT, UPDATE, and DELETE
        if not sql.lower().startswith("select"):
            conn.commit()
        return cursor
# Root directory of the project
ROOT_DIR = Path(__file__).resolve().parent
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
print("Missing the mask_rcnn_coco.h5 dataset! Downloading now...")
mrcnn.utils.download_trained_weights(COCO_MODEL_PATH)
# Get database version
db_vers = query_database("SELECT sqlite_version();").fetchone()
print("Connected. Database version: {}".format(db_vers[0]))
# Get source data
source = query_database("SELECT * FROM Source").fetchall()
if len(source) == 0:
print("No feeds found! Exiting now...")
exit()
else:
# Create a Mask-RCNN model in inference mode
model = MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=MaskRCNNConfig())
# Load pre-trained model
model.load_weights(COCO_MODEL_PATH, by_name=True)
# Get UTC time before loop
local_timezone = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
timestamp = datetime.datetime.now(local_timezone).strftime("%Y-%m-%d %H:%M:%S %Z")
smalltimestamp = datetime.datetime.now(local_timezone).strftime("%Y%m%d")
def main():
    """Grab one frame per active source, count vehicles per zone and log it.

    For every active row in ``source``: read a frame, run Mask R-CNN, keep
    the vehicle boxes, intersect each box centroid with every zone polygon
    and INSERT the per-zone count into OccupancyLog.

    Bug fix: the original called ``np.delete(car_boxes, box)`` and discarded
    the returned array (np.delete never mutates its input), so a car could be
    counted in several zones.  A set of consumed box indices now enforces
    that each detection is credited to at most one zone.
    """
    for s in source:
        if s['Active'] == True:
            # Source password decryption code would go here
            # Video file or camera feed to process
            FRAME_SOURCE = s['URI']
            # Load the source we want to run detection on
            video_capture = cv2.VideoCapture(FRAME_SOURCE)
            success, frame = video_capture.read()
            if success:
                # Clone image instead of using original
                frame_copy = frame.copy()
                # Convert the image from BGR color (which OpenCV uses) to RGB color
                rgb_image = frame_copy[:, :, ::-1]
                print("Starting Mask R-CNN segmentation and detection...")
                # Run the image through the Mask R-CNN model to get results.
                results = model.detect([rgb_image], verbose=0)
                # Only one image was passed in, so grab the first result:
                # r['rois'], r['class_ids'], r['scores'], r['masks']
                r = results[0]
                print("Starting vehicle localization...")
                # Filter the results to only grab the car / truck bounding boxes
                car_boxes = get_car_boxes(r['rois'], r['class_ids'])
                # Get zone data
                sql = "SELECT Zone.*, Type.Description FROM Zone JOIN Type USING(TypeID) WHERE SourceID = {}".format(s['SourceID'])
                zone = query_database(sql).fetchall()
                if len(zone) == 0:
                    print("There are no zones defined for this source!")
                    break
                print("Cars found in frame: {}".format(len(car_boxes)))
                print("Counting vehicles in zones...")
                # Indices of detections already credited to a zone.
                counted = set()
                for z in zone:
                    # SECURITY NOTE(review): eval() executes arbitrary code
                    # from the database; the stored value uses tuple syntax so
                    # json.loads cannot replace it directly — needs a safer
                    # serialization (e.g. ast.literal_eval).
                    poly_coords = eval(z['PolyCoords'])
                    zone_poly = Polygon(asPoint(array(poly_coords)))
                    # Hold count of cars in zone
                    count = 0
                    for i, box in enumerate(car_boxes):
                        if i in counted:
                            continue  # already assigned to an earlier zone
                        y1, x1, y2, x2 = box
                        if Polygon([(x1, y1), (x2, y1), (x1, y2), (x2, y2)]).centroid.intersects(zone_poly):
                            # Display the box coordinates in the console
                            print("Car: ", box)
                            count += 1
                            counted.add(i)  # prevents double counting
                    # Make sure the number counted is not more than the number of spaces
                    count = count if count <= z['TotalSpaces'] else z['TotalSpaces']
                    print("Total cars in zone {} ({}): {}.".format(z['ZoneID'], z['Description'], count))
                    # Insert count into database
                    sql = "INSERT INTO OccupancyLog (ZoneID, LotID, TypeID, Timestamp, OccupiedSpaces, TotalSpaces) VALUES ({}, {}, {}, {}, {}, {})".format(z['ZoneID'], z['LotID'], z['TypeID'], "'{}'".format(timestamp), count, z['TotalSpaces'])
                    query_database(sql)
                    print("Database updated...")
                # Clean up everything when finished
                video_capture.release()
                cv2.destroyAllWindows()
            else:
                print("Cannot access source {} vic {}!".format(s['SourceID'], s['Location']))
# Bug fix: the cursor and connection were previously closed at import time,
# before the guarded main() call below executed — every query inside main()
# would then have hit a closed connection.  Clean-up now runs after main().
if __name__ == '__main__':
    main()
    cursor.close()
    conn.close()
    print("Job complete. Have an excellent day.")
| [
"os.path.exists",
"sqlite3.connect",
"pathlib.Path",
"numpy.delete",
"os.path.join",
"os.path.dirname",
"numpy.array",
"datetime.datetime.now",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"shapely.geometry.Polygon"
] | [((1720, 1750), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (1732, 1750), False, 'import os\n'), ((1807, 1850), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_coco.h5"""'], {}), "(ROOT_DIR, 'mask_rcnn_coco.h5')\n", (1819, 1850), False, 'import os\n'), ((356, 381), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (371, 381), False, 'import os\n'), ((445, 469), 'sqlite3.connect', 'sqlite3.connect', (['DB_PATH'], {}), '(DB_PATH)\n', (460, 469), False, 'import sqlite3\n'), ((1291, 1310), 'numpy.array', 'np.array', (['car_boxes'], {}), '(car_boxes)\n', (1299, 1310), True, 'import numpy as np\n'), ((1915, 1946), 'os.path.exists', 'os.path.exists', (['COCO_MODEL_PATH'], {}), '(COCO_MODEL_PATH)\n', (1929, 1946), False, 'import os\n'), ((1632, 1646), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1636, 1646), False, 'from pathlib import Path\n'), ((2737, 2774), 'datetime.datetime.now', 'datetime.datetime.now', (['local_timezone'], {}), '(local_timezone)\n', (2758, 2774), False, 'import datetime\n'), ((2829, 2866), 'datetime.datetime.now', 'datetime.datetime.now', (['local_timezone'], {}), '(local_timezone)\n', (2850, 2866), False, 'import datetime\n'), ((2656, 2700), 'datetime.datetime.now', 'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (2677, 2700), False, 'import datetime\n'), ((3220, 3250), 'cv2.VideoCapture', 'cv2.VideoCapture', (['FRAME_SOURCE'], {}), '(FRAME_SOURCE)\n', (3236, 3250), False, 'import cv2\n'), ((6924, 6947), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6945, 6947), False, 'import cv2\n'), ((6088, 6113), 'numpy.delete', 'np.delete', (['car_boxes', 'box'], {}), '(car_boxes, box)\n', (6097, 6113), True, 'import numpy as np\n'), ((5645, 5694), 'shapely.geometry.Polygon', 'Polygon', (['[(x1, y1), (x2, y1), (x1, y2), (x2, y2)]'], {}), '([(x1, y1), (x2, y1), (x1, y2), (x2, y2)])\n', (5652, 5694), 
False, 'from shapely.geometry import Polygon, asPoint\n'), ((5733, 5751), 'numpy.array', 'array', (['poly_coords'], {}), '(poly_coords)\n', (5738, 5751), False, 'from numpy import array\n')] |
"""
Python implementation of the LiNGAM algorithms.
The LiNGAM Project: https://sites.google.com/site/sshimizu06/lingam
"""
import numpy as np
from scipy.stats import gamma
from statsmodels.nonparametric import bandwidths
__all__ = ['get_kernel_width', 'get_gram_matrix', 'hsic_teststat', 'hsic_test_gamma']
def get_kernel_width(X):
    """Bandwidth via the median-distance-between-points heuristic.

    At most the first 100 samples are used (the median is only a heuristic,
    and 100 points give a robust estimate).

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data, where ``n_samples`` is the number of samples
        and ``n_features`` is the number of features.

    Returns
    -------
    float
        The bandwidth parameter.
    """
    sample = X[:100, :] if X.shape[0] > 100 else X
    m = sample.shape[0]
    # Pairwise squared Euclidean distances via ||a||^2 + ||b||^2 - 2 a.b
    sq_norms = np.sum(sample * sample, 1).reshape(m, 1)
    sq_dists = sq_norms + sq_norms.T - 2 * np.dot(sample, sample.T)
    # Keep only the strictly upper-triangular entries (each pair once).
    sq_dists = sq_dists - np.tril(sq_dists)
    sq_dists = sq_dists.reshape(m ** 2, 1)
    return np.sqrt(0.5 * np.median(sq_dists[sq_dists > 0]))
def _rbf_dot(X, Y, width):
    """Gaussian (RBF) kernel matrix between the rows of X and the rows of Y."""
    n_x = X.shape[0]
    n_y = Y.shape[0]
    x_sq = np.sum(X * X, 1).reshape(n_x, 1)
    y_sq = np.sum(Y * Y, 1).reshape(n_y, 1)
    # Squared distances via broadcasting: ||x||^2 + ||y||^2 - 2 x.y
    sq_dists = x_sq + y_sq.T - 2 * np.dot(X, Y.T)
    return np.exp(-sq_dists / 2 / (width ** 2))
def get_gram_matrix(X, width):
    """Return the raw and doubly-centered Gram matrices of X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data, where ``n_samples`` is the number of samples
        and ``n_features`` is the number of features.
    width : float
        The bandwidth parameter.

    Returns
    -------
    K, Kc : array
        the centered gram matrices.
    """
    n_samples = X.shape[0]
    # Centering matrix H = I - (1/n) * ones
    centerer = np.eye(n_samples) - 1 / n_samples * np.ones((n_samples, n_samples))
    K = _rbf_dot(X, X, width)
    Kc = centerer.dot(K).dot(centerer)
    return K, Kc
def hsic_teststat(Kc, Lc, n):
    """Compute the HSIC test statistic m*HSICb (under H1).

    Parameters
    ----------
    Kc, Lc : array
        the centered gram matrices.
    n : float
        the number of samples.

    Returns
    -------
    float
        the HSIC statistic.
    """
    total = np.sum(Kc.T * Lc)
    return 1 / n * total
def hsic_test_gamma(X, Y, bw_method='mdbs'):
    """get the HSIC statistic.

    The null distribution of m*HSICb is approximated by a Gamma distribution
    whose alpha/beta parameters are derived from the estimated mean and
    variance under H0.

    Parameters
    ----------
    X, Y : array-like, shape (n_samples, n_features)
        Training data, where ``n_samples`` is the number of samples
        and ``n_features`` is the number of features.
    bw_method : str, optional (default=``mdbs``)
        The method used to calculate the bandwidth of the HSIC.
        * ``mdbs`` : Median distance between samples.
        * ``scott`` : Scott's Rule of Thumb.
        * ``silverman`` : Silverman's Rule of Thumb.

    Returns
    -------
    test_stat : float
        the HSIC statistic.
    p : float
        the HSIC p-value.
    """
    # Promote 1-D inputs to column vectors.
    X = X.reshape(-1, 1) if X.ndim == 1 else X
    Y = Y.reshape(-1, 1) if Y.ndim == 1 else Y
    if bw_method == 'scott':
        width_x = bandwidths.bw_scott(X)
        width_y = bandwidths.bw_scott(Y)
    elif bw_method == 'silverman':
        width_x = bandwidths.bw_silverman(X)
        width_y = bandwidths.bw_silverman(Y)
    # Get kernel width to median distance between points
    else:
        width_x = get_kernel_width(X)
        width_y = get_kernel_width(Y)
    # these are slightly biased estimates of centered gram matrices
    K, Kc = get_gram_matrix(X, width_x)
    L, Lc = get_gram_matrix(Y, width_y)
    # test statistic m*HSICb under H1
    n = X.shape[0]
    bone = np.ones((n, 1))
    test_stat = hsic_teststat(Kc, Lc, n)
    var = (1 / 6 * Kc * Lc) ** 2
    # second subtracted term is bias correction
    var = 1 / n / (n - 1) * (np.sum(np.sum(var)) - np.sum(np.diag(var)))
    # variance under H0
    var = 72 * (n - 4) * (n - 5) / n / (n - 1) / (n - 2) / (n - 3) * var
    # Zero the diagonals before estimating the mean under H0.
    K = K - np.diag(np.diag(K))
    L = L - np.diag(np.diag(L))
    mu_X = 1 / n / (n - 1) * np.dot(bone.T, np.dot(K, bone))
    mu_Y = 1 / n / (n - 1) * np.dot(bone.T, np.dot(L, bone))
    # mean under H0
    mean = 1 / n * (1 + mu_X * mu_Y - mu_X - mu_Y)
    # Gamma parameters from the first two moments.
    alpha = mean ** 2 / var
    # threshold for hsicArr*m
    beta = np.dot(var, n) / mean
    p = 1 - gamma.cdf(test_stat, alpha, scale=beta)[0][0]
    return test_stat, p
| [
"numpy.tile",
"numpy.eye",
"numpy.median",
"scipy.stats.gamma.cdf",
"numpy.ones",
"statsmodels.nonparametric.bandwidths.bw_silverman",
"numpy.diag",
"numpy.exp",
"numpy.sum",
"numpy.dot",
"numpy.tril",
"statsmodels.nonparametric.bandwidths.bw_scott"
] | [((995, 1021), 'numpy.tile', 'np.tile', (['G', '(1, n_samples)'], {}), '(G, (1, n_samples))\n', (1002, 1021), True, 'import numpy as np\n'), ((1030, 1058), 'numpy.tile', 'np.tile', (['G.T', '(n_samples, 1)'], {}), '(G.T, (n_samples, 1))\n', (1037, 1058), True, 'import numpy as np\n'), ((1499, 1527), 'numpy.tile', 'np.tile', (['G', '(1, n_samples_Y)'], {}), '(G, (1, n_samples_Y))\n', (1506, 1527), True, 'import numpy as np\n'), ((1536, 1566), 'numpy.tile', 'np.tile', (['H.T', '(n_samples_X, 1)'], {}), '(H.T, (n_samples_X, 1))\n', (1543, 1566), True, 'import numpy as np\n'), ((1614, 1641), 'numpy.exp', 'np.exp', (['(-H / 2 / width ** 2)'], {}), '(-H / 2 / width ** 2)\n', (1620, 1641), True, 'import numpy as np\n'), ((3918, 3933), 'numpy.ones', 'np.ones', (['(n, 1)'], {}), '((n, 1))\n', (3925, 3933), True, 'import numpy as np\n'), ((1127, 1141), 'numpy.tril', 'np.tril', (['dists'], {}), '(dists)\n', (1134, 1141), True, 'import numpy as np\n'), ((2085, 2094), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2091, 2094), True, 'import numpy as np\n'), ((2168, 2180), 'numpy.dot', 'np.dot', (['H', 'K'], {}), '(H, K)\n', (2174, 2180), True, 'import numpy as np\n'), ((3368, 3390), 'statsmodels.nonparametric.bandwidths.bw_scott', 'bandwidths.bw_scott', (['X'], {}), '(X)\n', (3387, 3390), False, 'from statsmodels.nonparametric import bandwidths\n'), ((3409, 3431), 'statsmodels.nonparametric.bandwidths.bw_scott', 'bandwidths.bw_scott', (['Y'], {}), '(Y)\n', (3428, 3431), False, 'from statsmodels.nonparametric import bandwidths\n'), ((4555, 4569), 'numpy.dot', 'np.dot', (['var', 'n'], {}), '(var, n)\n', (4561, 4569), True, 'import numpy as np\n'), ((940, 964), 'numpy.sum', 'np.sum', (['(X_med * X_med)', '(1)'], {}), '(X_med * X_med, 1)\n', (946, 964), True, 'import numpy as np\n'), ((1084, 1106), 'numpy.dot', 'np.dot', (['X_med', 'X_med.T'], {}), '(X_med, X_med.T)\n', (1090, 1106), True, 'import numpy as np\n'), ((1213, 1240), 'numpy.median', 'np.median', (['dists[dists > 
0]'], {}), '(dists[dists > 0])\n', (1222, 1240), True, 'import numpy as np\n'), ((1401, 1417), 'numpy.sum', 'np.sum', (['(X * X)', '(1)'], {}), '(X * X, 1)\n', (1407, 1417), True, 'import numpy as np\n'), ((1450, 1466), 'numpy.sum', 'np.sum', (['(Y * Y)', '(1)'], {}), '(Y * Y, 1)\n', (1456, 1466), True, 'import numpy as np\n'), ((1587, 1601), 'numpy.dot', 'np.dot', (['X', 'Y.T'], {}), '(X, Y.T)\n', (1593, 1601), True, 'import numpy as np\n'), ((2105, 2120), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (2112, 2120), True, 'import numpy as np\n'), ((2532, 2549), 'numpy.sum', 'np.sum', (['(Kc.T * Lc)'], {}), '(Kc.T * Lc)\n', (2538, 2549), True, 'import numpy as np\n'), ((3485, 3511), 'statsmodels.nonparametric.bandwidths.bw_silverman', 'bandwidths.bw_silverman', (['X'], {}), '(X)\n', (3508, 3511), False, 'from statsmodels.nonparametric import bandwidths\n'), ((3530, 3556), 'statsmodels.nonparametric.bandwidths.bw_silverman', 'bandwidths.bw_silverman', (['Y'], {}), '(Y)\n', (3553, 3556), False, 'from statsmodels.nonparametric import bandwidths\n'), ((4248, 4258), 'numpy.diag', 'np.diag', (['K'], {}), '(K)\n', (4255, 4258), True, 'import numpy as np\n'), ((4280, 4290), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (4287, 4290), True, 'import numpy as np\n'), ((4336, 4351), 'numpy.dot', 'np.dot', (['K', 'bone'], {}), '(K, bone)\n', (4342, 4351), True, 'import numpy as np\n'), ((4397, 4412), 'numpy.dot', 'np.dot', (['L', 'bone'], {}), '(L, bone)\n', (4403, 4412), True, 'import numpy as np\n'), ((4093, 4104), 'numpy.sum', 'np.sum', (['var'], {}), '(var)\n', (4099, 4104), True, 'import numpy as np\n'), ((4115, 4127), 'numpy.diag', 'np.diag', (['var'], {}), '(var)\n', (4122, 4127), True, 'import numpy as np\n'), ((4589, 4628), 'scipy.stats.gamma.cdf', 'gamma.cdf', (['test_stat', 'alpha'], {'scale': 'beta'}), '(test_stat, alpha, scale=beta)\n', (4598, 4628), False, 'from scipy.stats import gamma\n')] |
from __future__ import print_function
import sys
import numpy
import pytest
import struct
from stl import mesh
_STL_FILE = '''
solid test.stl
facet normal -0.014565 0.073223 -0.002897
outer loop
vertex 0.399344 0.461940 1.044090
vertex 0.500000 0.500000 1.500000
vertex 0.576120 0.500000 1.117320
endloop
endfacet
endsolid test.stl
'''.lstrip()
def test_valid_ascii(tmpdir, speedups):
    """A well-formed single-facet ASCII STL parses without error."""
    stl_path = tmpdir.join('tmp.stl')
    with stl_path.open('w+') as handle:
        handle.write(_STL_FILE)
        handle.seek(0)
        mesh.Mesh.from_file(str(stl_path), fh=handle, speedups=speedups)
def test_ascii_with_missing_name(tmpdir, speedups):
    """A header that is a bare ``solid`` keyword (no name) must still load."""
    stl_path = tmpdir.join('tmp.stl')
    with stl_path.open('w+') as handle:
        # Split the fixture into lines and strip everything after ``solid``
        # so the file starts with ``solid`` without a following name.
        lines = _STL_FILE.splitlines()
        lines[0] = lines[0].split()[0]
        handle.write('\n'.join(lines))
        handle.seek(0)
        mesh.Mesh.from_file(str(stl_path), fh=handle, speedups=speedups)
def test_ascii_with_blank_lines(tmpdir, speedups):
    # Variant of _STL_FILE with different leading whitespace; the parser must
    # tolerate the altered layout.
    _stl_file = '''
    solid test.stl
    facet normal -0.014565 0.073223 -0.002897
      outer loop
        vertex 0.399344 0.461940 1.044090
        vertex 0.500000 0.500000 1.500000
        vertex 0.576120 0.500000 1.117320
      endloop
    endfacet
    endsolid test.stl
    '''.lstrip()
    tmp_file = tmpdir.join('tmp.stl')
    with tmp_file.open('w+') as fh:
        fh.write(_stl_file)
        fh.seek(0)
        mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
def test_incomplete_ascii_file(tmpdir, speedups):
    """Truncated ASCII STL files must raise AssertionError at every cut point."""
    stl_path = tmpdir.join('tmp.stl')
    with stl_path.open('w+') as handle:
        handle.write('solid some_file.stl')
        handle.seek(0)
        with pytest.raises(AssertionError):
            mesh.Mesh.from_file(str(stl_path), fh=handle, speedups=speedups)
    # Cut the fixture off at several different points; all must fail.
    for offset in (-20, 82, 100):
        with stl_path.open('w+') as handle:
            handle.write(_STL_FILE[:-offset])
            handle.seek(0)
            with pytest.raises(AssertionError):
                mesh.Mesh.from_file(str(stl_path), fh=handle, speedups=speedups)
def test_corrupt_ascii_file(tmpdir, speedups):
    # Overwrite the middle of a valid ASCII file with junk; loading should
    # fail (the first case is only asserted with speedups on Python 3).
    tmp_file = tmpdir.join('tmp.stl')
    with tmp_file.open('w+') as fh:
        fh.write(_STL_FILE)
        fh.seek(40)
        print('####\n' * 100, file=fh)
        fh.seek(0)
        if speedups and sys.version_info.major != 2:
            with pytest.raises(AssertionError):
                mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
    with tmp_file.open('w+') as fh:
        fh.write(_STL_FILE)
        fh.seek(40)
        print(' ' * 100, file=fh)
        fh.seek(80)
        # Splice a packed little-endian int into the text to corrupt it.
        fh.write(struct.pack('<i', 10).decode('utf-8'))
        fh.seek(0)
        with pytest.raises(AssertionError):
            mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
def test_corrupt_binary_file(tmpdir, speedups):
    # 80 bytes of '#' header plus '#\0\0\0' loads (treated as binary-like
    # content), 90 bytes of text fails, and the same bytes re-written with a
    # ``solid`` prefix load again.
    tmp_file = tmpdir.join('tmp.stl')
    with tmp_file.open('w+') as fh:
        fh.write('#########\n' * 8)
        fh.write('#\0\0\0')
        fh.seek(0)
        mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
    with tmp_file.open('w+') as fh:
        fh.write('#########\n' * 9)
        fh.seek(0)
        with pytest.raises(AssertionError):
            mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
    with tmp_file.open('w+') as fh:
        fh.write('#########\n' * 8)
        fh.write('#\0\0\0')
        fh.seek(0)
        fh.write('solid test.stl')
        fh.seek(0)
        mesh.Mesh.from_file(str(tmp_file), fh=fh, speedups=speedups)
def test_duplicate_polygons():
    # NOTE(review): all three assignments write to data['vectors'][0], so
    # rows 1 and 2 remain all-zero.  This may be intentional (the zero rows
    # are the duplicate/empty polygons check() inspects, and
    # remove_empty_areas=False keeps them) — confirm before changing the
    # indices to [1] and [2].
    data = numpy.zeros(3, dtype=mesh.Mesh.dtype)
    data['vectors'][0] = numpy.array([[0, 0, 0],
                                      [1, 0, 0],
                                      [0, 1, 1.]])
    data['vectors'][0] = numpy.array([[0, 0, 0],
                                      [2, 0, 0],
                                      [0, 2, 1.]])
    data['vectors'][0] = numpy.array([[0, 0, 0],
                                      [3, 0, 0],
                                      [0, 3, 1.]])
    assert not mesh.Mesh(data, remove_empty_areas=False).check()
| [
"struct.pack",
"numpy.array",
"numpy.zeros",
"pytest.raises",
"stl.mesh.Mesh"
] | [((3715, 3752), 'numpy.zeros', 'numpy.zeros', (['(3)'], {'dtype': 'mesh.Mesh.dtype'}), '(3, dtype=mesh.Mesh.dtype)\n', (3726, 3752), False, 'import numpy\n'), ((3778, 3826), 'numpy.array', 'numpy.array', (['[[0, 0, 0], [1, 0, 0], [0, 1, 1.0]]'], {}), '([[0, 0, 0], [1, 0, 0], [0, 1, 1.0]])\n', (3789, 3826), False, 'import numpy\n'), ((3927, 3975), 'numpy.array', 'numpy.array', (['[[0, 0, 0], [2, 0, 0], [0, 2, 1.0]]'], {}), '([[0, 0, 0], [2, 0, 0], [0, 2, 1.0]])\n', (3938, 3975), False, 'import numpy\n'), ((4076, 4124), 'numpy.array', 'numpy.array', (['[[0, 0, 0], [3, 0, 0], [0, 3, 1.0]]'], {}), '([[0, 0, 0], [3, 0, 0], [0, 3, 1.0]])\n', (4087, 4124), False, 'import numpy\n'), ((1836, 1865), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1849, 1865), False, 'import pytest\n'), ((2839, 2868), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2852, 2868), False, 'import pytest\n'), ((3324, 3353), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3337, 3353), False, 'import pytest\n'), ((2097, 2126), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2110, 2126), False, 'import pytest\n'), ((2504, 2533), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2517, 2533), False, 'import pytest\n'), ((4216, 4257), 'stl.mesh.Mesh', 'mesh.Mesh', (['data'], {'remove_empty_areas': '(False)'}), '(data, remove_empty_areas=False)\n', (4225, 4257), False, 'from stl import mesh\n'), ((2768, 2789), 'struct.pack', 'struct.pack', (['"""<i"""', '(10)'], {}), "('<i', 10)\n", (2779, 2789), False, 'import struct\n')] |
from mri_modules.utils import *
import os
import numpy as np
import cv2
import shutil
from skimage.measure import marching_cubes_lewiner as marching_cubes
import stl
from stl import mesh
import tensorflow as tf
from tensorflow.keras.models import load_model
import skimage.transform
import nibabel as nib
import h5py
import scipy
from mri_modules.load_in_arrays import *
import time
import random
import tensorflow.keras as keras
from PIL import Image
def binarize(array, min_):
    """Threshold *array* at *min_*: values below become 0, all others 1.

    Returns a new array; the input is left untouched.
    """
    result = array.copy()
    below = array < min_
    result[below] = 0
    result[~below] = 1
    return result
def dilate_up(array, size, stacked = True):
    """Binary-dilate a 3D volume *size* iterations with a 6-connected kernel.

    stacked=True expects/returns the (1, z, y, x, 1) layout used elsewhere
    in this module; stacked=False works on a plain 3D array.
    """
    if stacked:
        volume = np.squeeze(array.copy()[0], axis = 3)
    else:
        volume = array.copy()
    volume[volume > 0] = 1
    # 6-connected structuring element (orthogonal neighbours only).
    cross = scipy.ndimage.generate_binary_structure(3, 1)
    grown = scipy.ndimage.binary_dilation(volume.astype('uint8'), cross, iterations=size)
    if stacked:
        return np.stack([np.stack([grown], axis = 3)])
    return grown
def translate_3d(array, translation):
    """Shift a 3D array by ``translation = (dz, dy, dx)``, zero-filling.

    Voxels shifted outside the volume are dropped.  This replaces the
    original triple Python loop (O(n^3) element assignments) with two slice
    assignments, and fixes a bug where negative translation components
    silently wrapped around via negative indexing instead of being clipped.
    """
    shifted = np.zeros_like(array)
    src = []
    dst = []
    for offset, size in zip(translation, array.shape):
        if offset >= 0:
            src.append(slice(0, size - offset))
            dst.append(slice(offset, size))
        else:
            src.append(slice(-offset, size))
            dst.append(slice(0, size + offset))
    # Empty slices (|offset| >= size) make this a no-op, matching the
    # "everything dropped" behavior.
    shifted[tuple(dst)] = array[tuple(src)]
    return shifted
def touching_island(reference, array, stacked = True):
    """Keep only the connected components of *array* that overlap *reference*.

    Components are labelled with 6-connectivity (orthogonal neighbours).
    Returns a binary mask in the same layout as the input.

    Bug fix: with ``stacked=False`` the original binarized the caller's
    arrays in place (``array[array > 0] = 1``); both inputs are now copied
    first.  Also uses the public ``scipy.ndimage.label`` instead of the
    deprecated ``scipy.ndimage.measurements.label``.
    """
    if stacked:
        array = np.squeeze(array.copy()[0], axis = 3)
        reference = np.squeeze(reference.copy()[0], axis = 3)
    else:
        array = array.copy()
        reference = reference.copy()
    array[array > 0] = 1
    reference[reference > 0] = 1
    masked = np.zeros_like(array)
    # 6-connected structuring element.
    touching_structure_3d = [[[0,0,0],
                              [0,1,0],
                              [0,0,0]],
                             [[0,1,0],
                              [1,1,1],
                              [0,1,0]],
                             [[0,0,0],
                              [0,1,0],
                              [0,0,0]]]
    markers, num_features = scipy.ndimage.label(array, touching_structure_3d)
    # Labels present anywhere the reference is set survive.
    reference_idx = np.unique(markers[reference == 1])
    for idx in reference_idx:
        masked[markers == idx] = 1
    masked[array == 0] = 0  # drop the background label (0)
    if stacked:
        return np.stack([np.stack([masked], axis = 3)])
    return masked
def biggest_island(input_array, stacked = True):
    """Zero out everything except the largest 6-connected island of non-zero voxels.

    Ties for the largest size are all kept, matching the original selection
    logic (labels whose count differs from the maximum are removed).
    """
    if stacked:
        volume = np.squeeze(input_array.copy()[0], axis = 3)
        binary = np.zeros_like(volume)
        binary[np.squeeze(input_array[0], axis = 3) > 0] = 1
    else:
        volume = input_array.copy()
        binary = np.zeros_like(volume)
        binary[input_array > 0] = 1
    # 6-connected structuring element (orthogonal neighbours only).
    touching_structure_3d = [[[0,0,0],
                              [0,1,0],
                              [0,0,0]],
                             [[0,1,0],
                              [1,1,1],
                              [0,1,0]],
                             [[0,0,0],
                              [0,1,0],
                              [0,0,0]]]
    markers, _ = scipy.ndimage.measurements.label(binary, touching_structure_3d)
    markers[binary == 0] = 0
    counts = np.bincount(markers.ravel())
    counts[0] = 0  # never let the background win
    smaller_labels = np.where(counts != np.max(counts))
    binary[np.isin(markers, smaller_labels)] = 0
    volume[binary == 0] = 0
    if stacked:
        return np.stack([np.stack([volume], axis = 3)])
    return volume
def combine_zeros(arrays):
    """Intersect masks: zero every voxel that is below 0.1 in any input array.

    Values are taken from the first array; the inputs are not modified.
    """
    merged = arrays[0].copy()
    for mask in arrays:
        merged[mask < 0.1] = 0
    return merged
def adaptive_threshold(array, course, precise, blur_precision = 0, stacked = True):
    """Keep only voxels that survive BOTH a coarse and a fine Gaussian
    adaptive threshold, applied slice by slice.

    array : volume assumed roughly in [0, 1] (values above 1 are clipped to
        255 after scaling); stacked=True expects the (1, z, y, x, 1) layout.
    course / precise : cv2.adaptiveThreshold block sizes for the coarse and
        fine passes (must be odd, per OpenCV).
    blur_precision : sigma of the Gaussian pre-blur used only for the
        threshold decision.
    Returns the masked volume rescaled back to [0, 1] in the input layout.
    """
    if stacked:
        thresholded_array = np.squeeze(array.copy()[0], axis = 3)
        thresholded_array = thresholded_array*255
        thresholded_array[thresholded_array > 255] = 255
    else:
        thresholded_array = array.copy()
        thresholded_array = thresholded_array*255
        thresholded_array[thresholded_array > 255] = 255
    # Blur only influences which voxels pass; kept values stay unblurred.
    blurred = scipy.ndimage.gaussian_filter(thresholded_array, blur_precision)
    adap = []
    for image in blurred:
        thresh = cv2.adaptiveThreshold(image.astype('uint8'), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, course, 2)
        thresh2 = cv2.adaptiveThreshold(image.astype('uint8'), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, precise, 2)
        # Intersection of the two binary masks.
        thresh3 = thresh.copy()
        thresh3[:] = 255
        thresh3[thresh2 == 0] = 0
        thresh3[thresh == 0] = 0
        adap.append(thresh3)
    adap = np.stack(adap)
    thresholded_array[adap == 0] = 0
    if stacked:
        return np.stack([np.stack([thresholded_array/255], axis = 3)])
    else:
        return thresholded_array/255
def generate_stl(array_3d, stl_file_path, stl_resolution):
    """Run marching cubes over *array_3d* and save the surface as an STL file.

    stl_resolution is the marching-cubes step size (larger = coarser mesh).
    A ``.stl`` suffix is appended if missing, and the output directory is
    created on demand.

    Bug fixes: the local variable ``mesh`` shadowed the imported ``mesh``
    module (renamed to ``surface``), and ``os.makedirs('')`` raised when the
    output path had no directory component (now guarded).
    """
    volume = array_3d.copy()
    verts, faces, norm, val = marching_cubes(volume, 0.01, step_size = stl_resolution, allow_degenerate=True)
    surface = stl.mesh.Mesh(np.zeros(faces.shape[0], dtype=stl.mesh.Mesh.dtype))
    for i, f in enumerate(faces):
        for j in range(3):
            surface.vectors[i][j] = verts[f[j],:]
    if not stl_file_path.endswith(".stl"):
        stl_file_path += ".stl"
    out_dir = os.path.dirname(stl_file_path)
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)
    surface.save(stl_file_path)
def find_median_grayscale(array):
    """Median of all voxel values after re-weighting the zeros.

    Every zero voxel contributes an extra sentinel value of 1000 to the
    pool before the median is taken (pushing the median toward the bright
    end when the volume is mostly empty).
    """
    zero_count = int(np.count_nonzero(array == 0))
    pooled = array.flatten().tolist()
    pooled.extend([1000] * zero_count)
    return np.median(pooled)
def locate_bounds(array, stacked = True):
    """Bounding box of the non-zero voxels.

    Returns ``[left, right, low, high, shallow, deep]`` — the min/max x, y
    and z indices of voxels > 0.  Replaces the original O(n^3) triple Python
    loop with a single vectorized pass.  For an all-zero volume the original
    degenerate result ``[x_size, 0, y_size, 0, z_size, 0]`` is preserved.
    """
    if stacked:
        volume = np.squeeze(array.copy()[0], axis = 3)
    else:
        volume = array.copy()
    coords = np.argwhere(volume > 0)
    if coords.size == 0:
        # No positive voxels: mirrors the untouched initial values.
        return [volume.shape[2], 0, volume.shape[1], 0, volume.shape[0], 0]
    shallow, low, left = coords.min(axis=0)
    deep, high, right = coords.max(axis=0)
    return [int(left), int(right), int(low), int(high), int(shallow), int(deep)]
def pad(array):
    """Insert two blank slices after every z-slice, then shift the result by (1, 1, 1).

    The translate_3d shift drops the final slice/row/column, leaving a
    zero border at the low end of each axis.
    """
    blank_shape = (array.shape[1], array.shape[2])
    interleaved = []
    for plane in array:
        interleaved.append(plane)
        interleaved.append(np.zeros(blank_shape))
        interleaved.append(np.zeros(blank_shape))
    return translate_3d(np.stack(interleaved), [1, 1, 1])
def write_images(array, test_folder_path):
    # Write each slice of `array` (values assumed in [0, 1]) as <index>.png
    # into test_folder_path, creating the directory if needed.
    # NOTE(review): this function is re-defined later in this file (a
    # PIL-based version) which shadows this cv2-based one at import time.
    if not os.path.exists(test_folder_path):
        os.makedirs(test_folder_path)
    for n,image in enumerate(array):
        file_name = str(str(n) +'.png')
        cv2.imwrite(os.path.join(test_folder_path, file_name), image*255)
def circle_highlighted(reference, binary, color):
    """Outline the non-zero regions of *binary* on a copy of *reference*.

    Works slice by slice: contours of the binarized mask are drawn with
    cv2.drawContours in the given color (thickness 1).  Returns the
    annotated copy; neither input is modified (binary is copied first).
    """
    circled = reference.copy()
    binary = binary.copy()
    binary[binary > 0] = 1
    for n, image in enumerate(binary):
        contours, _ = cv2.findContours(image.astype('uint8'),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(circled[n], contours, -1,color, 1)
    return circled
def get_folder_path(path, index, img_id):
    """Return the *index*-th directory under *path* whose name contains *img_id*.

    The match is case-insensitive and the tree is walked bottom-up
    (``topdown=False``), as before.  Raises IndexError when fewer than
    ``index + 1`` matches exist (unchanged).

    Bug fix: the original re-used the name ``path`` for os.walk's root
    variable, shadowing the parameter; renamed to ``root`` for clarity.
    """
    matches = []
    for root, dirs, files in os.walk(path, topdown=False):
        for dir_name in dirs:
            if img_id in dir_name.lower():
                matches.append(os.path.join(root, dir_name))
    return matches[index]
class highlight_ct:
    """Pipeline wrapper around one DICOM series: load, resample, threshold,
    morphological clean-up and STL export.

    NOTE(review): `dicom` (the legacy pydicom module name) is not imported in
    the visible file header — presumably re-exported by `mri_modules`;
    confirm before relying on it.
    """
    def __init__(self, input_path):
        # Directory containing the .dcm slices of a single scan.
        self.input_path = input_path
        self.file_names = os.listdir(input_path)
    def load_scan(self):
        """Read all .dcm files, sort by InstanceNumber, and stamp SliceThickness."""
        ##loads and sorts the data in as a dcm type array
        raw_data = [dicom.read_file(self.input_path + '/' + s) for s in os.listdir(self.input_path) if s.endswith(".dcm")]
        raw_data.sort(key = lambda x: int(x.InstanceNumber))
        ##sets the slice thickness
        try:
            slice_thickness = np.abs(raw_data[0].ImagePositionPatient[2] - raw_data[1].ImagePositionPatient[2])
        # NOTE(review): bare except — any failure (not just a missing
        # ImagePositionPatient) falls back to SliceLocation.
        except:
            slice_thickness = np.abs(raw_data[0].SliceLocation - raw_data[1].SliceLocation)
        for s in raw_data:
            s.SliceThickness = slice_thickness
        self.raw_data = raw_data ##update the output
    def generate_pixel_data(self):
        ## creates a 3d array of pixel data from the raw_data
        unprocessed_pixel_data = np.stack([s.pixel_array for s in self.raw_data])
        #unprocessed_pixel_data = (np.maximum(unprocessed_pixel_data,0) / unprocessed_pixel_data.max()) * 255.0
        self.original_pixel_array = unprocessed_pixel_data ##update the output
        return self.original_pixel_array
    def resample_array(self):
        """Resample the voxel grid to 1x1x1 mm spacing using the DICOM metadata."""
        ##resamples the array using the slice thickness obtained earlier
        new_spacing=[1,1,1]
        spacing = map(float, ([self.raw_data[0].SliceThickness, self.raw_data[0].PixelSpacing[0], self.raw_data[0].PixelSpacing[1]]))
        spacing = np.array(list(spacing))
        resize_factor = spacing / new_spacing
        new_real_shape = self.original_pixel_array.shape * resize_factor
        new_shape = np.round(new_real_shape)
        real_resize_factor = new_shape / self.original_pixel_array.shape
        self.resize_factor = real_resize_factor ##creates a value called resize factor that can be used for image outputting later on
        self.resampled_array = scipy.ndimage.interpolation.zoom(self.original_pixel_array, real_resize_factor) ##update the output
        return self.resampled_array
    def circle_highlighted(self, array, color):
        """Outline the non-zero regions of *array* on a copy of the resampled scan.

        NOTE(review): unlike the module-level circle_highlighted, this one
        also zeroes everything outside the mask before returning.
        """
        circled = self.resampled_array.copy()
        binary = array.copy()
        binary[:] = 0
        binary[array > 0] = 255
        cont = []
        for n, image in enumerate(binary):
            contours, _ = cv2.findContours(image.astype('uint8'),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
            #cont.append(contours)
            cv2.drawContours(circled[n], contours, -1,color, 1)
        circled[binary == 0] = 0
        return circled
    def write_test_images(self, array_3d, test_folder_path):
        """Normalize the volume to [0, 1] by its max and write each slice as PNG."""
        array_3d = array_3d/np.max(array_3d)
        print(np.max(array_3d))
        for n,image in enumerate(array_3d):
            ##finds the index of the corresponding file name in the original input path from the resize factor after resampling
            file_name = str(str(n) +'.png')
            ##writes the resulting image as a png in the test_folder_path
            cv2.imwrite(os.path.join(test_folder_path, file_name), image*255)
    def generate_stl(self, array_3d, stl_file_path, name, stl_resolution):
        """Marching-cubes the (stacked) volume and save it as <stl_file_path>/<name>.stl."""
        print('Generating mesh...')
        ##transposes the image to be the correct shape because np arrays are technically flipped
        transposed = np.squeeze(array_3d.copy()[0], axis = 3)
        ##uses the marching cubes algorithm to make a list of vertices, faces, normals, and values
        verts, faces, norm, val = marching_cubes(transposed, 0.01, step_size = stl_resolution, allow_degenerate=True)
        # NOTE(review): local `mesh` shadows the imported `mesh` module here.
        mesh = stl.mesh.Mesh(np.zeros(faces.shape[0], dtype=stl.mesh.Mesh.dtype))
        print('Vertices obatined:', len(verts))
        print('')
        for i, f in enumerate(faces):
            for j in range(3):
                mesh.vectors[i][j] = verts[f[j],:]
        path = stl_file_path + '/' + name
        if not path.endswith(".stl"):
            path += ".stl"
        if not os.path.exists(stl_file_path):
            os.makedirs(stl_file_path)
        mesh.save(path)
    def threshold_scans(self, input_array, lower_thresh, upper_thresh, blur_precision):
        """Mask the resampled scan to voxels whose blurred value lies in [lower, upper].

        NOTE(review): this squeezes self.resampled_array as if it were in the
        stacked (1, z, y, x, 1) layout, but resample_array stores a plain 3D
        array — confirm which layout callers actually provide.
        """
        input_array = np.squeeze(input_array.copy()[0], axis = 3)
        ##updates the object with the chosen lower and upper threshold
        self.lower_thresh = lower_thresh
        self.upper_thresh = upper_thresh
        ##blurs the scan to do very simple denoising
        blurred_scans = scipy.ndimage.gaussian_filter(input_array, blur_precision)
        masked_array = np.squeeze(self.resampled_array.copy()[0], axis = 3)
        ##creates a mask that is the same shape as the original array and sets it to 255
        mask = masked_array.copy()
        mask[:] = 255
        ##sets the areas of the mask where the blurred image is not within the threshold to 0
        mask[blurred_scans > upper_thresh] = 0
        mask[blurred_scans < lower_thresh] = 0
        ##sets the masked off areas in the masked image output to 0
        masked_array[mask == 0] = 0
        ##finds the contours and draws them in the image with circled areas
        self.thresholded_array = masked_array ##update the output
        self.blurred_array = blurred_scans
        return np.stack([np.stack([self.thresholded_array], axis = 3)])
    def erode_down(self, array, size):
        """Binary-erode the stacked volume *size* iterations (6-connected kernel)."""
        binary = np.squeeze(array.copy()[0], axis = 3)
        masked = np.squeeze(array.copy()[0], axis = 3)
        binary[:] = 0
        binary[np.squeeze(array.copy()[0], axis = 3) > 0] = 255
        ##creates a kernel which is a 3 by 3 square of ones as the main kernel for all denoising
        kernel = scipy.ndimage.generate_binary_structure(3, 1)
        ##erodes away the white areas of the 3d array to seperate the loose parts
        blew_up = scipy.ndimage.binary_erosion(binary.astype('uint8'), kernel, iterations=size)
        masked[blew_up == 0] = 0
        return np.stack([np.stack([masked], axis = 3)])
    def invert(self, array):
        """Return the resampled scan masked to where *array* is zero (mask inversion)."""
        masked = self.resampled_array.copy()
        binary = array.copy()
        binary[:] = 1
        binary[array > 0] = 0
        masked[binary == 0] = 0
        return masked
def write_images(array, test_folder_path):
    # Write each slice as <index>.png, rendering voxels labelled 2 in red.
    # NOTE(review): this re-definition shadows the cv2-based write_images
    # defined earlier in this file.
    if not os.path.exists(test_folder_path):
        os.makedirs(test_folder_path)
    for n,image in enumerate(array):
        file_name = str(str(n) +'.png')
        # Grayscale -> RGB, then paint label-2 voxels pure red.
        image_color = np.stack([image*255,image*255,image*255], axis = -1)
        image_color[:,:,0][image == 2] = 255
        image_color[:,:,1][image == 2] = 0
        image_color[:,:,2][image == 2] = 0
        Image.fromarray(image_color.astype("uint8")).save(os.path.join(test_folder_path, file_name), "PNG")
def down_block(x, filters, dropout, kernel_size=(3, 3, 3), padding="same", strides=1):
    """One U-Net encoder stage: Conv3D+ReLU, Dropout, Conv3D+ReLU, then 2x2x2 max-pool.

    Returns (pre-pool features for the skip connection, pooled output).
    """
    print(x.shape)
    conv = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape=x.shape[1:], kernel_initializer='he_normal')(x)
    conv = keras.layers.Dropout(dropout)(conv)
    conv = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape=conv.shape[1:], kernel_initializer='he_normal')(conv)
    pooled = keras.layers.MaxPool3D(pool_size=(2, 2, 2))(conv)
    return conv, pooled
def up_block(x, skip, filters, dropout, kernel_size=(3, 3, 3), padding="same", strides=1):
    """One U-Net decoder stage: 2x upsample, concat the skip tensor, two Conv3D+ReLU layers."""
    upsampled = keras.layers.UpSampling3D((2, 2, 2))(x)
    merged = keras.layers.Concatenate()([upsampled, skip])
    conv = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape=merged.shape[1:], kernel_initializer='he_normal')(merged)
    conv = keras.layers.Dropout(dropout)(conv)
    conv = keras.layers.Conv3D(filters, kernel_size, padding=padding, strides=strides, activation="relu", input_shape=conv.shape[1:], kernel_initializer='he_normal')(conv)
    return conv
def bottleneck(x, filters, dropout, kernel_size=(3, 3, 3), padding="same", strides=1):
    """Bottom of the U-Net: two Conv3D+ReLU layers with dropout, no pooling."""
    conv_args = dict(padding=padding, strides=strides, activation="relu",
                     kernel_initializer='he_normal')
    out = keras.layers.Conv3D(
        filters, kernel_size, input_shape=x.shape[1:], **conv_args)(x)
    out = keras.layers.Dropout(dropout)(out)
    out = keras.layers.Conv3D(
        filters, kernel_size, input_shape=out.shape[1:], **conv_args)(out)
    return out
def ConvNetTumor(x, y, z):
    """Build a 3D U-Net for tumor segmentation over an (x, y, z, 1) volume.

    Architecture: four down_block encoder stages (16 -> 128 filters), a
    256-filter bottleneck, four up_block decoder stages with skip
    connections, and a final 1x1x1 sigmoid Conv3D producing a per-voxel
    probability map.

    Args:
        x, y, z: spatial dimensions of the single-channel input volume.

    Returns:
        An uncompiled keras Model mapping the input volume to a
        single-channel segmentation probability volume of the same size.
    """
    inputs = keras.layers.Input((x, y, z, 1))
    # Encoder: each stage halves the spatial resolution.
    c1, p1 = down_block(inputs, 16, 0.1)   # 128 -> 64
    c2, p2 = down_block(p1, 32, 0.1)       # 64 -> 32
    c3, p3 = down_block(p2, 64, 0.2)       # 32 -> 16
    c4, p4 = down_block(p3, 128, 0.3)      # 16 -> 8
    bn = bottleneck(p4, 256, 0.4)
    # Decoder: each stage doubles the resolution and merges the matching skip.
    u1 = up_block(bn, c4, 128, 0.3)        # 8 -> 16
    u2 = up_block(u1, c3, 64, 0.2)         # 16 -> 32
    u3 = up_block(u2, c2, 32, 0.1)         # 32 -> 64
    u4 = up_block(u3, c1, 16, 0.1)         # 64 -> 128
    # Consistency fix: use the same `keras` namespace as the rest of the
    # model (was `tf.keras.layers.Conv3D`); behavior is identical.
    outputs = keras.layers.Conv3D(
        1, (1, 1, 1), padding='same', activation="sigmoid")(u4)
    return keras.models.Model(inputs, outputs)
# ---- Script configuration: model checkpoints, dataset locations, bookkeeping ----
# Edge length of the cubic volume every scan is resampled to (128^3 voxels).
image_size = 128
# Pretrained brain-segmentation checkpoints, one per viewing axis.
brain_seg_model_top = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_top.h5"
brain_seg_model_front = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_front.h5"
brain_seg_model_side = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_side.h5"
brain_seg_model_edges = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/flair_brain_edges.h5"
# Tumor-segmentation weights loaded into the ConvNetTumor architecture below.
tumor_seg_model = "C:/Users/JiangQin/Documents/c++/build-MRImage3D-Desktop_Qt_5_15_0_MSVC2015_64bit-Debug/models/Model 34.h5"
input_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/QIN GBM Treatment Response"
# NOTE(review): this output_path is dead — it is overwritten a few lines below.
output_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/QIN GBM Treatment Response/loaded arrays 2"
sets = []
images = []
masks = []
start = 0
current_set = 0
# Wall-clock reference for the per-set timing printout (never reset per set).
start_time = time.time()
backwards_indexes = []
# Scan indexes (Set*2) whose tumors worsened / improved between the two scans.
worsen_indexes = [4,28,30]
improve_indexes = [0,12,16,22,32,38]
# Effective output location for the saved image/mask .h5 pairs.
output_path = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/raw ct files/Brain-Tumor-Progression/loaded arrays for growth prediction"
folder_output = "C:/Users/JiangQin/Documents/python/ct to tumor identifier project/image ct visualizations/Machine Learning 2 models test/growth prediction example"
# Main driver: for each selected patient Set, load the pair of scans
# (index Set*2 and Set*2+1), normalize/segment both, and save a
# (brain, tumor) training example for growth prediction.
for Set in range(start, 20):
    if Set * 2 in worsen_indexes or Set*2 in improve_indexes:
        try:
            # -------- Pass 1: first scan of the pair (index Set*2) --------
            file = get_folder_path("C:/Users/JiangQin/Documents/python/ct to tumor identifier project/raw ct files/Brain-Tumor-Progression", Set*2, "flair")
            print("\n\nSet:", Set, "\n\n")
            brain = highlight_ct(file)
            print('initialized')
            brain.load_scan()
            print('loaded scans')
            pixel = brain.generate_pixel_data()
            print('generated pixel array')
            image_data = brain.resample_array()
            # Normalize intensities to [0, 1] by the global maximum.
            image_data = image_data/np.max(image_data)
            blank_unscaled_array = image_data.copy()
            blank_unscaled_array[:] = 0
            # Zoom factors that map the native scan shape onto image_size^3.
            z_zoom = image_size/image_data.shape[0]
            y_zoom = image_size/image_data.shape[1]
            x_zoom = image_size/image_data.shape[2]
            rescaled_blank = skimage.transform.rescale(blank_unscaled_array, (z_zoom, y_zoom, x_zoom))
            update_label("Tranforming data...")
            # Wrap as (1, z, y, x, 1) — the layout the models expect.
            image_data = np.stack([np.stack([image_data], axis = 3)])
            # Threshold + keep only the largest connected component to find
            # the head's bounding box.
            bounds_finder = image_data.copy()
            bounds_finder = adaptive_threshold(bounds_finder, 101, 45, 1)
            bounds_finder = biggest_island(bounds_finder)
            image_data = biggest_island(image_data)
            bounds = locate_bounds(bounds_finder)
            [left,right,low,high,shallow,deep] = bounds
            x_size = abs(left-right)
            y_size = abs(low-high)
            z_size = abs(shallow-deep)
            max_size = np.max([x_size, y_size, z_size])
            # Scale so the head fills 80% of the cube; keep the inverse to
            # undo the scaling later.
            rescale_factor = (image_size*0.8)/max_size
            backscale_factor = 1/rescale_factor
            image_data = skimage.transform.rescale(np.squeeze(image_data.copy()[0], axis = 3), (rescale_factor, rescale_factor, rescale_factor))
            bounds_finder = image_data.copy()
            bounds_finder = adaptive_threshold(bounds_finder, 101, 45, 1, stacked = False)
            bounds_finder = biggest_island(bounds_finder, stacked = False)
            image_data = biggest_island(image_data, stacked = False)
            bounds = locate_bounds(np.stack([np.stack([bounds_finder], axis = 3)]))
            [left,right,low,high,shallow,deep] = bounds
            # Shift the head to the volume origin.
            image_data = translate_3d(image_data, [-shallow,-low,-left])
            original_unscaled_array = skimage.transform.rescale(image_data, (backscale_factor, backscale_factor, backscale_factor))
            write_images(original_unscaled_array, folder_output + "/orig")
            # Copy the centered head into the fixed-size blank cube,
            # voxel by voxel; out-of-range indices are simply dropped.
            rescaled_array = rescaled_blank.copy()
            for z,Slice in enumerate(image_data):
                for y,line in enumerate(Slice):
                    for x,pixel in enumerate(line):
                        try:
                            rescaled_array[z][y][x] = pixel
                        # NOTE(review): bare except silently drops any voxel
                        # outside the cube — presumably only IndexError is
                        # intended here.
                        except:
                            pass
            # Three axis-aligned views of the same volume for the three
            # per-axis brain-segmentation models.
            original_array_top = np.stack([np.stack([rescaled_array], axis = 3)])
            original_array_front = np.stack([np.stack([np.rot90(rescaled_array, axes = (1,0))], axis = 3)])
            original_array_side = np.stack([np.stack([np.rot90(rescaled_array.T, axes = (1,2))], axis = 3)])
            update_label("Performing brain segmentation for median calculation...")
            # First (rough) brain segmentation from all three views, used
            # only to estimate the brain's median grayscale value.
            segmentations = []
            brain_seg_top = load_model(brain_seg_model_top)
            brain_mask_top = brain_seg_top.predict(original_array_top)
            binary_brain_top = binarize(brain_mask_top, 0.5)
            binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = 3)
            segmentations.append(binary_brain_top_top_ized)
            brain_seg_front = load_model(brain_seg_model_front)
            brain_mask_front = brain_seg_front.predict(original_array_front)
            binary_brain_front = binarize(brain_mask_front, 0.5)
            binary_brain_front_top_ized = np.rot90(np.squeeze(binary_brain_front.copy()[0], axis = 3), axes = (0,1))
            segmentations.append(binary_brain_front_top_ized)
            brain_seg_side = load_model(brain_seg_model_side)
            brain_mask_side = brain_seg_side.predict(original_array_side)
            binary_brain_side = binarize(brain_mask_side, 0.5)
            binary_brain_side_top_ized = np.rot90(np.squeeze(binary_brain_side.copy()[0], axis = 3), axes = (2,1)).T
            segmentations.append(binary_brain_side_top_ized)
            binary_brain_wo_median_combined = combine_zeros(segmentations)
            median = find_median_grayscale(np.squeeze(original_array_top[0], axis = 3)[binary_brain_wo_median_combined > 0])
            update_label("Performing brain segmentation with median...")
            # Second brain segmentation on the median-normalized volume
            # (top view only for this first scan).
            segmentations = []
            brain_seg_top = load_model(brain_seg_model_top)
            new_array_top = original_array_top/(median/0.2)
            brain_mask_top = brain_seg_top.predict(new_array_top)
            binary_brain_top = binarize(brain_mask_top, 0.7)
            binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = 3)
            segmentations.append(binary_brain_top_top_ized)
            binary_brain_final_combined = combine_zeros(segmentations)
            update_label("Performing tumor segmentation...")
            # Mask the scan down to brain-only voxels, then run the tumor model.
            only_brain = original_array_top.copy()
            only_brain[np.stack([np.stack([binary_brain_final_combined], axis = 3)]) == 0] = 0
            tumor_seg_top = ConvNetTumor(128,128,128)
            tumor_seg_top.load_weights(tumor_seg_model)
            #tumor_seg_top = load_model(tumor_seg_model)
            new_array = only_brain/(median/0.3)
            tumor_mask = tumor_seg_top.predict(new_array)
            binary_tumor1 = np.squeeze(binarize(tumor_mask, 0.9)[0], axis = 3)
            only_brain1 = np.squeeze(only_brain[0], axis = 3)
            write_images(only_brain1/(median/0.3), folder_output + "/only brain 1")
            write_images(binary_tumor1, folder_output + "/tumor 1")
            ############################################################################
            # -------- Pass 2: second scan of the pair (index Set*2 + 1). --------
            # Same pipeline as above, but the median-normalized brain
            # segmentation uses all three views this time.
            file = get_folder_path("C:/Users/JiangQin/Documents/python/ct to tumor identifier project/raw ct files/Brain-Tumor-Progression", (Set*2)+1, "flair")
            brain = highlight_ct(file)
            print('initialized')
            brain.load_scan()
            print('loaded scans')
            pixel = brain.generate_pixel_data()
            print('generated pixel array')
            image_data = brain.resample_array()
            image_data = image_data/np.max(image_data)
            blank_unscaled_array = image_data.copy()
            blank_unscaled_array[:] = 0
            z_zoom = image_size/image_data.shape[0]
            y_zoom = image_size/image_data.shape[1]
            x_zoom = image_size/image_data.shape[2]
            rescaled_blank = skimage.transform.rescale(blank_unscaled_array, (z_zoom, y_zoom, x_zoom))
            update_label("Tranforming data...")
            image_data = np.stack([np.stack([image_data], axis = 3)])
            bounds_finder = image_data.copy()
            bounds_finder = adaptive_threshold(bounds_finder, 101, 45, 1)
            bounds_finder = biggest_island(bounds_finder)
            image_data = biggest_island(image_data)
            bounds = locate_bounds(bounds_finder)
            [left,right,low,high,shallow,deep] = bounds
            x_size = abs(left-right)
            y_size = abs(low-high)
            z_size = abs(shallow-deep)
            max_size = np.max([x_size, y_size, z_size])
            rescale_factor = (image_size*0.8)/max_size
            backscale_factor = 1/rescale_factor
            image_data = skimage.transform.rescale(np.squeeze(image_data.copy()[0], axis = 3), (rescale_factor, rescale_factor, rescale_factor))
            bounds_finder = image_data.copy()
            bounds_finder = adaptive_threshold(bounds_finder, 101, 45, 1, stacked = False)
            bounds_finder = biggest_island(bounds_finder, stacked = False)
            image_data = biggest_island(image_data, stacked = False)
            bounds = locate_bounds(np.stack([np.stack([bounds_finder], axis = 3)]))
            [left,right,low,high,shallow,deep] = bounds
            image_data = translate_3d(image_data, [-shallow,-low,-left])
            original_unscaled_array = skimage.transform.rescale(image_data, (backscale_factor, backscale_factor, backscale_factor))
            rescaled_array = rescaled_blank.copy()
            for z,Slice in enumerate(image_data):
                for y,line in enumerate(Slice):
                    for x,pixel in enumerate(line):
                        try:
                            rescaled_array[z][y][x] = pixel
                        except:
                            pass
            original_array_top = np.stack([np.stack([rescaled_array], axis = 3)])
            original_array_front = np.stack([np.stack([np.rot90(rescaled_array, axes = (1,0))], axis = 3)])
            original_array_side = np.stack([np.stack([np.rot90(rescaled_array.T, axes = (1,2))], axis = 3)])
            update_label("Performing brain segmentation for median calculation...")
            segmentations = []
            brain_seg_top = load_model(brain_seg_model_top)
            brain_mask_top = brain_seg_top.predict(original_array_top)
            binary_brain_top = binarize(brain_mask_top, 0.5)
            binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = 3)
            segmentations.append(binary_brain_top_top_ized)
            brain_seg_front = load_model(brain_seg_model_front)
            brain_mask_front = brain_seg_front.predict(original_array_front)
            binary_brain_front = binarize(brain_mask_front, 0.5)
            binary_brain_front_top_ized = np.rot90(np.squeeze(binary_brain_front.copy()[0], axis = 3), axes = (0,1))
            segmentations.append(binary_brain_front_top_ized)
            brain_seg_side = load_model(brain_seg_model_side)
            brain_mask_side = brain_seg_side.predict(original_array_side)
            binary_brain_side = binarize(brain_mask_side, 0.5)
            binary_brain_side_top_ized = np.rot90(np.squeeze(binary_brain_side.copy()[0], axis = 3), axes = (2,1)).T
            segmentations.append(binary_brain_side_top_ized)
            binary_brain_wo_median_combined = combine_zeros(segmentations)
            median = find_median_grayscale(np.squeeze(original_array_top[0], axis = 3)[binary_brain_wo_median_combined > 0])
            update_label("Performing brain segmentation with median...")
            segmentations = []
            brain_seg_top = load_model(brain_seg_model_top)
            new_array_top = original_array_top/(median/0.2)
            brain_mask_top = brain_seg_top.predict(new_array_top)
            binary_brain_top = binarize(brain_mask_top, 0.7)
            binary_brain_top_top_ized = np.squeeze(binary_brain_top.copy()[0], axis = 3)
            segmentations.append(binary_brain_top_top_ized)
            brain_seg_front = load_model(brain_seg_model_front)
            new_array_front = original_array_front/(median/0.2)
            brain_mask_front = brain_seg_front.predict(new_array_front)
            binary_brain_front = binarize(brain_mask_front, 0.5)
            binary_brain_front_top_ized = np.rot90(np.squeeze(binary_brain_front.copy()[0], axis = 3), axes = (0,1))
            segmentations.append(binary_brain_front_top_ized)
            brain_seg_side = load_model(brain_seg_model_side)
            new_array_side = original_array_side/(median/0.2)
            brain_mask_side = brain_seg_side.predict(new_array_side)
            binary_brain_side = binarize(brain_mask_side, 0.5)
            binary_brain_side_top_ized = np.rot90(np.squeeze(binary_brain_side.copy()[0], axis = 3), axes = (2,1)).T
            segmentations.append(binary_brain_side_top_ized)
            binary_brain_final_combined = combine_zeros(segmentations)
            update_label("Performing tumor segmentation...")
            only_brain = original_array_top.copy()
            only_brain[np.stack([np.stack([binary_brain_final_combined], axis = 3)]) == 0] = 0
            tumor_seg_top = ConvNetTumor(128,128,128)
            tumor_seg_top.load_weights(tumor_seg_model)
            #tumor_seg_top = load_model(tumor_seg_model)
            new_array = only_brain/(median/0.3)
            tumor_mask = tumor_seg_top.predict(new_array)
            binary_tumor2 = np.squeeze(binarize(tumor_mask, 0.9)[0], axis = 3)
            only_brain2 = np.squeeze(only_brain[0], axis = 3)
            write_images(only_brain2/(median/0.3), folder_output + "/only brain 2")
            write_images(binary_tumor2, folder_output + "/tumor 2")
            # Save (earlier brain+tumor as input, later tumor as target) for
            # worsening cases, and the reverse pairing for improving cases.
            if Set*2 in worsen_indexes:
                complete_image = np.stack([only_brain1,binary_tumor1],axis = -1)
                save_array(complete_image, output_path + "/" + str(current_set) + "/image.h5")
                save_array(binary_tumor2, output_path + "/" + str(current_set) + "/mask.h5")
            elif Set*2 in improve_indexes:
                complete_image = np.stack([only_brain2,binary_tumor2],axis = -1)
                save_array(complete_image, output_path + "/" + str(current_set) + "/image.h5")
                save_array(binary_tumor1, output_path + "/" + str(current_set) + "/mask.h5")
            print("saved image")
            # NOTE(review): start_time is set once before the loop, so this
            # reports cumulative elapsed time, not time for this one set.
            print ('Finished one set in', int((time.time() - start_time)/60), 'minutes and ', int((time.time() - start_time) % 60), 'seconds.')
            current_set += 1
        # NOTE(review): blanket catch keeps the batch running past a bad
        # scan, but it also hides programming errors — consider narrowing.
        except Exception as e:
            print(e)
            print("\n\nFAILED\n\n")
| [
"tensorflow.keras.layers.Conv3D",
"scipy.ndimage.measurements.label",
"numpy.isin",
"skimage.measure.marching_cubes_lewiner",
"numpy.count_nonzero",
"tensorflow.keras.layers.UpSampling3D",
"tensorflow.keras.models.load_model",
"numpy.rot90",
"scipy.ndimage.gaussian_filter",
"scipy.ndimage.interpol... | [((19163, 19174), 'time.time', 'time.time', ([], {}), '()\n', (19172, 19174), False, 'import time\n'), ((827, 872), 'scipy.ndimage.generate_binary_structure', 'scipy.ndimage.generate_binary_structure', (['(3)', '(1)'], {}), '(3, 1)\n', (866, 872), False, 'import scipy\n'), ((2353, 2415), 'scipy.ndimage.measurements.label', 'scipy.ndimage.measurements.label', (['array', 'touching_structure_3d'], {}), '(array, touching_structure_3d)\n', (2385, 2415), False, 'import scipy\n'), ((2437, 2471), 'numpy.unique', 'np.unique', (['markers[reference == 1]'], {}), '(markers[reference == 1])\n', (2446, 2471), True, 'import numpy as np\n'), ((3493, 3556), 'scipy.ndimage.measurements.label', 'scipy.ndimage.measurements.label', (['binary', 'touching_structure_3d'], {}), '(binary, touching_structure_3d)\n', (3525, 3556), False, 'import scipy\n'), ((3713, 3740), 'numpy.isin', 'np.isin', (['markers', 'noise_idx'], {}), '(markers, noise_idx)\n', (3720, 3740), True, 'import numpy as np\n'), ((4513, 4577), 'scipy.ndimage.gaussian_filter', 'scipy.ndimage.gaussian_filter', (['thresholded_array', 'blur_precision'], {}), '(thresholded_array, blur_precision)\n', (4542, 4577), False, 'import scipy\n'), ((5066, 5080), 'numpy.stack', 'np.stack', (['adap'], {}), '(adap)\n', (5074, 5080), True, 'import numpy as np\n'), ((5408, 5484), 'skimage.measure.marching_cubes_lewiner', 'marching_cubes', (['array', '(0.01)'], {'step_size': 'stl_resolution', 'allow_degenerate': '(True)'}), '(array, 0.01, step_size=stl_resolution, allow_degenerate=True)\n', (5422, 5484), True, 'from skimage.measure import marching_cubes_lewiner as marching_cubes\n'), ((5878, 5902), 'stl.mesh.save', 'mesh.save', (['stl_file_path'], {}), '(stl_file_path)\n', (5887, 5902), False, 'from stl import mesh\n'), ((6152, 6181), 'numpy.median', 'np.median', (['single_dimensional'], {}), '(single_dimensional)\n', (6161, 6181), True, 'import numpy as np\n'), ((8480, 8508), 'os.walk', 'os.walk', (['path'], 
{'topdown': '(False)'}), '(path, topdown=False)\n', (8487, 8508), False, 'import os\n'), ((17305, 17337), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['(x, y, z, 1)'], {}), '((x, y, z, 1))\n', (17323, 17337), True, 'import tensorflow.keras as keras\n'), ((18069, 18104), 'tensorflow.keras.models.Model', 'keras.models.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (18087, 18104), True, 'import tensorflow.keras as keras\n'), ((5513, 5564), 'numpy.zeros', 'np.zeros', (['faces.shape[0]'], {'dtype': 'stl.mesh.Mesh.dtype'}), '(faces.shape[0], dtype=stl.mesh.Mesh.dtype)\n', (5521, 5564), True, 'import numpy as np\n'), ((5971, 5999), 'numpy.count_nonzero', 'np.count_nonzero', (['(array == 0)'], {}), '(array == 0)\n', (5987, 5999), True, 'import numpy as np\n'), ((7538, 7580), 'numpy.zeros', 'np.zeros', (['(array.shape[1], array.shape[2])'], {}), '((array.shape[1], array.shape[2]))\n', (7546, 7580), True, 'import numpy as np\n'), ((7600, 7642), 'numpy.zeros', 'np.zeros', (['(array.shape[1], array.shape[2])'], {}), '((array.shape[1], array.shape[2]))\n', (7608, 7642), True, 'import numpy as np\n'), ((7669, 7685), 'numpy.stack', 'np.stack', (['padded'], {}), '(padded)\n', (7677, 7685), True, 'import numpy as np\n'), ((7778, 7810), 'os.path.exists', 'os.path.exists', (['test_folder_path'], {}), '(test_folder_path)\n', (7792, 7810), False, 'import os\n'), ((7821, 7850), 'os.makedirs', 'os.makedirs', (['test_folder_path'], {}), '(test_folder_path)\n', (7832, 7850), False, 'import os\n'), ((8314, 8366), 'cv2.drawContours', 'cv2.drawContours', (['circled[n]', 'contours', '(-1)', 'color', '(1)'], {}), '(circled[n], contours, -1, color, 1)\n', (8330, 8366), False, 'import cv2\n'), ((8797, 8819), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (8807, 8819), False, 'import os\n'), ((9650, 9698), 'numpy.stack', 'np.stack', (['[s.pixel_array for s in self.raw_data]'], {}), '([s.pixel_array for s in self.raw_data])\n', (9658, 9698), True, 
'import numpy as np\n'), ((10416, 10440), 'numpy.round', 'np.round', (['new_real_shape'], {}), '(new_real_shape)\n', (10424, 10440), True, 'import numpy as np\n'), ((10700, 10779), 'scipy.ndimage.interpolation.zoom', 'scipy.ndimage.interpolation.zoom', (['self.original_pixel_array', 'real_resize_factor'], {}), '(self.original_pixel_array, real_resize_factor)\n', (10732, 10779), False, 'import scipy\n'), ((12327, 12413), 'skimage.measure.marching_cubes_lewiner', 'marching_cubes', (['transposed', '(0.01)'], {'step_size': 'stl_resolution', 'allow_degenerate': '(True)'}), '(transposed, 0.01, step_size=stl_resolution, allow_degenerate\n =True)\n', (12341, 12413), True, 'from skimage.measure import marching_cubes_lewiner as marching_cubes\n'), ((12893, 12908), 'stl.mesh.save', 'mesh.save', (['path'], {}), '(path)\n', (12902, 12908), False, 'from stl import mesh\n'), ((13313, 13371), 'scipy.ndimage.gaussian_filter', 'scipy.ndimage.gaussian_filter', (['input_array', 'blur_precision'], {}), '(input_array, blur_precision)\n', (13342, 13371), False, 'import scipy\n'), ((14548, 14593), 'scipy.ndimage.generate_binary_structure', 'scipy.ndimage.generate_binary_structure', (['(3)', '(1)'], {}), '(3, 1)\n', (14587, 14593), False, 'import scipy\n'), ((15156, 15188), 'os.path.exists', 'os.path.exists', (['test_folder_path'], {}), '(test_folder_path)\n', (15170, 15188), False, 'import os\n'), ((15199, 15228), 'os.makedirs', 'os.makedirs', (['test_folder_path'], {}), '(test_folder_path)\n', (15210, 15228), False, 'import os\n'), ((15331, 15389), 'numpy.stack', 'np.stack', (['[image * 255, image * 255, image * 255]'], {'axis': '(-1)'}), '([image * 255, image * 255, image * 255], axis=-1)\n', (15339, 15389), True, 'import numpy as np\n'), ((15750, 15905), 'tensorflow.keras.layers.Conv3D', 'keras.layers.Conv3D', (['filters', 'kernel_size'], {'padding': 'padding', 'strides': 'strides', 'activation': '"""relu"""', 'input_shape': 'x.shape[1:]', 'kernel_initializer': '"""he_normal"""'}), 
"(filters, kernel_size, padding=padding, strides=strides,\n activation='relu', input_shape=x.shape[1:], kernel_initializer='he_normal')\n", (15769, 15905), True, 'import tensorflow.keras as keras\n'), ((15916, 15945), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['dropout'], {}), '(dropout)\n', (15936, 15945), True, 'import tensorflow.keras as keras\n'), ((15958, 16113), 'tensorflow.keras.layers.Conv3D', 'keras.layers.Conv3D', (['filters', 'kernel_size'], {'padding': 'padding', 'strides': 'strides', 'activation': '"""relu"""', 'input_shape': 'c.shape[1:]', 'kernel_initializer': '"""he_normal"""'}), "(filters, kernel_size, padding=padding, strides=strides,\n activation='relu', input_shape=c.shape[1:], kernel_initializer='he_normal')\n", (15977, 16113), True, 'import tensorflow.keras as keras\n'), ((16124, 16167), 'tensorflow.keras.layers.MaxPool3D', 'keras.layers.MaxPool3D', ([], {'pool_size': '(2, 2, 2)'}), '(pool_size=(2, 2, 2))\n', (16146, 16167), True, 'import tensorflow.keras as keras\n'), ((16293, 16329), 'tensorflow.keras.layers.UpSampling3D', 'keras.layers.UpSampling3D', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (16318, 16329), True, 'import tensorflow.keras as keras\n'), ((16347, 16373), 'tensorflow.keras.layers.Concatenate', 'keras.layers.Concatenate', ([], {}), '()\n', (16371, 16373), True, 'import tensorflow.keras as keras\n'), ((16395, 16560), 'tensorflow.keras.layers.Conv3D', 'keras.layers.Conv3D', (['filters', 'kernel_size'], {'padding': 'padding', 'strides': 'strides', 'activation': '"""relu"""', 'input_shape': 'concat.shape[1:]', 'kernel_initializer': '"""he_normal"""'}), "(filters, kernel_size, padding=padding, strides=strides,\n activation='relu', input_shape=concat.shape[1:], kernel_initializer=\n 'he_normal')\n", (16414, 16560), True, 'import tensorflow.keras as keras\n'), ((16571, 16600), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['dropout'], {}), '(dropout)\n', (16591, 16600), True, 'import tensorflow.keras as 
keras\n'), ((16613, 16768), 'tensorflow.keras.layers.Conv3D', 'keras.layers.Conv3D', (['filters', 'kernel_size'], {'padding': 'padding', 'strides': 'strides', 'activation': '"""relu"""', 'input_shape': 'c.shape[1:]', 'kernel_initializer': '"""he_normal"""'}), "(filters, kernel_size, padding=padding, strides=strides,\n activation='relu', input_shape=c.shape[1:], kernel_initializer='he_normal')\n", (16632, 16768), True, 'import tensorflow.keras as keras\n'), ((16883, 17038), 'tensorflow.keras.layers.Conv3D', 'keras.layers.Conv3D', (['filters', 'kernel_size'], {'padding': 'padding', 'strides': 'strides', 'activation': '"""relu"""', 'input_shape': 'x.shape[1:]', 'kernel_initializer': '"""he_normal"""'}), "(filters, kernel_size, padding=padding, strides=strides,\n activation='relu', input_shape=x.shape[1:], kernel_initializer='he_normal')\n", (16902, 17038), True, 'import tensorflow.keras as keras\n'), ((17049, 17078), 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['dropout'], {}), '(dropout)\n', (17069, 17078), True, 'import tensorflow.keras as keras\n'), ((17092, 17247), 'tensorflow.keras.layers.Conv3D', 'keras.layers.Conv3D', (['filters', 'kernel_size'], {'padding': 'padding', 'strides': 'strides', 'activation': '"""relu"""', 'input_shape': 'c.shape[1:]', 'kernel_initializer': '"""he_normal"""'}), "(filters, kernel_size, padding=padding, strides=strides,\n activation='relu', input_shape=c.shape[1:], kernel_initializer='he_normal')\n", (17111, 17247), True, 'import tensorflow.keras as keras\n'), ((17845, 17919), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', (['(1)', '(1, 1, 1)'], {'padding': '"""same"""', 'activation': '"""sigmoid"""'}), "(1, (1, 1, 1), padding='same', activation='sigmoid')\n", (17867, 17919), True, 'import tensorflow as tf\n'), ((3684, 3698), 'numpy.max', 'np.max', (['counts'], {}), '(counts)\n', (3690, 3698), True, 'import numpy as np\n'), ((5787, 5817), 'os.path.dirname', 'os.path.dirname', (['stl_file_path'], {}), 
'(stl_file_path)\n', (5802, 5817), False, 'import os\n'), ((5841, 5871), 'os.path.dirname', 'os.path.dirname', (['stl_file_path'], {}), '(stl_file_path)\n', (5856, 5871), False, 'import os\n'), ((7951, 7992), 'os.path.join', 'os.path.join', (['test_folder_path', 'file_name'], {}), '(test_folder_path, file_name)\n', (7963, 7992), False, 'import os\n'), ((9177, 9263), 'numpy.abs', 'np.abs', (['(raw_data[0].ImagePositionPatient[2] - raw_data[1].ImagePositionPatient[2])'], {}), '(raw_data[0].ImagePositionPatient[2] - raw_data[1].\n ImagePositionPatient[2])\n', (9183, 9263), True, 'import numpy as np\n'), ((11263, 11315), 'cv2.drawContours', 'cv2.drawContours', (['circled[n]', 'contours', '(-1)', 'color', '(1)'], {}), '(circled[n], contours, -1, color, 1)\n', (11279, 11315), False, 'import cv2\n'), ((11473, 11489), 'numpy.max', 'np.max', (['array_3d'], {}), '(array_3d)\n', (11479, 11489), True, 'import numpy as np\n'), ((11505, 11521), 'numpy.max', 'np.max', (['array_3d'], {}), '(array_3d)\n', (11511, 11521), True, 'import numpy as np\n'), ((12443, 12494), 'numpy.zeros', 'np.zeros', (['faces.shape[0]'], {'dtype': 'stl.mesh.Mesh.dtype'}), '(faces.shape[0], dtype=stl.mesh.Mesh.dtype)\n', (12451, 12494), True, 'import numpy as np\n'), ((12813, 12842), 'os.path.exists', 'os.path.exists', (['stl_file_path'], {}), '(stl_file_path)\n', (12827, 12842), False, 'import os\n'), ((12857, 12883), 'os.makedirs', 'os.makedirs', (['stl_file_path'], {}), '(stl_file_path)\n', (12868, 12883), False, 'import os\n'), ((15579, 15620), 'os.path.join', 'os.path.join', (['test_folder_path', 'file_name'], {}), '(test_folder_path, file_name)\n', (15591, 15620), False, 'import os\n'), ((21241, 21273), 'numpy.max', 'np.max', (['[x_size, y_size, z_size]'], {}), '([x_size, y_size, z_size])\n', (21247, 21273), True, 'import numpy as np\n'), ((23068, 23099), 'tensorflow.keras.models.load_model', 'load_model', (['brain_seg_model_top'], {}), '(brain_seg_model_top)\n', (23078, 23099), False, 'from 
tensorflow.keras.models import load_model\n'), ((23420, 23453), 'tensorflow.keras.models.load_model', 'load_model', (['brain_seg_model_front'], {}), '(brain_seg_model_front)\n', (23430, 23453), False, 'from tensorflow.keras.models import load_model\n'), ((23813, 23845), 'tensorflow.keras.models.load_model', 'load_model', (['brain_seg_model_side'], {}), '(brain_seg_model_side)\n', (23823, 23845), False, 'from tensorflow.keras.models import load_model\n'), ((24518, 24549), 'tensorflow.keras.models.load_model', 'load_model', (['brain_seg_model_top'], {}), '(brain_seg_model_top)\n', (24528, 24549), False, 'from tensorflow.keras.models import load_model\n'), ((25606, 25639), 'numpy.squeeze', 'np.squeeze', (['only_brain[0]'], {'axis': '(3)'}), '(only_brain[0], axis=3)\n', (25616, 25639), True, 'import numpy as np\n'), ((27390, 27422), 'numpy.max', 'np.max', (['[x_size, y_size, z_size]'], {}), '([x_size, y_size, z_size])\n', (27396, 27422), True, 'import numpy as np\n'), ((29139, 29170), 'tensorflow.keras.models.load_model', 'load_model', (['brain_seg_model_top'], {}), '(brain_seg_model_top)\n', (29149, 29170), False, 'from tensorflow.keras.models import load_model\n'), ((29491, 29524), 'tensorflow.keras.models.load_model', 'load_model', (['brain_seg_model_front'], {}), '(brain_seg_model_front)\n', (29501, 29524), False, 'from tensorflow.keras.models import load_model\n'), ((29884, 29916), 'tensorflow.keras.models.load_model', 'load_model', (['brain_seg_model_side'], {}), '(brain_seg_model_side)\n', (29894, 29916), False, 'from tensorflow.keras.models import load_model\n'), ((30589, 30620), 'tensorflow.keras.models.load_model', 'load_model', (['brain_seg_model_top'], {}), '(brain_seg_model_top)\n', (30599, 30620), False, 'from tensorflow.keras.models import load_model\n'), ((30997, 31030), 'tensorflow.keras.models.load_model', 'load_model', (['brain_seg_model_front'], {}), '(brain_seg_model_front)\n', (31007, 31030), False, 'from tensorflow.keras.models import 
load_model\n'), ((31448, 31480), 'tensorflow.keras.models.load_model', 'load_model', (['brain_seg_model_side'], {}), '(brain_seg_model_side)\n', (31458, 31480), False, 'from tensorflow.keras.models import load_model\n'), ((32537, 32570), 'numpy.squeeze', 'np.squeeze', (['only_brain[0]'], {'axis': '(3)'}), '(only_brain[0], axis=3)\n', (32547, 32570), True, 'import numpy as np\n'), ((1016, 1043), 'numpy.stack', 'np.stack', (['[blew_up]'], {'axis': '(3)'}), '([blew_up], axis=3)\n', (1024, 1043), True, 'import numpy as np\n'), ((2614, 2640), 'numpy.stack', 'np.stack', (['[masked]'], {'axis': '(3)'}), '([masked], axis=3)\n', (2622, 2640), True, 'import numpy as np\n'), ((2916, 2950), 'numpy.squeeze', 'np.squeeze', (['input_array[0]'], {'axis': '(3)'}), '(input_array[0], axis=3)\n', (2926, 2950), True, 'import numpy as np\n'), ((3838, 3864), 'numpy.stack', 'np.stack', (['[masked]'], {'axis': '(3)'}), '([masked], axis=3)\n', (3846, 3864), True, 'import numpy as np\n'), ((5178, 5221), 'numpy.stack', 'np.stack', (['[thresholded_array / 255]'], {'axis': '(3)'}), '([thresholded_array / 255], axis=3)\n', (5186, 5221), True, 'import numpy as np\n'), ((8980, 9007), 'os.listdir', 'os.listdir', (['self.input_path'], {}), '(self.input_path)\n', (8990, 9007), False, 'import os\n'), ((9307, 9368), 'numpy.abs', 'np.abs', (['(raw_data[0].SliceLocation - raw_data[1].SliceLocation)'], {}), '(raw_data[0].SliceLocation - raw_data[1].SliceLocation)\n', (9313, 9368), True, 'import numpy as np\n'), ((11846, 11887), 'os.path.join', 'os.path.join', (['test_folder_path', 'file_name'], {}), '(test_folder_path, file_name)\n', (11858, 11887), False, 'import os\n'), ((14119, 14161), 'numpy.stack', 'np.stack', (['[self.thresholded_array]'], {'axis': '(3)'}), '([self.thresholded_array], axis=3)\n', (14127, 14161), True, 'import numpy as np\n'), ((14840, 14866), 'numpy.stack', 'np.stack', (['[masked]'], {'axis': '(3)'}), '([masked], axis=3)\n', (14848, 14866), True, 'import numpy as np\n'), ((20248, 
20266), 'numpy.max', 'np.max', (['image_data'], {}), '(image_data)\n', (20254, 20266), True, 'import numpy as np\n'), ((26397, 26415), 'numpy.max', 'np.max', (['image_data'], {}), '(image_data)\n', (26403, 26415), True, 'import numpy as np\n'), ((32808, 32855), 'numpy.stack', 'np.stack', (['[only_brain1, binary_tumor1]'], {'axis': '(-1)'}), '([only_brain1, binary_tumor1], axis=-1)\n', (32816, 32855), True, 'import numpy as np\n'), ((8608, 8632), 'os.path.join', 'os.path.join', (['path', 'dir_'], {}), '(path, dir_)\n', (8620, 8632), False, 'import os\n'), ((20722, 20752), 'numpy.stack', 'np.stack', (['[image_data]'], {'axis': '(3)'}), '([image_data], axis=3)\n', (20730, 20752), True, 'import numpy as np\n'), ((22660, 22694), 'numpy.stack', 'np.stack', (['[rescaled_array]'], {'axis': '(3)'}), '([rescaled_array], axis=3)\n', (22668, 22694), True, 'import numpy as np\n'), ((24293, 24334), 'numpy.squeeze', 'np.squeeze', (['original_array_top[0]'], {'axis': '(3)'}), '(original_array_top[0], axis=3)\n', (24303, 24334), True, 'import numpy as np\n'), ((26871, 26901), 'numpy.stack', 'np.stack', (['[image_data]'], {'axis': '(3)'}), '([image_data], axis=3)\n', (26879, 26901), True, 'import numpy as np\n'), ((28731, 28765), 'numpy.stack', 'np.stack', (['[rescaled_array]'], {'axis': '(3)'}), '([rescaled_array], axis=3)\n', (28739, 28765), True, 'import numpy as np\n'), ((30364, 30405), 'numpy.squeeze', 'np.squeeze', (['original_array_top[0]'], {'axis': '(3)'}), '(original_array_top[0], axis=3)\n', (30374, 30405), True, 'import numpy as np\n'), ((33124, 33171), 'numpy.stack', 'np.stack', (['[only_brain2, binary_tumor2]'], {'axis': '(-1)'}), '([only_brain2, binary_tumor2], axis=-1)\n', (33132, 33171), True, 'import numpy as np\n'), ((21864, 21897), 'numpy.stack', 'np.stack', (['[bounds_finder]'], {'axis': '(3)'}), '([bounds_finder], axis=3)\n', (21872, 21897), True, 'import numpy as np\n'), ((28013, 28046), 'numpy.stack', 'np.stack', (['[bounds_finder]'], {'axis': '(3)'}), 
'([bounds_finder], axis=3)\n', (28021, 28046), True, 'import numpy as np\n'), ((22755, 22792), 'numpy.rot90', 'np.rot90', (['rescaled_array'], {'axes': '(1, 0)'}), '(rescaled_array, axes=(1, 0))\n', (22763, 22792), True, 'import numpy as np\n'), ((22863, 22902), 'numpy.rot90', 'np.rot90', (['rescaled_array.T'], {'axes': '(1, 2)'}), '(rescaled_array.T, axes=(1, 2))\n', (22871, 22902), True, 'import numpy as np\n'), ((25125, 25172), 'numpy.stack', 'np.stack', (['[binary_brain_final_combined]'], {'axis': '(3)'}), '([binary_brain_final_combined], axis=3)\n', (25133, 25172), True, 'import numpy as np\n'), ((28826, 28863), 'numpy.rot90', 'np.rot90', (['rescaled_array'], {'axes': '(1, 0)'}), '(rescaled_array, axes=(1, 0))\n', (28834, 28863), True, 'import numpy as np\n'), ((28934, 28973), 'numpy.rot90', 'np.rot90', (['rescaled_array.T'], {'axes': '(1, 2)'}), '(rescaled_array.T, axes=(1, 2))\n', (28942, 28973), True, 'import numpy as np\n'), ((32088, 32135), 'numpy.stack', 'np.stack', (['[binary_brain_final_combined]'], {'axis': '(3)'}), '([binary_brain_final_combined], axis=3)\n', (32096, 32135), True, 'import numpy as np\n'), ((33484, 33495), 'time.time', 'time.time', ([], {}), '()\n', (33493, 33495), False, 'import time\n'), ((33536, 33547), 'time.time', 'time.time', ([], {}), '()\n', (33545, 33547), False, 'import time\n')] |
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of R2R problem."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import operator
import pickle
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from valan.framework import common
from valan.framework import problem_type
from valan.framework import utils
from valan.r2r import agent
from valan.r2r import agent_config
from valan.r2r import constants
from valan.r2r import curriculum_env
from valan.r2r import curriculum_env_config as curriculum_env_config_lib
from valan.r2r import env
from valan.r2r import env_config as env_config_lib
from valan.r2r import eval_metric
from valan.r2r.multi_task import mt_agent
from valan.r2r.multi_task import mt_agent_config
# Per-episode debug counters emitted by the actor: the episode's total
# undiscounted reward, the number of steps taken, and the number of paths
# currently served by the environment.
R2RDebugInfo = collections.namedtuple(
    'R2RDebugInfo', 'episode_undisc_reward episode_num_steps num_paths')
class R2RProblem(problem_type.ProblemType):
  """Problem type for R2R (Room-to-Room vision-and-language navigation)."""

  def __init__(self,
               runtime_config,
               mode,
               data_sources,
               curriculum='',
               agent_type='r2r'):
    """Initializes the R2R problem.

    Args:
      runtime_config: Runtime configuration forwarded to the environment.
      mode: Execution mode string (e.g. 'train'); also forwarded to the agent.
      data_sources: Non-empty collection of data sources for the environment.
      curriculum: If non-empty, a curriculum environment is created using this
        value to select the curriculum config.
      agent_type: 'r2r' for the single-task agent or 'mt' for the multi-task
        agent (case-insensitive).

    Raises:
      ValueError: If `agent_type` is not 'r2r' or 'mt'.
    """
    self._runtime_config = runtime_config
    self._mode = mode
    self._data_sources = data_sources
    self._curriculum = curriculum
    if agent_type.lower() == 'r2r':
      self._agent = agent.R2RAgent(
          agent_config.get_r2r_agent_config(), mode=mode)
    elif agent_type.lower() == 'mt':
      self._agent = mt_agent.MTEnvAgAgent(
          mt_agent_config.get_agent_config(), mode=mode)
    else:
      raise ValueError('Invalid agent_type: {}'.format(agent_type))
    # Probability of studying the actor-critic loss for an episode; the
    # complement is the probability of the cross-entropy loss.
    self._prob_ac = 0.5
    # Environment is constructed lazily in get_environment().
    self._env = None
    # Loss type chosen per episode in get_episode_loss_type().
    self._loss_type = None
    self._eval_dict = self._get_eval_dict()

  def _get_eval_dict(self):
    """Returns a mapping from summary tag name to eval metric function."""
    return {
        'eval/success_rate':
            eval_metric.get_success_rate,
        'eval/navigation_error':
            eval_metric.get_navigation_error,
        'eval/path_length':
            eval_metric.get_path_length,
        'eval/oracle_success':
            eval_metric.get_oracle_success,
        'eval/num_steps_before_stop':
            eval_metric.get_num_steps_before_stop,
        'eval/spl':
            eval_metric.get_spl,
        'eval/undiscounted_episode_reward':
            eval_metric.get_undisc_episode_reward,
        'eval/cls':
            eval_metric.get_cls,
        'eval/dtw':
            eval_metric.get_dtw,
        'eval/norm_dtw':
            eval_metric.get_norm_dtw,
        'eval/sdtw':
            eval_metric.get_sdtw,
        'eval/' + common.VISUALIZATION_IMAGES:
            eval_metric.get_visualization_image,
    }

  def get_environment(self):
    """Lazily creates and returns the (possibly curriculum) R2R environment."""
    if not self._env:
      assert self._data_sources, 'data_sources must be non-empty.'
      if self._curriculum:
        # See actor_main.py and curriculum_env.py for the argument options.
        self._env = curriculum_env.CurriculumR2REnv(
            data_sources=self._data_sources,
            runtime_config=self._runtime_config,
            curriculum_env_config=
            curriculum_env_config_lib.get_default_curriculum_env_config(
                self._curriculum)
        )
      else:
        self._env = env.R2REnv(
            data_sources=self._data_sources,
            runtime_config=self._runtime_config,
            env_config=env_config_lib.get_default_env_config())
    return self._env

  def get_agent(self):
    """Returns the agent constructed in __init__."""
    return self._agent

  def get_optimizer(self, learning_rate):
    """Returns an Adam optimizer with the given learning rate."""
    return tf.keras.optimizers.Adam(learning_rate=learning_rate)

  def create_summary(self, step, info):
    """Writes training-debug scalar summaries aggregated from actor infos.

    Args:
      step: Global step at which the summaries are recorded.
      info: Iterable of tensors, each holding a pickled list of
        (episode_undisc_reward, episode_num_steps, num_paths) tuples as
        produced by get_actor_info().
    """
    sum_episode_reward = 0.
    sum_episode_num_steps = 0.
    num_infos = 0
    num_paths_list = []
    for infos in [pickle.loads(t.numpy()) for t in info]:
      for episode_undisc_reward, episode_num_steps, num_paths in infos:
        sum_episode_reward += episode_undisc_reward
        sum_episode_num_steps += episode_num_steps
        num_paths_list.append(num_paths)
        num_infos += 1
    # Only write summaries when at least one episode was reported.
    if num_infos:
      tf.summary.scalar(
          'train_debug/episode_undiscounted_reward',
          sum_episode_reward / num_infos,
          step=step)
      tf.summary.scalar(
          'train_debug/episode_num_steps',
          sum_episode_num_steps / num_infos,
          step=step)
      # Log the number of paths for analyzing curriculum learning.
      tf.summary.scalar(
          'train_debug/env_num_paths_mean',
          sum(num_paths_list) / num_infos,
          step=step)
      tf.summary.scalar(
          'train_debug/env_num_paths_maximum',
          max(num_paths_list),
          step=step)

  def get_actor_info(self, final_step_env_output, episode_reward_sum,
                     episode_num_steps):
    """Packs per-episode debug counters into an R2RDebugInfo tuple."""
    return R2RDebugInfo(episode_reward_sum, episode_num_steps,
                        self._env.num_paths)

  def get_study_loss_types(self):
    """Returns the loss types this problem can train with."""
    return [common.AC_LOSS, common.CE_LOSS]

  def get_episode_loss_type(self, iterations):
    """Samples the loss type for this episode (AC with prob. _prob_ac)."""
    self._loss_type = np.random.choice([common.AC_LOSS, common.CE_LOSS],
                                       p=[self._prob_ac, 1. - self._prob_ac])
    return self._loss_type

  def select_actor_action(self, env_output, agent_output):
    """Chooses the next action for the actor.

    In train mode the action is the oracle action (CE loss, teacher forcing)
    or a sample from the policy distribution (AC loss); in other modes the
    argmax action is taken.

    Returns:
      A common.ActorAction with the chosen and oracle action indices, the
      connection-id value of the chosen action, and its log-probability.
    """
    oracle_next_action = env_output.observation[constants.ORACLE_NEXT_ACTION]
    oracle_next_action_indices = tf.where(
        tf.equal(env_output.observation[constants.CONN_IDS],
                 oracle_next_action))
    oracle_next_action_idx = tf.reduce_min(oracle_next_action_indices)
    assert self._mode, 'mode must be set.'
    if self._mode == 'train':
      if self._loss_type == common.CE_LOSS:
        # This is teacher-forcing mode, so choose action same as oracle action.
        action_idx = oracle_next_action_idx
      elif self._loss_type == common.AC_LOSS:
        # Choose next pano from probability distribution over next panos
        action_idx = tfp.distributions.Categorical(
            logits=agent_output.policy_logits).sample()
      else:
        raise ValueError('Unsupported loss type {}'.format(self._loss_type))
    else:
      # In non-train modes, choose greedily.
      action_idx = tf.argmax(agent_output.policy_logits, axis=-1)
    action_val = env_output.observation[constants.CONN_IDS][action_idx]
    policy_logprob = tf.nn.log_softmax(agent_output.policy_logits)
    return common.ActorAction(
        chosen_action_idx=int(action_idx.numpy()),
        oracle_next_action_idx=int(oracle_next_action_idx.numpy()),
        action_val=int(action_val.numpy()),
        log_prob=float(policy_logprob[action_idx].numpy()))

  def plan_actor_action(self, agent_output, agent_state, agent_instance,
                        env_output, env_instance, beam_size, planning_horizon,
                        temperature=1.0):
    """Beam-search planning over future actions.

    Expands up to `beam_size` candidate action sequences for at most
    `planning_horizon` steps by stepping the environment and agent, scoring
    each sequence by its summed policy log-probability (optionally smoothed
    by `temperature`). The environment is restored to its initial state
    before returning.

    Returns:
      The action history (list of common.ActorAction) of the best beam entry.
    """
    initial_env_state = env_instance.get_state()
    initial_time_step = env_output.observation[constants.TIME_STEP]
    beam = [common.PlanningState(score=0,
                                 agent_output=agent_output,
                                 agent_state=agent_state,
                                 env_output=env_output,
                                 env_state=initial_env_state,
                                 action_history=[])]
    planning_step = 1
    while True:
      next_beam = []
      for state in beam:
        if state.action_history and (state.action_history[-1].action_val
                                     == constants.STOP_NODE_ID):
          # Path is done. This won't be reflected in env_output.done since
          # stop actions are not performed during planning.
          next_beam.append(state)
          continue
        # Find the beam_size best next actions based on policy log probability.
        num_actions = tf.math.count_nonzero(state.env_output.observation[
            constants.CONN_IDS] >= constants.STOP_NODE_ID).numpy()
        policy_logprob = tf.nn.log_softmax(
            state.agent_output.policy_logits / temperature)
        logprob, ix = tf.math.top_k(
            policy_logprob, k=min(num_actions, beam_size))
        action_vals = tf.gather(
            state.env_output.observation[constants.CONN_IDS], ix)
        oracle_action = state.env_output.observation[
            constants.ORACLE_NEXT_ACTION]
        oracle_action_indices = tf.where(
            tf.equal(state.env_output.observation[constants.CONN_IDS],
                     oracle_action))
        oracle_action_idx = tf.reduce_min(oracle_action_indices)
        # Expand each action and add to the beam for the next iteration.
        for j, action_val in enumerate(action_vals.numpy()):
          next_action = common.ActorAction(
              chosen_action_idx=int(ix[j].numpy()),
              oracle_next_action_idx=int(oracle_action_idx.numpy()),
              action_val=int(action_val),
              log_prob=float(logprob[j].numpy()))
          if action_val == constants.STOP_NODE_ID:
            # Don't perform stop actions which trigger a new episode that can't
            # be reset using set_state.
            next_state = common.PlanningState(
                score=state.score + logprob[j],
                agent_output=state.agent_output,
                agent_state=state.agent_state,
                env_output=state.env_output,
                env_state=state.env_state,
                action_history=state.action_history + [next_action])
          else:
            # Perform the non-stop action.
            env_instance.set_state(state.env_state)
            next_env_output = env_instance.step(action_val)
            next_env_output = utils.add_time_batch_dim(next_env_output)
            next_agent_output, next_agent_state = agent_instance(
                next_env_output, state.agent_state)
            next_env_output, next_agent_output = utils.remove_time_batch_dim(
                next_env_output, next_agent_output)
            next_state = common.PlanningState(
                score=state.score + logprob[j],
                agent_output=next_agent_output,
                agent_state=next_agent_state,
                env_output=next_env_output,
                env_state=env_instance.get_state(),
                action_history=state.action_history + [next_action])
          next_beam.append(next_state)

      def _log_beam(beam):
        # Debug-log each beam entry's score, per-step log-probs and actions.
        for item in beam:
          path_string = '\t'.join(
              [str(a.action_val) for a in item.action_history])
          score_string = '\t'.join(
              ['%.4f' % a.log_prob for a in item.action_history])
          logging.debug('Score: %.4f', item.score)
          logging.debug('Log prob: %s', score_string)
          logging.debug('Steps: %s', path_string)

      # Reduce the next beam to only the top beam_size paths.
      beam = sorted(next_beam, reverse=True, key=operator.attrgetter('score'))
      beam = beam[:beam_size]
      logging.debug('Planning step %d', planning_step)
      _log_beam(beam)
      # Break if all episodes are done.
      if all(item.action_history[-1].action_val == constants.STOP_NODE_ID
             for item in beam):
        break
      # Break if exceeded planning_horizon.
      if planning_step >= planning_horizon:
        break
      # Break if we are planning beyond the max actions per episode, since this
      # will also trigger a new episode (same as the stop action).
      if initial_time_step + planning_step >= env_instance._max_actions_per_episode:
        break
      planning_step += 1
    # Restore the environment to it's initial state so the agent can still act.
    env_instance.set_state(initial_env_state)
    return beam[0].action_history

  def eval(self, action_list, env_output_list):
    """Runs every metric in _eval_dict and returns {tag: score}."""
    result = {}
    for key, fn in self._eval_dict.items():
      score = fn(action_list, env_output_list, self._env)
      result[key] = score
    return result
| [
"tensorflow.compat.v2.argmax",
"tensorflow_probability.distributions.Categorical",
"tensorflow.compat.v2.keras.optimizers.Adam",
"tensorflow.compat.v2.equal",
"tensorflow.compat.v2.nn.log_softmax",
"valan.r2r.curriculum_env_config.get_default_curriculum_env_config",
"valan.r2r.env_config.get_default_env... | [((1432, 1535), 'collections.namedtuple', 'collections.namedtuple', (['"""R2RDebugInfo"""', "['episode_undisc_reward', 'episode_num_steps', 'num_paths']"], {}), "('R2RDebugInfo', ['episode_undisc_reward',\n 'episode_num_steps', 'num_paths'])\n", (1454, 1535), False, 'import collections\n'), ((4123, 4176), 'tensorflow.compat.v2.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4147, 4176), True, 'import tensorflow.compat.v2 as tf\n'), ((5604, 5698), 'numpy.random.choice', 'np.random.choice', (['[common.AC_LOSS, common.CE_LOSS]'], {'p': '[self._prob_ac, 1.0 - self._prob_ac]'}), '([common.AC_LOSS, common.CE_LOSS], p=[self._prob_ac, 1.0 -\n self._prob_ac])\n', (5620, 5698), True, 'import numpy as np\n'), ((6069, 6110), 'tensorflow.compat.v2.reduce_min', 'tf.reduce_min', (['oracle_next_action_indices'], {}), '(oracle_next_action_indices)\n', (6082, 6110), True, 'import tensorflow.compat.v2 as tf\n'), ((6882, 6927), 'tensorflow.compat.v2.nn.log_softmax', 'tf.nn.log_softmax', (['agent_output.policy_logits'], {}), '(agent_output.policy_logits)\n', (6899, 6927), True, 'import tensorflow.compat.v2 as tf\n'), ((4642, 4750), 'tensorflow.compat.v2.summary.scalar', 'tf.summary.scalar', (['"""train_debug/episode_undiscounted_reward"""', '(sum_episode_reward / num_infos)'], {'step': 'step'}), "('train_debug/episode_undiscounted_reward', \n sum_episode_reward / num_infos, step=step)\n", (4659, 4750), True, 'import tensorflow.compat.v2 as tf\n'), ((4783, 4883), 'tensorflow.compat.v2.summary.scalar', 'tf.summary.scalar', (['"""train_debug/episode_num_steps"""', '(sum_episode_num_steps / num_infos)'], {'step': 'step'}), "('train_debug/episode_num_steps', sum_episode_num_steps /\n num_infos, step=step)\n", (4800, 4883), True, 'import tensorflow.compat.v2 as tf\n'), ((5949, 6021), 'tensorflow.compat.v2.equal', 'tf.equal', 
(['env_output.observation[constants.CONN_IDS]', 'oracle_next_action'], {}), '(env_output.observation[constants.CONN_IDS], oracle_next_action)\n', (5957, 6021), True, 'import tensorflow.compat.v2 as tf\n'), ((6742, 6788), 'tensorflow.compat.v2.argmax', 'tf.argmax', (['agent_output.policy_logits'], {'axis': '(-1)'}), '(agent_output.policy_logits, axis=-1)\n', (6751, 6788), True, 'import tensorflow.compat.v2 as tf\n'), ((7507, 7668), 'valan.framework.common.PlanningState', 'common.PlanningState', ([], {'score': '(0)', 'agent_output': 'agent_output', 'agent_state': 'agent_state', 'env_output': 'env_output', 'env_state': 'initial_env_state', 'action_history': '[]'}), '(score=0, agent_output=agent_output, agent_state=\n agent_state, env_output=env_output, env_state=initial_env_state,\n action_history=[])\n', (7527, 7668), False, 'from valan.framework import common\n'), ((11456, 11504), 'absl.logging.debug', 'logging.debug', (['"""Planning step %d"""', 'planning_step'], {}), "('Planning step %d', planning_step)\n", (11469, 11504), False, 'from absl import logging\n'), ((1999, 2034), 'valan.r2r.agent_config.get_r2r_agent_config', 'agent_config.get_r2r_agent_config', ([], {}), '()\n', (2032, 2034), False, 'from valan.r2r import agent_config\n'), ((8482, 8547), 'tensorflow.compat.v2.nn.log_softmax', 'tf.nn.log_softmax', (['(state.agent_output.policy_logits / temperature)'], {}), '(state.agent_output.policy_logits / temperature)\n', (8499, 8547), True, 'import tensorflow.compat.v2 as tf\n'), ((8679, 8742), 'tensorflow.compat.v2.gather', 'tf.gather', (['state.env_output.observation[constants.CONN_IDS]', 'ix'], {}), '(state.env_output.observation[constants.CONN_IDS], ix)\n', (8688, 8742), True, 'import tensorflow.compat.v2 as tf\n'), ((9030, 9066), 'tensorflow.compat.v2.reduce_min', 'tf.reduce_min', (['oracle_action_indices'], {}), '(oracle_action_indices)\n', (9043, 9066), True, 'import tensorflow.compat.v2 as tf\n'), ((2137, 2171), 
'valan.r2r.multi_task.mt_agent_config.get_agent_config', 'mt_agent_config.get_agent_config', ([], {}), '()\n', (2169, 2171), False, 'from valan.r2r.multi_task import mt_agent_config\n'), ((8906, 8979), 'tensorflow.compat.v2.equal', 'tf.equal', (['state.env_output.observation[constants.CONN_IDS]', 'oracle_action'], {}), '(state.env_output.observation[constants.CONN_IDS], oracle_action)\n', (8914, 8979), True, 'import tensorflow.compat.v2 as tf\n'), ((11127, 11170), 'absl.logging.debug', 'logging.debug', (['"""Score: %.4f"""', 'item.score'], {}), "('Score: %.4f', item.score)\n", (11140, 11170), False, 'from absl import logging\n'), ((11181, 11224), 'absl.logging.debug', 'logging.debug', (['"""Log prob: %s"""', 'score_string'], {}), "('Log prob: %s', score_string)\n", (11194, 11224), False, 'from absl import logging\n'), ((11235, 11277), 'absl.logging.debug', 'logging.debug', (['"""Steps: %s"""', 'path_string'], {}), "('Steps: %s', path_string)\n", (11248, 11277), False, 'from absl import logging\n'), ((11390, 11418), 'operator.attrgetter', 'operator.attrgetter', (['"""score"""'], {}), "('score')\n", (11409, 11418), False, 'import operator\n'), ((3694, 3771), 'valan.r2r.curriculum_env_config.get_default_curriculum_env_config', 'curriculum_env_config_lib.get_default_curriculum_env_config', (['self._curriculum'], {}), '(self._curriculum)\n', (3753, 3771), True, 'from valan.r2r import curriculum_env_config as curriculum_env_config_lib\n'), ((3960, 3999), 'valan.r2r.env_config.get_default_env_config', 'env_config_lib.get_default_env_config', ([], {}), '()\n', (3997, 3999), True, 'from valan.r2r import env_config as env_config_lib\n'), ((8338, 8439), 'tensorflow.compat.v2.math.count_nonzero', 'tf.math.count_nonzero', (['(state.env_output.observation[constants.CONN_IDS] >= constants.STOP_NODE_ID)'], {}), '(state.env_output.observation[constants.CONN_IDS] >=\n constants.STOP_NODE_ID)\n', (8359, 8439), True, 'import tensorflow.compat.v2 as tf\n'), ((9655, 9895), 
'valan.framework.common.PlanningState', 'common.PlanningState', ([], {'score': '(state.score + logprob[j])', 'agent_output': 'state.agent_output', 'agent_state': 'state.agent_state', 'env_output': 'state.env_output', 'env_state': 'state.env_state', 'action_history': '(state.action_history + [next_action])'}), '(score=state.score + logprob[j], agent_output=state.\n agent_output, agent_state=state.agent_state, env_output=state.\n env_output, env_state=state.env_state, action_history=state.\n action_history + [next_action])\n', (9675, 9895), False, 'from valan.framework import common\n'), ((10179, 10220), 'valan.framework.utils.add_time_batch_dim', 'utils.add_time_batch_dim', (['next_env_output'], {}), '(next_env_output)\n', (10203, 10220), False, 'from valan.framework import utils\n'), ((10388, 10451), 'valan.framework.utils.remove_time_batch_dim', 'utils.remove_time_batch_dim', (['next_env_output', 'next_agent_output'], {}), '(next_env_output, next_agent_output)\n', (10415, 10451), False, 'from valan.framework import utils\n'), ((6492, 6556), 'tensorflow_probability.distributions.Categorical', 'tfp.distributions.Categorical', ([], {'logits': 'agent_output.policy_logits'}), '(logits=agent_output.policy_logits)\n', (6521, 6556), True, 'import tensorflow_probability as tfp\n')] |
'''
MIT License
Copyright 2019 Oak Ridge National Laboratory
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Created on October 3, 2019
@author: srinivasn1
@ORNL
'''
import faro
import os
import faro.proto.proto_types as pt
import faro.proto.face_service_pb2 as fsd
import numpy as np
import pyvision as pv
from faro.FaceGallery import SearchableGalleryWorker
def getGalleryWorker(options):
    """Build the searchable gallery worker used by the Arcface backend."""
    print("Arcface: Creating indexed gallery worker...")
    worker = SearchableGalleryWorker(options, fsd.NEG_DOT)
    return worker
class ArcfaceFaceWorker(faro.FaceWorker):
    '''
    Face worker backed by insightface: RetinaFace for detection and
    ArcFace (arcface_r100_v1) for recognition templates.
    '''

    def __init__(self, options):
        '''
        Constructor.

        Loads the RetinaFace detector and the ArcFace recognition model from
        ``options.storage_dir`` and prepares them on the device selected by
        ``options.gpuid`` (-1 selects CPU).
        '''
        import insightface
        os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
        kwargs = {'root': os.path.join(options.storage_dir, 'models')}
        # Load the RetinaFace detection model.
        self.detector = insightface.model_zoo.get_model('retinaface_r50_v1', **kwargs)
        if options.gpuid == -1:
            ctx_id = -1
        else:
            ctx_id = int(options.gpuid)
        # ctx_id selects the device; nms is the non-max-suppression threshold.
        self.detector.prepare(ctx_id, nms=0.4)
        # Load the ArcFace face-recognition model.
        self.fr_model = insightface.model_zoo.get_model('arcface_r100_v1', **kwargs)
        self.fr_model.prepare(ctx_id)
        self.preprocess = insightface.utils.face_align
        print("ArcFace Models Loaded.")

    def detect(self, img, face_records, options):
        '''Run a face detector and return rectangles.

        Populates ``face_records`` with one record per detection, including
        score, bounding box and the 5 RetinaFace landmark points.  If
        ``options.best`` is set only the highest-scoring face is kept.
        '''
        # Convert from RGB to BGR; the detector internally reorders back.
        img = img[:, :, ::-1]
        dets, lpts = self.detector.detect(img, threshold=options.threshold, scale=1)
        # Now process each face we found and add a face to the records list.
        for idx in range(0, dets.shape[0]):
            face_record = face_records.face_records.add()
            # The score column's shape differs between mxnet versions: the
            # proto assignment accepts either the slice or its single element.
            try:
                face_record.detection.score = dets[idx, -1:]
            except (TypeError, ValueError):
                face_record.detection.score = dets[idx, -1:][0]
            ulx, uly, lrx, lry = dets[idx, :-1]
            face_record.detection.location.CopyFrom(
                pt.rect_val2proto(ulx, uly, abs(lrx - ulx), abs(lry - uly)))
            face_record.detection.detection_id = idx
            face_record.detection.detection_class = "FACE_%d" % idx
            # Store the five RetinaFace landmark points on the record.
            lmarkloc = lpts[idx]
            for ldx in range(0, lmarkloc.shape[0]):
                lmark = face_record.landmarks.add()
                lmark.landmark_id = "point_%02d" % ldx
                lmark.location.x = lmarkloc[ldx][0]
                lmark.location.y = lmarkloc[ldx][1]
        if options.best:
            # Keep only the single highest-scoring detection.
            face_records.face_records.sort(key=lambda x: -x.detection.score)
            while len(face_records.face_records) > 1:
                del face_records.face_records[-1]

    def locate(self, img, face_records, options):
        '''Locate facial features.'''
        # The 5 landmark points that RetinaFace detects are already stored
        # during detection, so there is nothing to do here.
        pass

    def align(self, image, face_records):
        '''Align the images to a standard size and orientation to allow
        recognition.'''
        pass  # Not needed for this algorithm.

    def extract(self, img, face_records):
        '''Extract a template that allows the face to be matched.

        Computes the L2-normalized 512-D ArcFace embedding for each detected
        face and stores it, together with a cropped view image, on the
        corresponding record.  Records without a detection get a zero
        template.
        '''
        # Convert from RGB to BGR; get_embedding converts BGR back to RGB.
        img = img[:, :, ::-1]
        for face_record in face_records.face_records:
            if face_record.detection.score != -1:
                # np.float was removed in NumPy 1.24; use the explicit dtype.
                landmarks = np.zeros((5, 2), dtype=np.float64)
                for i in range(0, len(face_record.landmarks)):
                    vals = face_record.landmarks[i]
                    landmarks[i, 0] = vals.location.x
                    landmarks[i, 1] = vals.location.y
                _img = self.preprocess.norm_crop(img, landmark=landmarks)
                embedding = self.fr_model.get_embedding(_img).flatten()
                # L2-normalize the embedding so NEG_DOT scoring is a cosine.
                embedding_norm = np.linalg.norm(embedding)
                normed_embedding = embedding / embedding_norm
                # Extract a square view centered on the detection box.
                x, y, w, h = pt.rect_proto2pv(face_record.detection.location).asTuple()
                cx, cy = x + 0.5 * w, y + 0.5 * h
                tmp = 1.5 * max(w, h)
                cw, ch = tmp, tmp
                crop = pv.AffineFromRect(pv.CenteredRect(cx, cy, cw, ch), (256, 256))
                pvim = pv.Image(img[:, :, ::-1])  # convert bgr back to rgb
                pvim = crop(pvim)
                view = pt.image_pv2proto(pvim)
                face_record.view.CopyFrom(view)
            else:
                # No detection: store a zero template so scoring still works.
                normed_embedding = np.zeros(512, dtype=float)
            face_record.template.data.CopyFrom(pt.vector_np2proto(normed_embedding))

    def scoreType(self):
        '''Return the method used to create a score from the template.

        By default server computation is required.
        SCORE_L1, SCORE_L2, SCORE_DOT, SCORE_SERVER
        '''
        return fsd.NEG_DOT

    def status(self):
        '''Return a simple status message.'''
        status_message = fsd.FaceServiceInfo()
        status_message.status = fsd.READY
        status_message.detection_support = True
        status_message.extract_support = True
        status_message.score_support = True
        status_message.score_type = self.scoreType()
        status_message.detection_threshold = self.recommendedDetectionThreshold()
        status_message.match_threshold = self.recommendedScoreThreshold()
        status_message.algorithm = "ArcFace-model arcface_r100_v1"
        return status_message

    def recommendedDetectionThreshold(self):
        '''Default detection confidence threshold.'''
        return 0.5

    def recommendedScoreThreshold(self, far=-1):
        '''
        Arcface does not provide a match threshold
        '''
        return -0.42838144
| [
"faro.FaceGallery.SearchableGalleryWorker",
"faro.proto.face_service_pb2.FaceServiceInfo",
"os.path.join",
"pyvision.CenteredRect",
"numpy.zeros",
"insightface.model_zoo.get_model",
"numpy.linalg.norm",
"faro.proto.proto_types.image_pv2proto",
"faro.proto.proto_types.vector_np2proto",
"faro.proto.... | [((1442, 1487), 'faro.FaceGallery.SearchableGalleryWorker', 'SearchableGalleryWorker', (['options', 'fsd.NEG_DOT'], {}), '(options, fsd.NEG_DOT)\n', (1465, 1487), False, 'from faro.FaceGallery import SearchableGalleryWorker\n'), ((1847, 1909), 'insightface.model_zoo.get_model', 'insightface.model_zoo.get_model', (['"""retinaface_r50_v1"""'], {}), "('retinaface_r50_v1', **kwargs)\n", (1878, 1909), False, 'import insightface\n'), ((2210, 2270), 'insightface.model_zoo.get_model', 'insightface.model_zoo.get_model', (['"""arcface_r100_v1"""'], {}), "('arcface_r100_v1', **kwargs)\n", (2241, 2270), False, 'import insightface\n'), ((6865, 6886), 'faro.proto.face_service_pb2.FaceServiceInfo', 'fsd.FaceServiceInfo', ([], {}), '()\n', (6884, 6886), True, 'import faro.proto.face_service_pb2 as fsd\n'), ((1747, 1790), 'os.path.join', 'os.path.join', (['options.storage_dir', '"""models"""'], {}), "(options.storage_dir, 'models')\n", (1759, 1790), False, 'import os\n'), ((5135, 5167), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {'dtype': 'np.float'}), '((5, 2), dtype=np.float)\n', (5143, 5167), True, 'import numpy as np\n'), ((5628, 5653), 'numpy.linalg.norm', 'np.linalg.norm', (['embedding'], {}), '(embedding)\n', (5642, 5653), True, 'import numpy as np\n'), ((6141, 6166), 'pyvision.Image', 'pv.Image', (['img[:, :, ::-1]'], {}), '(img[:, :, ::-1])\n', (6149, 6166), True, 'import pyvision as pv\n'), ((6243, 6266), 'faro.proto.proto_types.image_pv2proto', 'pt.image_pv2proto', (['pvim'], {}), '(pvim)\n', (6260, 6266), True, 'import faro.proto.proto_types as pt\n'), ((6369, 6395), 'numpy.zeros', 'np.zeros', (['(512)'], {'dtype': 'float'}), '(512, dtype=float)\n', (6377, 6395), True, 'import numpy as np\n'), ((6455, 6491), 'faro.proto.proto_types.vector_np2proto', 'pt.vector_np2proto', (['normed_embedding'], {}), '(normed_embedding)\n', (6473, 6491), True, 'import faro.proto.proto_types as pt\n'), ((6028, 6059), 'pyvision.CenteredRect', 'pv.CenteredRect', (['cx', 'cy', 
'cw', 'ch'], {}), '(cx, cy, cw, ch)\n', (6043, 6059), True, 'import pyvision as pv\n'), ((5821, 5869), 'faro.proto.proto_types.rect_proto2pv', 'pt.rect_proto2pv', (['face_record.detection.location'], {}), '(face_record.detection.location)\n', (5837, 5869), True, 'import faro.proto.proto_types as pt\n')] |
import numpy as np
import copy
def update_income(behavioral_effect, calcY):
    """Distribute a behavioral income change across the records in calcY.

    The effect is applied only to records with positive AGI (c00100) and is
    apportioned among wages, other income, and itemized deductions in
    proportion to their shares of (AGI + itemized deductions).  The records
    object is recalculated in place and returned.
    """
    agi = calcY.c00100
    delta_inc = np.where(agi > 0, behavioral_effect, 0)
    # Records that do not itemize (deductions below the standard amount)
    # contribute zero to the itemized share.
    # TODO, verify that this is needed.
    _itemized = np.where(calcY.c04470 < calcY._standard, 0, calcY.c04470)
    # A small constant in the denominator guards against division by zero.
    denom = agi + _itemized + .001
    delta_wages = delta_inc * calcY.e00200 / denom
    delta_other_inc = delta_inc * (agi - calcY.e00200) / denom
    delta_itemized = delta_inc * _itemized / denom
    calcY.e00200 = calcY.e00200 + delta_wages
    calcY.e00300 = calcY.e00300 + delta_other_inc
    # TODO, we should create a behavioral modification
    # variable instead of using e19570
    calcY.e19570 = np.where(_itemized > 0, calcY.e19570 + delta_itemized, 0)
    calcY.calc_all()
    return calcY
def behavior(calcX, calcY, elast_wrt_atr=0.4, inc_effect=0.15,
             update_income=update_income):
    """
    Modify plan Y records to account for the micro-feedback effects that
    arise from moving from plan X to plan Y.

    A substitution effect driven by the percent change in the after-tax
    rate is applied first, followed by an income effect driven by the
    change in tax liability.  Plan Y's records are deep-copied, so the
    inputs are left untouched.
    """
    # Marginal tax rates on wages under each plan.
    mtr_x = calcX.mtr('e00200')
    mtr_y = calcY.mtr('e00200')
    # Percent change in the after-tax rate when moving from X to Y.
    pct_diff_atr = ((1 - mtr_y) - (1 - mtr_x)) / (1 - mtr_x)
    calcY_behavior = copy.deepcopy(calcY)
    # Substitution effect: response to the after-tax rate change.
    substitution_effect = elast_wrt_atr * pct_diff_atr * calcX._ospctax
    calcY_behavior = update_income(substitution_effect, calcY_behavior)
    # Income effect: response to the change in tax liability.
    income_effect = inc_effect * (calcY_behavior._ospctax - calcX._ospctax)
    calcY_behavior = update_income(income_effect, calcY_behavior)
    return calcY_behavior
| [
"numpy.where",
"copy.deepcopy"
] | [((94, 142), 'numpy.where', 'np.where', (['(calcY.c00100 > 0)', 'behavioral_effect', '(0)'], {}), '(calcY.c00100 > 0, behavioral_effect, 0)\n', (102, 142), True, 'import numpy as np\n'), ((259, 316), 'numpy.where', 'np.where', (['(calcY.c04470 < calcY._standard)', '(0)', 'calcY.c04470'], {}), '(calcY.c04470 < calcY._standard, 0, calcY.c04470)\n', (267, 316), True, 'import numpy as np\n'), ((856, 913), 'numpy.where', 'np.where', (['(_itemized > 0)', '(calcY.e19570 + delta_itemized)', '(0)'], {}), '(_itemized > 0, calcY.e19570 + delta_itemized, 0)\n', (864, 913), True, 'import numpy as np\n'), ((1545, 1565), 'copy.deepcopy', 'copy.deepcopy', (['calcY'], {}), '(calcY)\n', (1558, 1565), False, 'import copy\n')] |
import numpy as np
import sacrebleu
import torch
from torch import nn, optim
from tqdm import tqdm
from src.modules import make_baseline_model, make_ps_model
class ModelManager:
    """
    Manages a PyTorch nn.Module instance
    - Train Loop
    - Evaluate Loop
    - TODO: early stopping, better logging

    Subclasses are expected to assign ``model``, ``optimizer``,
    ``criterion``, ``clip_value`` and ``path`` in their constructors.
    """

    # noinspection PyUnusedLocal
    def __init__(self, src_vocab, tgt_vocab, pad_idx=0):
        # Seq2Seq model
        self.model = None
        # optimizer
        self.optimizer = None
        # loss function criterion
        self.criterion = None
        # gradient clip value
        self.clip_value = None
        # model save path
        self.path = None
        # vocab to lookup words
        self.vocab = src_vocab.itos
        # internal variables
        self.loss = None
        # seed first
        self.seed()

    def train(self, iterator):
        """
        Training loop for the model
        :param iterator: PyTorch DataIterator instance
        :return: average epoch loss
        """
        # enable training of model layers
        self.model.train()
        epoch_loss = 0
        for i, batch in tqdm(enumerate(iterator), total=len(iterator),
                             desc='training loop'):
            # get source and target data
            src, src_lengths = batch.src
            trg, trg_lengths = batch.trg
            output, _ = self.model(src, trg, src_lengths, trg_lengths)
            # flatten output, trg tensors and ignore <sos> token
            # output -> [(seq_len - 1) * batch x output_size] (2D logits)
            # trg -> [(seq_len - 1) * batch] (1D targets)
            y_pred = output[:, 1:].contiguous().view(-1, output.size(-1))
            y = trg[:, 1:].contiguous().view(-1)
            # compute loss
            self.loss = self.criterion(y_pred, y)
            # backward pass
            self.loss.backward()
            # clip the gradients to stabilize training
            nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_value)
            # update the parameters
            self.optimizer.step()
            # zero gradients for next batch
            self.optimizer.zero_grad()
            epoch_loss += self.loss.item()
        # return the average loss
        return epoch_loss / len(iterator)

    def evaluate(self, iterator):
        """
        Evaluation loop for the model
        :param iterator: PyTorch DataIterator instance
        :return: tuple of (average epoch loss, average BLEU score)
        """
        # disable training of model layers
        self.model.eval()
        epoch_loss = 0
        accuracy = 0
        # don't update model parameters
        with torch.no_grad():
            for i, batch in tqdm(enumerate(iterator), total=len(iterator),
                                 desc='evaluation loop'):
                # get source and target data
                src, src_lengths = batch.src
                trg, trg_lengths = batch.trg
                output, _ = self.model(src, trg, src_lengths, trg_lengths)
                decoded_output = self.model.decoder.decode_mechanism(output)
                # reshape same as train loop
                y_pred = output[:, 1:].contiguous().view(-1, output.size(-1))
                y = trg[:, 1:].contiguous().view(-1)
                # compute loss
                loss = self.criterion(y_pred, y)
                epoch_loss += loss.item()
                # using BLEU score for machine translation tasks
                accuracy += sacrebleu.raw_corpus_bleu(
                    sys_stream=self.lookup_words(decoded_output),
                    ref_streams=[self.lookup_words(trg)]).score
        # return the average loss and average BLEU
        return epoch_loss / len(iterator), accuracy / len(iterator)

    def save_checkpoint(self):
        """Persist model/optimizer state and last loss to ``self.path``."""
        checkpoint = {
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'loss': self.loss
        }
        torch.save(checkpoint, self.path)

    def load_checkpoint(self):
        """Restore model/optimizer state and last loss from ``self.path``."""
        checkpoint = torch.load(self.path)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.loss = checkpoint['loss']

    @staticmethod
    def seed(_seed=0):
        """Seed numpy and torch RNGs for reproducible runs."""
        np.random.seed(_seed)
        torch.manual_seed(_seed)
        torch.backends.cudnn.deterministic = True

    def lookup_words(self, batch):
        """
        Convert batches of token indices back into whitespace-joined strings.
        Out-of-vocabulary indices map to the first vocab entry; ``<pad>``
        tokens are dropped.
        """
        # denumericalize, mapping out-of-range indices to vocab[0]
        batch = [[self.vocab[ind] if ind < len(self.vocab) else self.vocab[0]
                  for ind in ex]
                 for ex in batch]
        # Bug fix: the previous test (`tok not in "<pad>"`) was a substring
        # check, which also dropped any single-character token occurring in
        # the string "<pad>" (e.g. "a", "p", "<"). Compare for equality.
        return [' '.join(tok for tok in ex if tok != '<pad>') for ex in batch]
class BaselineModelManager(ModelManager):
    """ModelManager wired up with the baseline Seq2Seq architecture."""

    def __init__(self, src_vocab, tgt_vocab, pad_idx=0):
        super(BaselineModelManager, self).__init__(src_vocab, tgt_vocab, pad_idx)
        # gradient clip value
        self.clip_value = 1
        # model save path
        self.path = 'models/baseline.pt'
        # negative log-likelihood criterion, ignoring padded positions
        self.criterion = nn.NLLLoss(ignore_index=pad_idx)
        # baseline Seq2Seq model and its Adam optimizer
        self.model = make_baseline_model(len(src_vocab), len(tgt_vocab), pad_idx=pad_idx)
        self.optimizer = optim.Adam(self.model.parameters())
class PointerSoftmaxModelManager(ModelManager):
    """Model manager wiring up the pointer-softmax Seq2Seq model."""

    def __init__(self, src_vocab, tgt_vocab, pad_idx=0):
        """
        :param src_vocab: source-language vocabulary
        :param tgt_vocab: target-language vocabulary
        :param pad_idx: index of the padding token (default 0)
        """
        super().__init__(src_vocab, tgt_vocab, pad_idx)
        # Seq2Seq model
        self.model = make_ps_model(
            len(src_vocab), len(tgt_vocab), pad_idx=pad_idx)
        # optimizer
        self.optimizer = optim.Adam(self.model.parameters())
        # loss function criterion; padding positions are ignored
        self.criterion = nn.NLLLoss(ignore_index=pad_idx)
        # gradient clip value
        self.clip_value = 1
        # model save path
        self.path = 'models/pointer_softmax.pt'
| [
"torch.manual_seed",
"torch.load",
"torch.nn.NLLLoss",
"numpy.random.seed",
"torch.save",
"torch.no_grad"
] | [((3998, 4031), 'torch.save', 'torch.save', (['checkpoint', 'self.path'], {}), '(checkpoint, self.path)\n', (4008, 4031), False, 'import torch\n'), ((4085, 4106), 'torch.load', 'torch.load', (['self.path'], {}), '(self.path)\n', (4095, 4106), False, 'import torch\n'), ((4339, 4360), 'numpy.random.seed', 'np.random.seed', (['_seed'], {}), '(_seed)\n', (4353, 4360), True, 'import numpy as np\n'), ((4369, 4393), 'torch.manual_seed', 'torch.manual_seed', (['_seed'], {}), '(_seed)\n', (4386, 4393), False, 'import torch\n'), ((5243, 5275), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {'ignore_index': 'pad_idx'}), '(ignore_index=pad_idx)\n', (5253, 5275), False, 'from torch import nn, optim\n'), ((5844, 5876), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {'ignore_index': 'pad_idx'}), '(ignore_index=pad_idx)\n', (5854, 5876), False, 'from torch import nn, optim\n'), ((2666, 2681), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2679, 2681), False, 'import torch\n')] |
import base64
import logging
import operator
import numpy as np
from api.v1alpha1.grpc_proto.grpc_algorithm.python3 import api_pb2
logger = logging.getLogger(__name__)
class Parameter:
    """A named hyperparameter with a sorted list of feasible string values."""

    def __init__(self, name, space_list):
        """
        :param name: parameter name
        :param space_list: feasible values; sorted in place on construction
        """
        self.name = name
        self.space_list = space_list
        self.space_list.sort()
        self.length = int(len(self.space_list))

    def __str__(self):
        joined_values = ", ".join(self.space_list)
        return "Parameter(name: {}, list: {})".format(self.name, joined_values)
def num2str(assignments, num):
    """Build a canonical "key: value-key: value-..." string from the first
    *num* assignments, after sorting them in place by their ``key`` attribute.

    :param assignments: list of objects with ``key`` and ``value`` attributes
    :param num: number of leading assignments to include
    :return: hyphen-joined "key: value" string
    """
    assignments.sort(key=operator.attrgetter("key"))
    parts = [
        "{}: {}".format(assignments[i].key, assignments[i].value)
        for i in range(num)
    ]
    return "-".join(parts)
class BaseSamplingService(object):
    """Grid/random hyperparameter sampler over a discrete search space.

    Builds the search space from ``request.parameters``, remembers already
    evaluated trials (keyed by their canonical `num2str` string), and emits
    new, previously unseen parameter assignments.
    """
    def __init__(self, request):
        """
        :param request: protobuf-like object with ``parameters`` (each having
            ``name`` and ``feasible_space``) and ``existing_results`` (each
            having ``parameter_assignments`` and ``object_value``)
        """
        self.space = []
        self.space_size = 1
        for _par in request.parameters:
            new_par = Parameter(_par.name, _par.feasible_space)
            self.space.append(new_par)
            self.space_size *= new_par.length
        self.space_size = int(self.space_size)
        # sort parameters by name so index arithmetic below is deterministic
        self.space.sort(key=operator.attrgetter("name"))
        # maps canonical assignment string -> objective value (-1 = pending)
        self.existing_trials = {}
        self.num_pars = int(len(request.parameters))
        for _trial in request.existing_results:
            self.existing_trials[
                num2str(_trial.parameter_assignments, self.num_pars)
            ] = _trial.object_value
    def get_assignment(self, request):
        """Dispatch to grid or random sampling based on ``algorithm_name``.

        :return: list of assignment sets ([] for an unknown algorithm name)
        """
        logger.info("-" * 100 + "\n")
        print("-" * 100 + "\n")
        logger.info("New getSuggestions call\n")
        print("New getSuggestions call\n")
        if request.algorithm_name == "grid":
            return self.get_assignment_grid(request)
        elif request.algorithm_name == "random":
            return self.get_assignment_random(request)
        return []
    def grid_index_search(self, index):
        """Decode a flat grid index into one value per parameter.

        Treats the space as a mixed-radix number: parameter i's digit is
        extracted with modulus/division over the sizes of later parameters.

        :param index: flat index in [0, space_size)
        :return: list of ``api_pb2.KeyValue`` assignments
        """
        assignments = []
        for i in range(self.num_pars):
            # product of the lengths of all parameters after i
            sub_space_size = 1
            for j in range(i + 1, self.num_pars):
                sub_space_size *= self.space[j].length
            index_ = int(
                (index % (sub_space_size * self.space[i].length)) / sub_space_size
            )
            assignments.append(
                api_pb2.KeyValue(
                    key=self.space[i].name, value=self.space[i].space_list[index_]
                )
            )
        # each grid index must map to a not-yet-seen assignment
        assert num2str(assignments, self.num_pars) not in self.existing_trials
        self.existing_trials[num2str(assignments, self.num_pars)] = -1
        return assignments
    def random_index_search(self):
        """Draw uniformly random assignments until an unseen one is found.

        NOTE(review): loops forever if the space is exhausted — callers are
        presumed to request fewer samples than remain; confirm upstream.

        :return: list of ``api_pb2.KeyValue`` assignments
        """
        while True:
            assignments = []
            for i in range(self.num_pars):
                assignments.append(
                    api_pb2.KeyValue(
                        key=self.space[i].name,
                        value=self.space[i].space_list[
                            np.random.randint(self.space[i].length)
                        ],
                    )
                )
            if num2str(assignments, self.num_pars) not in self.existing_trials:
                break
        assert num2str(assignments, self.num_pars) not in self.existing_trials
        self.existing_trials[num2str(assignments, self.num_pars)] = -1
        return assignments
    def get_assignment_grid(self, request):
        """Emit the next ``request.required_sampling`` grid assignments,
        continuing from the number of existing results.

        :return: list of ``api_pb2.ParameterAssignments``
        """
        assignments_set = []
        next_assignment_index = int(len(request.existing_results))
        for _ in range(request.required_sampling):
            assignments = self.grid_index_search(next_assignment_index)
            assignments_set.append(api_pb2.ParameterAssignments(key_values=assignments))
            next_assignment_index += 1
            for assignment in assignments:
                logger.info(
                    "Name = {}, Value = {}, ".format(assignment.key, assignment.value)
                )
                print(
                    "Name = {}, Value = {}, ".format(assignment.key, assignment.value)
                )
            logger.info("\n")
        return assignments_set
    def get_assignment_random(self, request):
        """Emit ``request.required_sampling`` new random assignments.

        :return: list of ``api_pb2.ParameterAssignments``
        """
        assignments_set = []
        for _ in range(request.required_sampling):
            assignments = self.random_index_search()
            assignments_set.append(api_pb2.ParameterAssignments(key_values=assignments))
            for assignment in assignments:
                logger.info(
                    "Name = {}, Value = {}, ".format(assignment.key, assignment.value)
                )
                print(
                    "Name = {}, Value = {}, ".format(assignment.key, assignment.value)
                )
            logger.info("\n")
        return assignments_set
    @staticmethod
    def encode(name):
        """Encode the name. Chocolate will check if the name contains hyphens.
        Thus we need to encode it.
        """
        return base64.b64encode(name.encode("utf-8")).decode("utf-8")
| [
"logging.getLogger",
"operator.attrgetter",
"api.v1alpha1.grpc_proto.grpc_algorithm.python3.api_pb2.KeyValue",
"numpy.random.randint",
"api.v1alpha1.grpc_proto.grpc_algorithm.python3.api_pb2.ParameterAssignments"
] | [((143, 170), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (160, 170), False, 'import logging\n'), ((570, 596), 'operator.attrgetter', 'operator.attrgetter', (['"""key"""'], {}), "('key')\n", (589, 596), False, 'import operator\n'), ((1170, 1197), 'operator.attrgetter', 'operator.attrgetter', (['"""name"""'], {}), "('name')\n", (1189, 1197), False, 'import operator\n'), ((2308, 2393), 'api.v1alpha1.grpc_proto.grpc_algorithm.python3.api_pb2.KeyValue', 'api_pb2.KeyValue', ([], {'key': 'self.space[i].name', 'value': 'self.space[i].space_list[index_]'}), '(key=self.space[i].name, value=self.space[i].space_list[index_]\n )\n', (2324, 2393), False, 'from api.v1alpha1.grpc_proto.grpc_algorithm.python3 import api_pb2\n'), ((3637, 3689), 'api.v1alpha1.grpc_proto.grpc_algorithm.python3.api_pb2.ParameterAssignments', 'api_pb2.ParameterAssignments', ([], {'key_values': 'assignments'}), '(key_values=assignments)\n', (3665, 3689), False, 'from api.v1alpha1.grpc_proto.grpc_algorithm.python3 import api_pb2\n'), ((4311, 4363), 'api.v1alpha1.grpc_proto.grpc_algorithm.python3.api_pb2.ParameterAssignments', 'api_pb2.ParameterAssignments', ([], {'key_values': 'assignments'}), '(key_values=assignments)\n', (4339, 4363), False, 'from api.v1alpha1.grpc_proto.grpc_algorithm.python3 import api_pb2\n'), ((2952, 2991), 'numpy.random.randint', 'np.random.randint', (['self.space[i].length'], {}), '(self.space[i].length)\n', (2969, 2991), True, 'import numpy as np\n')] |
"""Makes event-attribution schematics for 2019 tornado-prediction paper."""
import numpy
import pandas
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as pyplot
from descartes import PolygonPatch
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import polygons
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import storm_plotting
from gewittergefahr.plotting import imagemagick_utils
# Column names used in the synthetic storm/tornado tables below.
TORNADIC_FLAG_COLUMN = 'is_tornadic'
SPECIAL_FLAG_COLUMN = 'is_main_tornadic_link'
POLYGON_COLUMN = 'polygon_object_xy_metres'
TORNADO_TIME_COLUMN = 'valid_time_unix_sec'
TORNADO_X_COLUMN = 'x_coord_metres'
TORNADO_Y_COLUMN = 'y_coord_metres'
# Unit-polygon vertex templates (closed rings), later scaled and shifted to
# each storm centroid.  Square, hexagon, and octagon distinguish storm cells.
SQUARE_X_COORDS = 2 * numpy.array([-1, -1, 1, 1, -1], dtype=float)
SQUARE_Y_COORDS = numpy.array([-1, 1, 1, -1, -1], dtype=float)
THIS_NUM = numpy.sqrt(3) / 2
HEXAGON_X_COORDS = 2 * numpy.array([1, 0.5, -0.5, -1, -0.5, 0.5, 1])
HEXAGON_Y_COORDS = numpy.array([
    0, -THIS_NUM, -THIS_NUM, 0, THIS_NUM, THIS_NUM, 0
])
THIS_NUM = numpy.sqrt(2) / 2
OCTAGON_X_COORDS = 2 * numpy.array([
    1, THIS_NUM, 0, -THIS_NUM, -1, -THIS_NUM, 0, THIS_NUM, 1
])
OCTAGON_Y_COORDS = numpy.array([
    0, THIS_NUM, 1, THIS_NUM, 0, -THIS_NUM, -1, -THIS_NUM, 0
])
# Colours as RGB triples in [0, 1].
TRACK_COLOUR = numpy.full(3, 0.)
MIDPOINT_COLOUR = numpy.full(3, 152. / 255)
TORNADIC_STORM_COLOUR = numpy.array([117, 112, 179], dtype=float) / 255
NON_TORNADIC_STORM_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
NON_INTERP_COLOUR = numpy.array([217, 95, 2], dtype=float) / 255
INTERP_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
DEFAULT_FONT_SIZE = 40
SMALL_LEGEND_FONT_SIZE = 30
TEXT_OFFSET = 0.25
# Apply the default font size to all matplotlib elements.
pyplot.rc('font', size=DEFAULT_FONT_SIZE)
pyplot.rc('axes', titlesize=DEFAULT_FONT_SIZE)
pyplot.rc('axes', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('xtick', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('ytick', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('legend', fontsize=DEFAULT_FONT_SIZE)
pyplot.rc('figure', titlesize=DEFAULT_FONT_SIZE)
# Marker/line styling for tracks, storm objects, and tornado symbols.
TRACK_WIDTH = 4
POLYGON_OPACITY = 0.5
DEFAULT_MARKER_TYPE = 'o'
DEFAULT_MARKER_SIZE = 24
DEFAULT_MARKER_EDGE_WIDTH = 4
TORNADIC_STORM_MARKER_TYPE = 'v'
TORNADIC_STORM_MARKER_SIZE = 48
TORNADIC_STORM_MARKER_EDGE_WIDTH = 0
TORNADO_MARKER_TYPE = 'v'
TORNADO_MARKER_SIZE = 48
TORNADO_MARKER_EDGE_WIDTH = 0
# Output figure geometry.
FIGURE_WIDTH_INCHES = 15
FIGURE_HEIGHT_INCHES = 15
FIGURE_RESOLUTION_DPI = 300
CONCAT_FIGURE_SIZE_PX = int(1e7)
OUTPUT_DIR_NAME = (
    '/localdata/ryan.lagerquist/eager/prediction_paper_2019/attribution_schemas'
)
def _get_data_for_interp_with_split():
    """Creates synthetic data for interpolation with storm split.
    :return: storm_object_table: pandas DataFrame with the following columns.
        Each row is one storm object.
    storm_object_table.primary_id_string: Primary storm ID.
    storm_object_table.secondary_id_string: Secondary storm ID.
    storm_object_table.valid_time_unix_sec: Valid time.
    storm_object_table.centroid_x_metres: x-coordinate of centroid.
    storm_object_table.centroid_y_metres: y-coordinate of centroid.
    storm_object_table.polygon_object_xy_metres: Storm outline (instance of
        `shapely.geometry.Polygon`).
    storm_object_table.first_prev_secondary_id_string: Secondary ID of first
        predecessor ("" if no predecessors).
    storm_object_table.second_prev_secondary_id_string: Secondary ID of second
        predecessor ("" if only one predecessor).
    storm_object_table.first_next_secondary_id_string: Secondary ID of first
        successor ("" if no successors).
    storm_object_table.second_next_secondary_id_string: Secondary ID of second
        successor ("" if no successors).
    :return: tornado_table: pandas DataFrame with the following columns.
    tornado_table.valid_time_unix_sec: Valid time.
    tornado_table.x_coord_metres: x-coordinate.
    tornado_table.y_coord_metres: y-coordinate.
    """
    # Storm A exists at t = 5, 10, 15 and splits into B and C at t = 20.
    primary_id_strings = ['foo'] * 5
    secondary_id_strings = ['A', 'A', 'A', 'B', 'C']
    valid_times_unix_sec = numpy.array([5, 10, 15, 20, 20], dtype=int)
    centroid_x_coords = numpy.array([2, 7, 12, 17, 17], dtype=float)
    centroid_y_coords = numpy.array([5, 5, 5, 8, 2], dtype=float)
    first_prev_sec_id_strings = ['', 'A', 'A', 'A', 'A']
    second_prev_sec_id_strings = ['', '', '', '', '']
    first_next_sec_id_strings = ['A', 'A', 'B', '', '']
    second_next_sec_id_strings = ['', '', 'C', '', '']
    num_storm_objects = len(secondary_id_strings)
    polygon_objects_xy = [None] * num_storm_objects
    # Give each storm cell a distinct outline shape (B = octagon,
    # C = hexagon, A = square), centred on its centroid at half scale.
    for i in range(num_storm_objects):
        if secondary_id_strings[i] == 'B':
            these_x_coords = OCTAGON_X_COORDS
            these_y_coords = OCTAGON_Y_COORDS
        elif secondary_id_strings[i] == 'C':
            these_x_coords = HEXAGON_X_COORDS
            these_y_coords = HEXAGON_Y_COORDS
        else:
            these_x_coords = SQUARE_X_COORDS
            these_y_coords = SQUARE_Y_COORDS
        polygon_objects_xy[i] = polygons.vertex_arrays_to_polygon_object(
            exterior_x_coords=centroid_x_coords[i] + these_x_coords / 2,
            exterior_y_coords=centroid_y_coords[i] + these_y_coords / 2
        )
    storm_object_table = pandas.DataFrame.from_dict({
        tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings,
        tracking_utils.SECONDARY_ID_COLUMN: secondary_id_strings,
        tracking_utils.VALID_TIME_COLUMN: valid_times_unix_sec,
        tracking_utils.CENTROID_X_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,
        tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:
            first_prev_sec_id_strings,
        tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:
            second_prev_sec_id_strings,
        tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:
            first_next_sec_id_strings,
        tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:
            second_next_sec_id_strings,
        POLYGON_COLUMN: polygon_objects_xy
    })
    # One tornado at t = 18, between the last pre-split and first post-split
    # storm times (15 and 20), so linking requires interpolation.
    tornado_table = pandas.DataFrame.from_dict({
        TORNADO_TIME_COLUMN: numpy.array([18], dtype=int),
        TORNADO_X_COLUMN: numpy.array([15.]),
        TORNADO_Y_COLUMN: numpy.array([3.2])
    })
    return storm_object_table, tornado_table
def _get_data_for_interp_with_merger():
    """Creates synthetic data for interpolation with storm merger.
    :return: storm_object_table: See doc for `_get_data_for_interp_with_split`.
    :return: tornado_table: Same.
    """
    # Storms A and B exist at t = 5, 10 and merge into C at t = 15, 20.
    primary_id_strings = ['foo'] * 6
    secondary_id_strings = ['A', 'B', 'A', 'B', 'C', 'C']
    valid_times_unix_sec = numpy.array([5, 5, 10, 10, 15, 20], dtype=int)
    centroid_x_coords = numpy.array([2, 2, 7, 7, 12, 17], dtype=float)
    centroid_y_coords = numpy.array([8, 2, 8, 2, 5, 5], dtype=float)
    first_prev_sec_id_strings = ['', '', 'A', 'B', 'A', 'C']
    second_prev_sec_id_strings = ['', '', '', '', 'B', '']
    first_next_sec_id_strings = ['A', 'B', 'C', 'C', 'C', '']
    second_next_sec_id_strings = ['', '', '', '', '', '']
    num_storm_objects = len(secondary_id_strings)
    polygon_objects_xy = [None] * num_storm_objects
    # Give each storm cell a distinct outline shape (A = octagon,
    # B = hexagon, C = square), centred on its centroid at half scale.
    for i in range(num_storm_objects):
        if secondary_id_strings[i] == 'A':
            these_x_coords = OCTAGON_X_COORDS
            these_y_coords = OCTAGON_Y_COORDS
        elif secondary_id_strings[i] == 'B':
            these_x_coords = HEXAGON_X_COORDS
            these_y_coords = HEXAGON_Y_COORDS
        else:
            these_x_coords = SQUARE_X_COORDS
            these_y_coords = SQUARE_Y_COORDS
        polygon_objects_xy[i] = polygons.vertex_arrays_to_polygon_object(
            exterior_x_coords=centroid_x_coords[i] + these_x_coords / 2,
            exterior_y_coords=centroid_y_coords[i] + these_y_coords / 2
        )
    storm_object_table = pandas.DataFrame.from_dict({
        tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings,
        tracking_utils.SECONDARY_ID_COLUMN: secondary_id_strings,
        tracking_utils.VALID_TIME_COLUMN: valid_times_unix_sec,
        tracking_utils.CENTROID_X_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,
        tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:
            first_prev_sec_id_strings,
        tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:
            second_prev_sec_id_strings,
        tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:
            first_next_sec_id_strings,
        tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:
            second_next_sec_id_strings,
        POLYGON_COLUMN: polygon_objects_xy
    })
    # One tornado at t = 12, between the last pre-merger and first
    # post-merger storm times (10 and 15), so linking requires interpolation.
    tornado_table = pandas.DataFrame.from_dict({
        TORNADO_TIME_COLUMN: numpy.array([12], dtype=int),
        TORNADO_X_COLUMN: numpy.array([9.]),
        TORNADO_Y_COLUMN: numpy.array([3.2])
    })
    return storm_object_table, tornado_table
def _get_track1_for_simple_pred():
    """Creates synthetic data for simple predecessors.
    :return: storm_object_table: Same as table produced by
        `_get_data_for_interp_with_split`, except without column
        "polygon_object_xy_metres" and with the following extra columns.
    storm_object_table.is_tornadic: Boolean flag (True if storm object is
        linked to a tornado).
    storm_object_table.is_main_tornadic_link: Boolean flag (True if storm object
        is the main one linked to a tornado, rather than being linked to tornado
        as a predecessor or successor).
    """
    # Storms X and Y run in parallel from t = 5 to 15, then merge into Z,
    # which lives from t = 20 to 35.
    primary_id_strings = ['foo'] * 10
    secondary_id_strings = ['X', 'Y', 'X', 'Y', 'X', 'Y', 'Z', 'Z', 'Z', 'Z']
    valid_times_unix_sec = numpy.array(
        [5, 5, 10, 10, 15, 15, 20, 25, 30, 35], dtype=int
    )
    centroid_x_coords = numpy.array(
        [2, 2, 7, 7, 12, 12, 17, 22, 27, 32], dtype=float
    )
    centroid_y_coords = numpy.array(
        [8, 2, 8, 2, 8, 2, 5, 5, 5, 5], dtype=float
    )
    # The Z object at t = 30 is the main tornadic link; the Z objects at
    # t = 20 and 25 are tornadic only by association.
    tornadic_flags = numpy.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0], dtype=bool)
    main_tornadic_flags = numpy.array(
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 0], dtype=bool
    )
    first_prev_sec_id_strings = ['', '', 'X', 'Y', 'X', 'Y', 'X', 'Z', 'Z', 'Z']
    second_prev_sec_id_strings = ['', '', '', '', '', '', 'Y', '', '', '']
    first_next_sec_id_strings = [
        'X', 'Y', 'X', 'Y', 'Z', 'Z', 'Z', 'Z', 'Z', ''
    ]
    second_next_sec_id_strings = ['', '', '', '', '', '', '', '', '', '']
    return pandas.DataFrame.from_dict({
        tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings,
        tracking_utils.SECONDARY_ID_COLUMN: secondary_id_strings,
        tracking_utils.VALID_TIME_COLUMN: valid_times_unix_sec,
        tracking_utils.CENTROID_X_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,
        TORNADIC_FLAG_COLUMN: tornadic_flags,
        SPECIAL_FLAG_COLUMN: main_tornadic_flags,
        tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:
            first_prev_sec_id_strings,
        tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:
            second_prev_sec_id_strings,
        tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:
            first_next_sec_id_strings,
        tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:
            second_next_sec_id_strings
    })
def _get_track2_for_simple_pred():
    """Creates synthetic data for simple predecessors.
    :return: storm_object_table: See doc for `_get_track1_for_simple_pred`.
    """
    # Storm A splits into B and C at t = 20; C later splits into D and E at
    # t = 35.  The C/E branch carries the tornadic flags.
    primary_id_strings = ['bar'] * 17
    secondary_id_strings = [
        'A', 'A', 'A', 'B', 'C', 'B', 'C', 'B', 'C',
        'D', 'E', 'D', 'E', 'D', 'E', 'D', 'E'
    ]
    valid_times_unix_sec = numpy.array(
        [5, 10, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 40, 45, 45, 50, 50],
        dtype=int
    )
    centroid_x_coords = numpy.array(
        [2, 6, 10, 14, 14, 18, 18, 22, 22, 26, 26, 30, 30, 34, 34, 38, 38],
        dtype=float
    )
    centroid_y_coords = numpy.array(
        [10, 10, 10, 13, 7, 13, 7, 13, 7, 10, 4, 10, 4, 10, 4, 10, 4],
        dtype=float
    )
    # The E object at t = 45 is the main tornadic link.
    tornadic_flags = numpy.array(
        [0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0], dtype=bool
    )
    main_tornadic_flags = numpy.array(
        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], dtype=bool
    )
    first_prev_sec_id_strings = [
        '', 'A', 'A', 'A', 'A', 'B', 'C', 'B', 'C',
        'C', 'C', 'D', 'E', 'D', 'E', 'D', 'E'
    ]
    second_prev_sec_id_strings = [
        '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ''
    ]
    first_next_sec_id_strings = [
        'A', 'A', 'B', 'B', 'C', 'B', 'C', '', 'D',
        'D', 'E', 'D', 'E', 'D', 'E', '', ''
    ]
    second_next_sec_id_strings = [
        '', '', 'C', '', '', '', '', '', 'E',
        '', '', '', '', '', '', '', ''
    ]
    return pandas.DataFrame.from_dict({
        tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings,
        tracking_utils.SECONDARY_ID_COLUMN: secondary_id_strings,
        tracking_utils.VALID_TIME_COLUMN: valid_times_unix_sec,
        tracking_utils.CENTROID_X_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,
        TORNADIC_FLAG_COLUMN: tornadic_flags,
        SPECIAL_FLAG_COLUMN: main_tornadic_flags,
        tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:
            first_prev_sec_id_strings,
        tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:
            second_prev_sec_id_strings,
        tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:
            first_next_sec_id_strings,
        tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:
            second_next_sec_id_strings
    })
def _get_track_for_simple_succ():
    """Creates synthetic data for simple successors.
    :return: storm_object_table: See doc for `_get_track1_for_simple_pred`.
    """
    # Storms A and B merge with new cells C and D, which merge into E at
    # t = 40; E then splits into F and G at t = 55.
    primary_id_strings = ['moo'] * 21
    secondary_id_strings = [
        'A', 'B', 'A', 'B', 'A', 'B', 'A', 'B', 'C', 'D', 'C', 'D', 'C', 'D',
        'E', 'E', 'E', 'F', 'G', 'F', 'G'
    ]
    valid_times_unix_sec = numpy.array([
        5, 5, 10, 10, 15, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 45, 50, 55,
        55, 60, 60
    ], dtype=int)
    centroid_x_coords = numpy.array([
        5, 5, 10, 10, 15, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 45, 50, 55,
        55, 60, 60
    ], dtype=float)
    centroid_y_coords = numpy.array(
        [8, 2, 8, 2, 8, 2, 8, 2, 11, 5, 11, 5, 11, 5, 8, 8, 8, 11, 5, 11, 5],
        dtype=float
    )
    # The B object at t = 15 is the main tornadic link; downstream D and E
    # objects are tornadic only by association.
    tornadic_flags = numpy.array(
        [0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0],
        dtype=bool
    )
    main_tornadic_flags = numpy.array(
        [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        dtype=bool
    )
    first_prev_sec_id_strings = [
        '', '', 'A', 'B', 'A', 'B', 'A', 'B',
        '', 'A', 'C', 'D', 'C', 'D',
        'C', 'E', 'E',
        'E', 'E', 'F', 'G'
    ]
    second_prev_sec_id_strings = [
        '', '', '', '', '', '', '', '',
        '', 'B', '', '', '', '',
        'D', '', '',
        '', '', '', ''
    ]
    first_next_sec_id_strings = [
        'A', 'B', 'A', 'B', 'A', 'B', 'D', 'D',
        'C', 'D', 'C', 'D', 'E', 'E',
        'E', 'E', 'F',
        'F', 'G', '', ''
    ]
    second_next_sec_id_strings = [
        '', '', '', '', '', '', '', '',
        '', '', '', '', '', '',
        '', '', 'G',
        '', '', '', ''
    ]
    return pandas.DataFrame.from_dict({
        tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings,
        tracking_utils.SECONDARY_ID_COLUMN: secondary_id_strings,
        tracking_utils.VALID_TIME_COLUMN: valid_times_unix_sec,
        tracking_utils.CENTROID_X_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,
        TORNADIC_FLAG_COLUMN: tornadic_flags,
        SPECIAL_FLAG_COLUMN: main_tornadic_flags,
        tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:
            first_prev_sec_id_strings,
        tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:
            second_prev_sec_id_strings,
        tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:
            first_next_sec_id_strings,
        tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:
            second_next_sec_id_strings
    })
def _plot_interp_two_times(storm_object_table, tornado_table, legend_font_size,
                           legend_position_string):
    """Plots interpolation for one pair of times.
    :param storm_object_table: See doc for `_get_interp_data_for_split`.
    :param tornado_table: Same.
    :param legend_font_size: Font size in legend.
    :param legend_position_string: Legend position.
    :return: figure_object: Figure handle (instance of
        `matplotlib.figure.Figure`).
    :return: axes_object: Axes handle (instance of
        `matplotlib.axes._subplots.AxesSubplot`).
    """
    centroid_x_coords = (
        storm_object_table[tracking_utils.CENTROID_X_COLUMN].values
    )
    centroid_y_coords = (
        storm_object_table[tracking_utils.CENTROID_Y_COLUMN].values
    )
    storm_times_minutes = (
        storm_object_table[tracking_utils.VALID_TIME_COLUMN].values
    ).astype(float)
    secondary_id_strings = (
        storm_object_table[tracking_utils.SECONDARY_ID_COLUMN].values
    )
    # Reuse x/y coordinates as fake lon/lat so the track-plotting routine
    # (which expects geographic coordinates) can be used on synthetic data.
    storm_object_table = storm_object_table.assign(**{
        tracking_utils.CENTROID_LONGITUDE_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_LATITUDE_COLUMN: centroid_y_coords
    })
    figure_object, axes_object, basemap_object = (
        plotting_utils.create_equidist_cylindrical_map(
            min_latitude_deg=numpy.min(centroid_y_coords),
            max_latitude_deg=numpy.max(centroid_y_coords),
            min_longitude_deg=numpy.min(centroid_x_coords),
            max_longitude_deg=numpy.max(centroid_x_coords)
        )
    )
    storm_plotting.plot_storm_tracks(
        storm_object_table=storm_object_table, axes_object=axes_object,
        basemap_object=basemap_object, colour_map_object=None,
        constant_colour=TRACK_COLOUR, line_width=TRACK_WIDTH,
        start_marker_type=None, end_marker_type=None)
    num_storm_objects = len(storm_object_table.index)
    legend_handles = []
    legend_strings = []
    # Draw each actual storm outline and one marker per centroid.
    for i in range(num_storm_objects):
        this_patch_object = PolygonPatch(
            storm_object_table[POLYGON_COLUMN].values[i],
            lw=0, ec=NON_INTERP_COLOUR, fc=NON_INTERP_COLOUR,
            alpha=POLYGON_OPACITY)
        axes_object.add_patch(this_patch_object)
    this_handle = axes_object.plot(
        storm_object_table[tracking_utils.CENTROID_X_COLUMN].values,
        storm_object_table[tracking_utils.CENTROID_Y_COLUMN].values,
        linestyle='None', marker=DEFAULT_MARKER_TYPE,
        markersize=DEFAULT_MARKER_SIZE, markerfacecolor=NON_INTERP_COLOUR,
        markeredgecolor=NON_INTERP_COLOUR,
        markeredgewidth=DEFAULT_MARKER_EDGE_WIDTH
    )[0]
    legend_handles.append(this_handle)
    legend_strings.append('Actual storm')
    # Label each storm object with its secondary ID below the centroid.
    for i in range(num_storm_objects):
        axes_object.text(
            centroid_x_coords[i], centroid_y_coords[i] - TEXT_OFFSET,
            secondary_id_strings[i], color=TRACK_COLOUR,
            fontsize=DEFAULT_FONT_SIZE, fontweight='bold',
            horizontalalignment='center', verticalalignment='top')
    # Find the storm times bracketing the tornado time.
    tornado_time_minutes = tornado_table[TORNADO_TIME_COLUMN].values[0]
    previous_time_minutes = numpy.max(
        storm_times_minutes[storm_times_minutes < tornado_time_minutes]
    )
    next_time_minutes = numpy.min(
        storm_times_minutes[storm_times_minutes > tornado_time_minutes]
    )
    previous_object_indices = numpy.where(
        storm_times_minutes == previous_time_minutes
    )[0]
    next_object_indices = numpy.where(
        storm_times_minutes == next_time_minutes
    )[0]
    previous_x_coord = numpy.mean(centroid_x_coords[previous_object_indices])
    previous_y_coord = numpy.mean(centroid_y_coords[previous_object_indices])
    next_x_coord = numpy.mean(centroid_x_coords[next_object_indices])
    next_y_coord = numpy.mean(centroid_y_coords[next_object_indices])
    # Whichever bracketing time has two storm objects (a merger has two
    # "previous", a split has two "next") contributes the midpoint marker.
    if len(next_object_indices) == 1:
        midpoint_x_coord = previous_x_coord
        midpoint_y_coord = previous_y_coord
        midpoint_label_string = 'Midpoint of {0:s} and {1:s}'.format(
            secondary_id_strings[previous_object_indices[0]],
            secondary_id_strings[previous_object_indices[1]]
        )
        line_x_coords = numpy.array([midpoint_x_coord, next_x_coord])
        line_y_coords = numpy.array([midpoint_y_coord, next_y_coord])
    else:
        midpoint_x_coord = next_x_coord
        midpoint_y_coord = next_y_coord
        midpoint_label_string = 'Midpoint of {0:s} and {1:s}'.format(
            secondary_id_strings[next_object_indices[0]],
            secondary_id_strings[next_object_indices[1]]
        )
        line_x_coords = numpy.array([previous_x_coord, midpoint_x_coord])
        line_y_coords = numpy.array([previous_y_coord, midpoint_y_coord])
    this_handle = axes_object.plot(
        midpoint_x_coord, midpoint_y_coord, linestyle='None',
        marker=DEFAULT_MARKER_TYPE, markersize=DEFAULT_MARKER_SIZE,
        markerfacecolor=MIDPOINT_COLOUR, markeredgecolor=MIDPOINT_COLOUR,
        markeredgewidth=DEFAULT_MARKER_EDGE_WIDTH
    )[0]
    legend_handles.append(this_handle)
    legend_strings.append(midpoint_label_string)
    # Linear time interpolation of the centroid to the tornado time.
    this_ratio = (
        (tornado_time_minutes - previous_time_minutes) /
        (next_time_minutes - previous_time_minutes)
    )
    interp_x_coord = previous_x_coord + (
        this_ratio * (next_x_coord - previous_x_coord)
    )
    interp_y_coord = previous_y_coord + (
        this_ratio * (next_y_coord - previous_y_coord)
    )
    # Copy the outline of the single-object side, shifted to the
    # interpolated centroid position.
    if len(next_object_indices) == 1:
        x_offset = interp_x_coord - next_x_coord
        y_offset = interp_y_coord - next_y_coord
        interp_polygon_object_xy = storm_object_table[POLYGON_COLUMN].values[
            next_object_indices[0]
        ]
    else:
        x_offset = interp_x_coord - previous_x_coord
        y_offset = interp_y_coord - previous_y_coord
        interp_polygon_object_xy = storm_object_table[POLYGON_COLUMN].values[
            previous_object_indices[0]
        ]
    interp_polygon_object_xy = polygons.vertex_arrays_to_polygon_object(
        exterior_x_coords=(
            x_offset + numpy.array(interp_polygon_object_xy.exterior.xy[0])
        ),
        exterior_y_coords=(
            y_offset + numpy.array(interp_polygon_object_xy.exterior.xy[1])
        )
    )
    this_patch_object = PolygonPatch(
        interp_polygon_object_xy, lw=0, ec=INTERP_COLOUR, fc=INTERP_COLOUR,
        alpha=POLYGON_OPACITY)
    axes_object.add_patch(this_patch_object)
    this_handle = axes_object.plot(
        interp_x_coord, interp_y_coord, linestyle='None',
        marker=DEFAULT_MARKER_TYPE, markersize=DEFAULT_MARKER_SIZE,
        markerfacecolor=INTERP_COLOUR, markeredgecolor=INTERP_COLOUR,
        markeredgewidth=DEFAULT_MARKER_EDGE_WIDTH
    )[0]
    legend_handles.append(this_handle)
    legend_strings.append('Interpolated storm')
    # Dashed line showing the interpolation path; inserted before the
    # "Interpolated storm" legend entry.
    this_handle = axes_object.plot(
        line_x_coords, line_y_coords,
        linestyle='dashed', color=MIDPOINT_COLOUR, linewidth=4
    )[0]
    legend_handles.insert(-1, this_handle)
    legend_strings.insert(-1, 'Interpolation line')
    # Tornado marker; inserted as the second legend entry.
    this_handle = axes_object.plot(
        tornado_table[TORNADO_X_COLUMN].values[0],
        tornado_table[TORNADO_Y_COLUMN].values[0], linestyle='None',
        marker=TORNADO_MARKER_TYPE, markersize=TORNADO_MARKER_SIZE,
        markerfacecolor=INTERP_COLOUR, markeredgecolor=INTERP_COLOUR,
        markeredgewidth=TORNADO_MARKER_EDGE_WIDTH
    )[0]
    legend_handles.insert(1, this_handle)
    this_string = 'Tornado (at {0:d} min)'.format(
        int(numpy.round(tornado_time_minutes))
    )
    legend_strings.insert(1, this_string)
    # Label the x-axis with storm times at each unique centroid x-position.
    x_tick_values, unique_indices = numpy.unique(
        centroid_x_coords, return_index=True)
    x_tick_labels = [
        '{0:d}'.format(int(numpy.round(storm_times_minutes[i])))
        for i in unique_indices
    ]
    axes_object.set_xticks(x_tick_values)
    axes_object.set_xticklabels(x_tick_labels)
    axes_object.set_xlabel('Storm time (minutes)')
    axes_object.set_yticks([], [])
    axes_object.legend(
        legend_handles, legend_strings, fontsize=legend_font_size,
        loc=legend_position_string)
    return figure_object, axes_object
def _plot_attribution_one_track(storm_object_table, plot_legend, plot_x_ticks,
                                legend_font_size=None, legend_location=None):
    """Plots tornado attribution for one storm track.
    :param storm_object_table: pandas DataFrame created by
        `_get_track1_for_simple_pred`, `_get_track2_for_simple_pred`, or
        `_get_track_for_simple_succ`.
    :param plot_legend: Boolean flag.
    :param plot_x_ticks: Boolean flag.
    :param legend_font_size: Font size in legend (used only if
        `plot_legend == True`).
    :param legend_location: Legend location (used only if
        `plot_legend == True`).
    :return: figure_object: See doc for `_plot_interp_two_times`.
    :return: axes_object: Same.
    """
    centroid_x_coords = storm_object_table[
        tracking_utils.CENTROID_X_COLUMN].values
    centroid_y_coords = storm_object_table[
        tracking_utils.CENTROID_Y_COLUMN].values
    secondary_id_strings = storm_object_table[
        tracking_utils.SECONDARY_ID_COLUMN].values
    # Reuse x/y coordinates as fake lon/lat so the track-plotting routine
    # (which expects geographic coordinates) can be used on synthetic data.
    storm_object_table = storm_object_table.assign(**{
        tracking_utils.CENTROID_LONGITUDE_COLUMN: centroid_x_coords,
        tracking_utils.CENTROID_LATITUDE_COLUMN: centroid_y_coords
    })
    figure_object, axes_object, basemap_object = (
        plotting_utils.create_equidist_cylindrical_map(
            min_latitude_deg=numpy.min(centroid_y_coords),
            max_latitude_deg=numpy.max(centroid_y_coords),
            min_longitude_deg=numpy.min(centroid_x_coords),
            max_longitude_deg=numpy.max(centroid_x_coords)
        )
    )
    storm_plotting.plot_storm_tracks(
        storm_object_table=storm_object_table, axes_object=axes_object,
        basemap_object=basemap_object, colour_map_object=None,
        constant_colour=TRACK_COLOUR, line_width=TRACK_WIDTH,
        start_marker_type=None, end_marker_type=None)
    tornadic_flags = storm_object_table[TORNADIC_FLAG_COLUMN].values
    main_tornadic_flags = storm_object_table[SPECIAL_FLAG_COLUMN].values
    # One legend entry each for: main tornadic link, other tornadic objects,
    # non-tornadic objects.  Filled lazily as each kind is first plotted.
    legend_handles = [None] * 3
    legend_strings = [None] * 3
    for i in range(len(centroid_x_coords)):
        if main_tornadic_flags[i]:
            # Main tornadic link: filled triangle marker.
            this_handle = axes_object.plot(
                centroid_x_coords[i], centroid_y_coords[i], linestyle='None',
                marker=TORNADIC_STORM_MARKER_TYPE,
                markersize=TORNADIC_STORM_MARKER_SIZE,
                markerfacecolor=TORNADIC_STORM_COLOUR,
                markeredgecolor=TORNADIC_STORM_COLOUR,
                markeredgewidth=TORNADIC_STORM_MARKER_EDGE_WIDTH
            )[0]
            legend_handles[0] = this_handle
            legend_strings[0] = 'Object initially linked\nto tornado'
            axes_object.text(
                centroid_x_coords[i], centroid_y_coords[i] - TEXT_OFFSET,
                secondary_id_strings[i], color=TORNADIC_STORM_COLOUR,
                fontsize=DEFAULT_FONT_SIZE, fontweight='bold',
                horizontalalignment='center', verticalalignment='top')
        else:
            # Other objects: filled circle if tornadic, hollow if not.
            if tornadic_flags[i]:
                this_edge_colour = TORNADIC_STORM_COLOUR
                this_face_colour = TORNADIC_STORM_COLOUR
            else:
                this_edge_colour = NON_TORNADIC_STORM_COLOUR
                this_face_colour = 'white'
            this_handle = axes_object.plot(
                centroid_x_coords[i], centroid_y_coords[i], linestyle='None',
                marker=DEFAULT_MARKER_TYPE, markersize=DEFAULT_MARKER_SIZE,
                markerfacecolor=this_face_colour,
                markeredgecolor=this_edge_colour,
                markeredgewidth=DEFAULT_MARKER_EDGE_WIDTH
            )[0]
            if tornadic_flags[i] and legend_handles[1] is None:
                legend_handles[1] = this_handle
                legend_strings[1] = 'Also linked to tornado'
            if not tornadic_flags[i] and legend_handles[2] is None:
                legend_handles[2] = this_handle
                legend_strings[2] = 'Not linked to tornado'
            axes_object.text(
                centroid_x_coords[i], centroid_y_coords[i] - TEXT_OFFSET,
                secondary_id_strings[i], color=this_edge_colour,
                fontsize=DEFAULT_FONT_SIZE, fontweight='bold',
                horizontalalignment='center', verticalalignment='top')
    if plot_x_ticks:
        # Label the x-axis with storm times at each unique x-position.
        storm_times_minutes = storm_object_table[
            tracking_utils.VALID_TIME_COLUMN].values
        x_tick_values, unique_indices = numpy.unique(
            centroid_x_coords, return_index=True)
        x_tick_labels = [
            '{0:d}'.format(int(numpy.round(storm_times_minutes[i])))
            for i in unique_indices
        ]
        axes_object.set_xticks(x_tick_values)
        axes_object.set_xticklabels(x_tick_labels)
        axes_object.set_xlabel('Storm time (minutes)')
    else:
        axes_object.set_xticks([], [])
        axes_object.set_xlabel(r'Time $\longrightarrow$')
    axes_object.set_yticks([], [])
    # Pad the bottom so ID labels below the lowest markers stay visible.
    y_min, y_max = axes_object.get_ylim()
    axes_object.set_ylim([y_min - 0.25, y_max])
    if plot_legend:
        axes_object.legend(
            legend_handles, legend_strings, fontsize=legend_font_size,
            loc=legend_location)
    return figure_object, axes_object
def _save_schematic_panels(
        figure_object, axes_object, base_name, label_string,
        title_before=None, title_after=None):
    """Saves one schematic as a standalone image and as a panel image.

    The original code repeated this save/label/save/close sequence for every
    schematic; it is factored out here.  Order of operations is preserved:
    optional title -> standalone save -> panel letter -> optional re-title ->
    panel save -> close.

    :param figure_object: Figure handle.
    :param axes_object: Axes handle.
    :param base_name: Pathless base name (e.g., "interp_with_merger").
    :param label_string: Panel label (e.g., "(a)").
    :param title_before: Title for the standalone image (None to skip).
    :param title_after: Replacement title for the panel image (None to keep).
    :return: panel_file_name: Path to panel image.
    """
    if title_before is not None:
        axes_object.set_title(title_before)
    standalone_file_name = '{0:s}/{1:s}_standalone.jpg'.format(
        OUTPUT_DIR_NAME, base_name)
    print('Saving figure to: "{0:s}"...'.format(standalone_file_name))
    figure_object.savefig(
        standalone_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )
    plotting_utils.label_axes(
        axes_object=axes_object, label_string=label_string)
    if title_after is not None:
        axes_object.set_title(title_after)
    panel_file_name = '{0:s}/{1:s}.jpg'.format(OUTPUT_DIR_NAME, base_name)
    print('Saving figure to: "{0:s}"...'.format(panel_file_name))
    figure_object.savefig(
        panel_file_name, dpi=FIGURE_RESOLUTION_DPI, pad_inches=0,
        bbox_inches='tight'
    )
    pyplot.close(figure_object)
    return panel_file_name
def _run():
    """Makes event-attribution schematics for 2019 tornado-prediction paper.
    This is effectively the main method.
    """
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=OUTPUT_DIR_NAME)
    panel_file_names = []
    # Interpolation with merger.  Fetch the data once instead of calling the
    # generator twice (the original called it once per table).
    these_tables = _get_data_for_interp_with_merger()
    figure_object, axes_object = _plot_interp_two_times(
        storm_object_table=these_tables[0], tornado_table=these_tables[1],
        legend_font_size=SMALL_LEGEND_FONT_SIZE,
        legend_position_string='upper right'
    )
    panel_file_names.append(_save_schematic_panels(
        figure_object=figure_object, axes_object=axes_object,
        base_name='interp_with_merger', label_string='(a)',
        title_before='Interpolation with merger'
    ))
    # Interpolation with split.
    these_tables = _get_data_for_interp_with_split()
    figure_object, axes_object = _plot_interp_two_times(
        storm_object_table=these_tables[0], tornado_table=these_tables[1],
        legend_font_size=DEFAULT_FONT_SIZE,
        legend_position_string='upper left'
    )
    panel_file_names.append(_save_schematic_panels(
        figure_object=figure_object, axes_object=axes_object,
        base_name='interp_with_split', label_string='(b)',
        title_before='Interpolation with split'
    ))
    # Simple successors.  NOTE: the standalone image is deliberately saved
    # without a title (matching the original behaviour); the title is set only
    # on the panel version.
    figure_object, axes_object = _plot_attribution_one_track(
        storm_object_table=_get_track_for_simple_succ(),
        plot_legend=True, plot_x_ticks=True,
        legend_font_size=SMALL_LEGEND_FONT_SIZE, legend_location='lower right'
    )
    panel_file_names.append(_save_schematic_panels(
        figure_object=figure_object, axes_object=axes_object,
        base_name='simple_successors', label_string='(c)',
        title_after='Linking to simple successors'
    ))
    # Simple predecessors, example 1.
    figure_object, axes_object = _plot_attribution_one_track(
        storm_object_table=_get_track1_for_simple_pred(),
        plot_legend=True, plot_x_ticks=False,
        legend_font_size=DEFAULT_FONT_SIZE, legend_location=(0.28, 0.1)
    )
    panel_file_names.append(_save_schematic_panels(
        figure_object=figure_object, axes_object=axes_object,
        base_name='simple_predecessors_track1', label_string='(d)',
        title_before='Simple predecessors, example 1',
        title_after='Linking to simple predecessors, example 1'
    ))
    # Simple predecessors, example 2.
    figure_object, axes_object = _plot_attribution_one_track(
        storm_object_table=_get_track2_for_simple_pred(),
        plot_legend=False, plot_x_ticks=False
    )
    panel_file_names.append(_save_schematic_panels(
        figure_object=figure_object, axes_object=axes_object,
        base_name='simple_predecessors_track2', label_string='(e)',
        title_before='Simple predecessors, example 2',
        title_after='Linking to simple predecessors, example 2'
    ))
    # Concatenate all panels into one figure.
    concat_file_name = '{0:s}/attribution_schemas.jpg'.format(OUTPUT_DIR_NAME)
    print('Concatenating panels to: "{0:s}"...'.format(concat_file_name))
    imagemagick_utils.concatenate_images(
        input_file_names=panel_file_names, output_file_name=concat_file_name,
        num_panel_rows=2, num_panel_columns=3)
    imagemagick_utils.resize_image(
        input_file_name=concat_file_name, output_file_name=concat_file_name,
        output_size_pixels=CONCAT_FIGURE_SIZE_PX)
# Script entry point: build all event-attribution schematic figures.
if __name__ == '__main__':
    _run()
| [
"numpy.sqrt",
"numpy.array",
"numpy.mean",
"numpy.where",
"pandas.DataFrame.from_dict",
"numpy.max",
"matplotlib.pyplot.close",
"gewittergefahr.plotting.imagemagick_utils.concatenate_images",
"numpy.min",
"gewittergefahr.gg_utils.polygons.vertex_arrays_to_polygon_object",
"numpy.round",
"matpl... | [((122, 143), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (136, 143), False, 'import matplotlib\n'), ((875, 919), 'numpy.array', 'numpy.array', (['[-1, 1, 1, -1, -1]'], {'dtype': 'float'}), '([-1, 1, 1, -1, -1], dtype=float)\n', (886, 919), False, 'import numpy\n'), ((1038, 1102), 'numpy.array', 'numpy.array', (['[0, -THIS_NUM, -THIS_NUM, 0, THIS_NUM, THIS_NUM, 0]'], {}), '([0, -THIS_NUM, -THIS_NUM, 0, THIS_NUM, THIS_NUM, 0])\n', (1049, 1102), False, 'import numpy\n'), ((1259, 1330), 'numpy.array', 'numpy.array', (['[0, THIS_NUM, 1, THIS_NUM, 0, -THIS_NUM, -1, -THIS_NUM, 0]'], {}), '([0, THIS_NUM, 1, THIS_NUM, 0, -THIS_NUM, -1, -THIS_NUM, 0])\n', (1270, 1330), False, 'import numpy\n'), ((1353, 1371), 'numpy.full', 'numpy.full', (['(3)', '(0.0)'], {}), '(3, 0.0)\n', (1363, 1371), False, 'import numpy\n'), ((1389, 1415), 'numpy.full', 'numpy.full', (['(3)', '(152.0 / 255)'], {}), '(3, 152.0 / 255)\n', (1399, 1415), False, 'import numpy\n'), ((1762, 1803), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""font"""'], {'size': 'DEFAULT_FONT_SIZE'}), "('font', size=DEFAULT_FONT_SIZE)\n", (1771, 1803), True, 'import matplotlib.pyplot as pyplot\n'), ((1804, 1850), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'titlesize': 'DEFAULT_FONT_SIZE'}), "('axes', titlesize=DEFAULT_FONT_SIZE)\n", (1813, 1850), True, 'import matplotlib.pyplot as pyplot\n'), ((1851, 1897), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'labelsize': 'DEFAULT_FONT_SIZE'}), "('axes', labelsize=DEFAULT_FONT_SIZE)\n", (1860, 1897), True, 'import matplotlib.pyplot as pyplot\n'), ((1898, 1945), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""xtick"""'], {'labelsize': 'DEFAULT_FONT_SIZE'}), "('xtick', labelsize=DEFAULT_FONT_SIZE)\n", (1907, 1945), True, 'import matplotlib.pyplot as pyplot\n'), ((1946, 1993), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""ytick"""'], {'labelsize': 'DEFAULT_FONT_SIZE'}), "('ytick', labelsize=DEFAULT_FONT_SIZE)\n", (1955, 1993), True, 
'import matplotlib.pyplot as pyplot\n'), ((1994, 2041), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""legend"""'], {'fontsize': 'DEFAULT_FONT_SIZE'}), "('legend', fontsize=DEFAULT_FONT_SIZE)\n", (2003, 2041), True, 'import matplotlib.pyplot as pyplot\n'), ((2042, 2090), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""figure"""'], {'titlesize': 'DEFAULT_FONT_SIZE'}), "('figure', titlesize=DEFAULT_FONT_SIZE)\n", (2051, 2090), True, 'import matplotlib.pyplot as pyplot\n'), ((812, 856), 'numpy.array', 'numpy.array', (['[-1, -1, 1, 1, -1]'], {'dtype': 'float'}), '([-1, -1, 1, 1, -1], dtype=float)\n', (823, 856), False, 'import numpy\n'), ((932, 945), 'numpy.sqrt', 'numpy.sqrt', (['(3)'], {}), '(3)\n', (942, 945), False, 'import numpy\n'), ((973, 1018), 'numpy.array', 'numpy.array', (['[1, 0.5, -0.5, -1, -0.5, 0.5, 1]'], {}), '([1, 0.5, -0.5, -1, -0.5, 0.5, 1])\n', (984, 1018), False, 'import numpy\n'), ((1121, 1134), 'numpy.sqrt', 'numpy.sqrt', (['(2)'], {}), '(2)\n', (1131, 1134), False, 'import numpy\n'), ((1162, 1233), 'numpy.array', 'numpy.array', (['[1, THIS_NUM, 0, -THIS_NUM, -1, -THIS_NUM, 0, THIS_NUM, 1]'], {}), '([1, THIS_NUM, 0, -THIS_NUM, -1, -THIS_NUM, 0, THIS_NUM, 1])\n', (1173, 1233), False, 'import numpy\n'), ((1439, 1480), 'numpy.array', 'numpy.array', (['[117, 112, 179]'], {'dtype': 'float'}), '([117, 112, 179], dtype=float)\n', (1450, 1480), False, 'import numpy\n'), ((1515, 1555), 'numpy.array', 'numpy.array', (['[27, 158, 119]'], {'dtype': 'float'}), '([27, 158, 119], dtype=float)\n', (1526, 1555), False, 'import numpy\n'), ((1582, 1620), 'numpy.array', 'numpy.array', (['[217, 95, 2]'], {'dtype': 'float'}), '([217, 95, 2], dtype=float)\n', (1593, 1620), False, 'import numpy\n'), ((1643, 1683), 'numpy.array', 'numpy.array', (['[27, 158, 119]'], {'dtype': 'float'}), '([27, 158, 119], dtype=float)\n', (1654, 1683), False, 'import numpy\n'), ((4101, 4144), 'numpy.array', 'numpy.array', (['[5, 10, 15, 20, 20]'], {'dtype': 'int'}), '([5, 10, 15, 20, 20], 
dtype=int)\n', (4112, 4144), False, 'import numpy\n'), ((4169, 4213), 'numpy.array', 'numpy.array', (['[2, 7, 12, 17, 17]'], {'dtype': 'float'}), '([2, 7, 12, 17, 17], dtype=float)\n', (4180, 4213), False, 'import numpy\n'), ((4238, 4279), 'numpy.array', 'numpy.array', (['[5, 5, 5, 8, 2]'], {'dtype': 'float'}), '([5, 5, 5, 8, 2], dtype=float)\n', (4249, 4279), False, 'import numpy\n'), ((5278, 5962), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['{tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings, tracking_utils.\n SECONDARY_ID_COLUMN: secondary_id_strings, tracking_utils.\n VALID_TIME_COLUMN: valid_times_unix_sec, tracking_utils.\n CENTROID_X_COLUMN: centroid_x_coords, tracking_utils.CENTROID_Y_COLUMN:\n centroid_y_coords, tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:\n first_prev_sec_id_strings, tracking_utils.\n SECOND_PREV_SECONDARY_ID_COLUMN: second_prev_sec_id_strings,\n tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:\n first_next_sec_id_strings, tracking_utils.\n SECOND_NEXT_SECONDARY_ID_COLUMN: second_next_sec_id_strings,\n POLYGON_COLUMN: polygon_objects_xy}'], {}), '({tracking_utils.PRIMARY_ID_COLUMN:\n primary_id_strings, tracking_utils.SECONDARY_ID_COLUMN:\n secondary_id_strings, tracking_utils.VALID_TIME_COLUMN:\n valid_times_unix_sec, tracking_utils.CENTROID_X_COLUMN:\n centroid_x_coords, tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,\n tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:\n first_prev_sec_id_strings, tracking_utils.\n SECOND_PREV_SECONDARY_ID_COLUMN: second_prev_sec_id_strings,\n tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:\n first_next_sec_id_strings, tracking_utils.\n SECOND_NEXT_SECONDARY_ID_COLUMN: second_next_sec_id_strings,\n POLYGON_COLUMN: polygon_objects_xy})\n', (5304, 5962), False, 'import pandas\n'), ((6660, 6706), 'numpy.array', 'numpy.array', (['[5, 5, 10, 10, 15, 20]'], {'dtype': 'int'}), '([5, 5, 10, 10, 15, 20], dtype=int)\n', (6671, 6706), False, 'import numpy\n'), ((6731, 6777), 'numpy.array', 
'numpy.array', (['[2, 2, 7, 7, 12, 17]'], {'dtype': 'float'}), '([2, 2, 7, 7, 12, 17], dtype=float)\n', (6742, 6777), False, 'import numpy\n'), ((6802, 6846), 'numpy.array', 'numpy.array', (['[8, 2, 8, 2, 5, 5]'], {'dtype': 'float'}), '([8, 2, 8, 2, 5, 5], dtype=float)\n', (6813, 6846), False, 'import numpy\n'), ((7863, 8547), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['{tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings, tracking_utils.\n SECONDARY_ID_COLUMN: secondary_id_strings, tracking_utils.\n VALID_TIME_COLUMN: valid_times_unix_sec, tracking_utils.\n CENTROID_X_COLUMN: centroid_x_coords, tracking_utils.CENTROID_Y_COLUMN:\n centroid_y_coords, tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:\n first_prev_sec_id_strings, tracking_utils.\n SECOND_PREV_SECONDARY_ID_COLUMN: second_prev_sec_id_strings,\n tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:\n first_next_sec_id_strings, tracking_utils.\n SECOND_NEXT_SECONDARY_ID_COLUMN: second_next_sec_id_strings,\n POLYGON_COLUMN: polygon_objects_xy}'], {}), '({tracking_utils.PRIMARY_ID_COLUMN:\n primary_id_strings, tracking_utils.SECONDARY_ID_COLUMN:\n secondary_id_strings, tracking_utils.VALID_TIME_COLUMN:\n valid_times_unix_sec, tracking_utils.CENTROID_X_COLUMN:\n centroid_x_coords, tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,\n tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:\n first_prev_sec_id_strings, tracking_utils.\n SECOND_PREV_SECONDARY_ID_COLUMN: second_prev_sec_id_strings,\n tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:\n first_next_sec_id_strings, tracking_utils.\n SECOND_NEXT_SECONDARY_ID_COLUMN: second_next_sec_id_strings,\n POLYGON_COLUMN: polygon_objects_xy})\n', (7889, 8547), False, 'import pandas\n'), ((9637, 9699), 'numpy.array', 'numpy.array', (['[5, 5, 10, 10, 15, 15, 20, 25, 30, 35]'], {'dtype': 'int'}), '([5, 5, 10, 10, 15, 15, 20, 25, 30, 35], dtype=int)\n', (9648, 9699), False, 'import numpy\n'), ((9738, 9800), 'numpy.array', 'numpy.array', (['[2, 2, 7, 7, 12, 12, 17, 
22, 27, 32]'], {'dtype': 'float'}), '([2, 2, 7, 7, 12, 12, 17, 22, 27, 32], dtype=float)\n', (9749, 9800), False, 'import numpy\n'), ((9839, 9895), 'numpy.array', 'numpy.array', (['[8, 2, 8, 2, 8, 2, 5, 5, 5, 5]'], {'dtype': 'float'}), '([8, 2, 8, 2, 8, 2, 5, 5, 5, 5], dtype=float)\n', (9850, 9895), False, 'import numpy\n'), ((9931, 9986), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0, 0, 0, 1, 1, 1, 0]'], {'dtype': 'bool'}), '([0, 0, 0, 0, 0, 0, 1, 1, 1, 0], dtype=bool)\n', (9942, 9986), False, 'import numpy\n'), ((10013, 10068), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]'], {'dtype': 'bool'}), '([0, 0, 0, 0, 0, 0, 0, 0, 1, 0], dtype=bool)\n', (10024, 10068), False, 'import numpy\n'), ((10422, 11150), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['{tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings, tracking_utils.\n SECONDARY_ID_COLUMN: secondary_id_strings, tracking_utils.\n VALID_TIME_COLUMN: valid_times_unix_sec, tracking_utils.\n CENTROID_X_COLUMN: centroid_x_coords, tracking_utils.CENTROID_Y_COLUMN:\n centroid_y_coords, TORNADIC_FLAG_COLUMN: tornadic_flags,\n SPECIAL_FLAG_COLUMN: main_tornadic_flags, tracking_utils.\n FIRST_PREV_SECONDARY_ID_COLUMN: first_prev_sec_id_strings,\n tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:\n second_prev_sec_id_strings, tracking_utils.\n FIRST_NEXT_SECONDARY_ID_COLUMN: first_next_sec_id_strings,\n tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN: second_next_sec_id_strings}'], {}), '({tracking_utils.PRIMARY_ID_COLUMN:\n primary_id_strings, tracking_utils.SECONDARY_ID_COLUMN:\n secondary_id_strings, tracking_utils.VALID_TIME_COLUMN:\n valid_times_unix_sec, tracking_utils.CENTROID_X_COLUMN:\n centroid_x_coords, tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,\n TORNADIC_FLAG_COLUMN: tornadic_flags, SPECIAL_FLAG_COLUMN:\n main_tornadic_flags, tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:\n first_prev_sec_id_strings, tracking_utils.\n SECOND_PREV_SECONDARY_ID_COLUMN: 
second_prev_sec_id_strings,\n tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:\n first_next_sec_id_strings, tracking_utils.\n SECOND_NEXT_SECONDARY_ID_COLUMN: second_next_sec_id_strings})\n', (10448, 11150), False, 'import pandas\n'), ((11626, 11721), 'numpy.array', 'numpy.array', (['[5, 10, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 40, 45, 45, 50, 50]'], {'dtype': 'int'}), '([5, 10, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 40, 45, 45, 50,\n 50], dtype=int)\n', (11637, 11721), False, 'import numpy\n'), ((11764, 11861), 'numpy.array', 'numpy.array', (['[2, 6, 10, 14, 14, 18, 18, 22, 22, 26, 26, 30, 30, 34, 34, 38, 38]'], {'dtype': 'float'}), '([2, 6, 10, 14, 14, 18, 18, 22, 22, 26, 26, 30, 30, 34, 34, 38, \n 38], dtype=float)\n', (11775, 11861), False, 'import numpy\n'), ((11903, 11994), 'numpy.array', 'numpy.array', (['[10, 10, 10, 13, 7, 13, 7, 13, 7, 10, 4, 10, 4, 10, 4, 10, 4]'], {'dtype': 'float'}), '([10, 10, 10, 13, 7, 13, 7, 13, 7, 10, 4, 10, 4, 10, 4, 10, 4],\n dtype=float)\n', (11914, 11994), False, 'import numpy\n'), ((12034, 12110), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0]'], {'dtype': 'bool'}), '([0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0], dtype=bool)\n', (12045, 12110), False, 'import numpy\n'), ((12151, 12227), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]'], {'dtype': 'bool'}), '([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], dtype=bool)\n', (12162, 12227), False, 'import numpy\n'), ((12773, 13501), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['{tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings, tracking_utils.\n SECONDARY_ID_COLUMN: secondary_id_strings, tracking_utils.\n VALID_TIME_COLUMN: valid_times_unix_sec, tracking_utils.\n CENTROID_X_COLUMN: centroid_x_coords, tracking_utils.CENTROID_Y_COLUMN:\n centroid_y_coords, TORNADIC_FLAG_COLUMN: tornadic_flags,\n SPECIAL_FLAG_COLUMN: main_tornadic_flags, tracking_utils.\n 
FIRST_PREV_SECONDARY_ID_COLUMN: first_prev_sec_id_strings,\n tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:\n second_prev_sec_id_strings, tracking_utils.\n FIRST_NEXT_SECONDARY_ID_COLUMN: first_next_sec_id_strings,\n tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN: second_next_sec_id_strings}'], {}), '({tracking_utils.PRIMARY_ID_COLUMN:\n primary_id_strings, tracking_utils.SECONDARY_ID_COLUMN:\n secondary_id_strings, tracking_utils.VALID_TIME_COLUMN:\n valid_times_unix_sec, tracking_utils.CENTROID_X_COLUMN:\n centroid_x_coords, tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,\n TORNADIC_FLAG_COLUMN: tornadic_flags, SPECIAL_FLAG_COLUMN:\n main_tornadic_flags, tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:\n first_prev_sec_id_strings, tracking_utils.\n SECOND_PREV_SECONDARY_ID_COLUMN: second_prev_sec_id_strings,\n tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:\n first_next_sec_id_strings, tracking_utils.\n SECOND_NEXT_SECONDARY_ID_COLUMN: second_next_sec_id_strings})\n', (12799, 13501), False, 'import pandas\n'), ((13994, 14105), 'numpy.array', 'numpy.array', (['[5, 5, 10, 10, 15, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 45, 50, 55, 55, \n 60, 60]'], {'dtype': 'int'}), '([5, 5, 10, 10, 15, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 45, \n 50, 55, 55, 60, 60], dtype=int)\n', (14005, 14105), False, 'import numpy\n'), ((14147, 14260), 'numpy.array', 'numpy.array', (['[5, 5, 10, 10, 15, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 45, 50, 55, 55, \n 60, 60]'], {'dtype': 'float'}), '([5, 5, 10, 10, 15, 15, 20, 20, 25, 25, 30, 30, 35, 35, 40, 45, \n 50, 55, 55, 60, 60], dtype=float)\n', (14158, 14260), False, 'import numpy\n'), ((14302, 14401), 'numpy.array', 'numpy.array', (['[8, 2, 8, 2, 8, 2, 8, 2, 11, 5, 11, 5, 11, 5, 8, 8, 8, 11, 5, 11, 5]'], {'dtype': 'float'}), '([8, 2, 8, 2, 8, 2, 8, 2, 11, 5, 11, 5, 11, 5, 8, 8, 8, 11, 5, \n 11, 5], dtype=float)\n', (14313, 14401), False, 'import numpy\n'), ((14440, 14532), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0, 0, 1, 0, 1, 0, 
1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0]'], {'dtype': 'bool'}), '([0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0],\n dtype=bool)\n', (14451, 14532), False, 'import numpy\n'), ((14577, 14669), 'numpy.array', 'numpy.array', (['[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]'], {'dtype': 'bool'}), '([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n dtype=bool)\n', (14588, 14669), False, 'import numpy\n'), ((15363, 16091), 'pandas.DataFrame.from_dict', 'pandas.DataFrame.from_dict', (['{tracking_utils.PRIMARY_ID_COLUMN: primary_id_strings, tracking_utils.\n SECONDARY_ID_COLUMN: secondary_id_strings, tracking_utils.\n VALID_TIME_COLUMN: valid_times_unix_sec, tracking_utils.\n CENTROID_X_COLUMN: centroid_x_coords, tracking_utils.CENTROID_Y_COLUMN:\n centroid_y_coords, TORNADIC_FLAG_COLUMN: tornadic_flags,\n SPECIAL_FLAG_COLUMN: main_tornadic_flags, tracking_utils.\n FIRST_PREV_SECONDARY_ID_COLUMN: first_prev_sec_id_strings,\n tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:\n second_prev_sec_id_strings, tracking_utils.\n FIRST_NEXT_SECONDARY_ID_COLUMN: first_next_sec_id_strings,\n tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN: second_next_sec_id_strings}'], {}), '({tracking_utils.PRIMARY_ID_COLUMN:\n primary_id_strings, tracking_utils.SECONDARY_ID_COLUMN:\n secondary_id_strings, tracking_utils.VALID_TIME_COLUMN:\n valid_times_unix_sec, tracking_utils.CENTROID_X_COLUMN:\n centroid_x_coords, tracking_utils.CENTROID_Y_COLUMN: centroid_y_coords,\n TORNADIC_FLAG_COLUMN: tornadic_flags, SPECIAL_FLAG_COLUMN:\n main_tornadic_flags, tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:\n first_prev_sec_id_strings, tracking_utils.\n SECOND_PREV_SECONDARY_ID_COLUMN: second_prev_sec_id_strings,\n tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:\n first_next_sec_id_strings, tracking_utils.\n SECOND_NEXT_SECONDARY_ID_COLUMN: second_next_sec_id_strings})\n', (15389, 16091), False, 'import pandas\n'), ((17768, 18032), 
'gewittergefahr.plotting.storm_plotting.plot_storm_tracks', 'storm_plotting.plot_storm_tracks', ([], {'storm_object_table': 'storm_object_table', 'axes_object': 'axes_object', 'basemap_object': 'basemap_object', 'colour_map_object': 'None', 'constant_colour': 'TRACK_COLOUR', 'line_width': 'TRACK_WIDTH', 'start_marker_type': 'None', 'end_marker_type': 'None'}), '(storm_object_table=storm_object_table,\n axes_object=axes_object, basemap_object=basemap_object,\n colour_map_object=None, constant_colour=TRACK_COLOUR, line_width=\n TRACK_WIDTH, start_marker_type=None, end_marker_type=None)\n', (17800, 18032), False, 'from gewittergefahr.plotting import storm_plotting\n'), ((19351, 19425), 'numpy.max', 'numpy.max', (['storm_times_minutes[storm_times_minutes < tornado_time_minutes]'], {}), '(storm_times_minutes[storm_times_minutes < tornado_time_minutes])\n', (19360, 19425), False, 'import numpy\n'), ((19464, 19538), 'numpy.min', 'numpy.min', (['storm_times_minutes[storm_times_minutes > tornado_time_minutes]'], {}), '(storm_times_minutes[storm_times_minutes > tornado_time_minutes])\n', (19473, 19538), False, 'import numpy\n'), ((19780, 19834), 'numpy.mean', 'numpy.mean', (['centroid_x_coords[previous_object_indices]'], {}), '(centroid_x_coords[previous_object_indices])\n', (19790, 19834), False, 'import numpy\n'), ((19858, 19912), 'numpy.mean', 'numpy.mean', (['centroid_y_coords[previous_object_indices]'], {}), '(centroid_y_coords[previous_object_indices])\n', (19868, 19912), False, 'import numpy\n'), ((19932, 19982), 'numpy.mean', 'numpy.mean', (['centroid_x_coords[next_object_indices]'], {}), '(centroid_x_coords[next_object_indices])\n', (19942, 19982), False, 'import numpy\n'), ((20002, 20052), 'numpy.mean', 'numpy.mean', (['centroid_y_coords[next_object_indices]'], {}), '(centroid_y_coords[next_object_indices])\n', (20012, 20052), False, 'import numpy\n'), ((22525, 22633), 'descartes.PolygonPatch', 'PolygonPatch', (['interp_polygon_object_xy'], {'lw': '(0)', 'ec': 
'INTERP_COLOUR', 'fc': 'INTERP_COLOUR', 'alpha': 'POLYGON_OPACITY'}), '(interp_polygon_object_xy, lw=0, ec=INTERP_COLOUR, fc=\n INTERP_COLOUR, alpha=POLYGON_OPACITY)\n', (22537, 22633), False, 'from descartes import PolygonPatch\n'), ((23894, 23944), 'numpy.unique', 'numpy.unique', (['centroid_x_coords'], {'return_index': '(True)'}), '(centroid_x_coords, return_index=True)\n', (23906, 23944), False, 'import numpy\n'), ((26024, 26288), 'gewittergefahr.plotting.storm_plotting.plot_storm_tracks', 'storm_plotting.plot_storm_tracks', ([], {'storm_object_table': 'storm_object_table', 'axes_object': 'axes_object', 'basemap_object': 'basemap_object', 'colour_map_object': 'None', 'constant_colour': 'TRACK_COLOUR', 'line_width': 'TRACK_WIDTH', 'start_marker_type': 'None', 'end_marker_type': 'None'}), '(storm_object_table=storm_object_table,\n axes_object=axes_object, basemap_object=basemap_object,\n colour_map_object=None, constant_colour=TRACK_COLOUR, line_width=\n TRACK_WIDTH, start_marker_type=None, end_marker_type=None)\n', (26056, 26288), False, 'from gewittergefahr.plotting import storm_plotting\n'), ((29850, 29928), 'gewittergefahr.gg_utils.file_system_utils.mkdir_recursive_if_necessary', 'file_system_utils.mkdir_recursive_if_necessary', ([], {'directory_name': 'OUTPUT_DIR_NAME'}), '(directory_name=OUTPUT_DIR_NAME)\n', (29896, 29928), False, 'from gewittergefahr.gg_utils import file_system_utils\n'), ((30597, 30667), 'gewittergefahr.plotting.plotting_utils.label_axes', 'plotting_utils.label_axes', ([], {'axes_object': 'axes_object', 'label_string': '"""(a)"""'}), "(axes_object=axes_object, label_string='(a)')\n", (30622, 30667), False, 'from gewittergefahr.plotting import plotting_utils\n'), ((30956, 30983), 'matplotlib.pyplot.close', 'pyplot.close', (['figure_object'], {}), '(figure_object)\n', (30968, 30983), True, 'import matplotlib.pyplot as pyplot\n'), ((31640, 31710), 'gewittergefahr.plotting.plotting_utils.label_axes', 'plotting_utils.label_axes', ([], 
{'axes_object': 'axes_object', 'label_string': '"""(b)"""'}), "(axes_object=axes_object, label_string='(b)')\n", (31665, 31710), False, 'from gewittergefahr.plotting import plotting_utils\n'), ((32016, 32043), 'matplotlib.pyplot.close', 'pyplot.close', (['figure_object'], {}), '(figure_object)\n', (32028, 32043), True, 'import matplotlib.pyplot as pyplot\n'), ((32612, 32682), 'gewittergefahr.plotting.plotting_utils.label_axes', 'plotting_utils.label_axes', ([], {'axes_object': 'axes_object', 'label_string': '"""(c)"""'}), "(axes_object=axes_object, label_string='(c)')\n", (32637, 32682), False, 'from gewittergefahr.plotting import plotting_utils\n'), ((33046, 33073), 'matplotlib.pyplot.close', 'pyplot.close', (['figure_object'], {}), '(figure_object)\n', (33058, 33073), True, 'import matplotlib.pyplot as pyplot\n'), ((33719, 33789), 'gewittergefahr.plotting.plotting_utils.label_axes', 'plotting_utils.label_axes', ([], {'axes_object': 'axes_object', 'label_string': '"""(d)"""'}), "(axes_object=axes_object, label_string='(d)')\n", (33744, 33789), False, 'from gewittergefahr.plotting import plotting_utils\n'), ((34175, 34202), 'matplotlib.pyplot.close', 'pyplot.close', (['figure_object'], {}), '(figure_object)\n', (34187, 34202), True, 'import matplotlib.pyplot as pyplot\n'), ((34776, 34846), 'gewittergefahr.plotting.plotting_utils.label_axes', 'plotting_utils.label_axes', ([], {'axes_object': 'axes_object', 'label_string': '"""(e)"""'}), "(axes_object=axes_object, label_string='(e)')\n", (34801, 34846), False, 'from gewittergefahr.plotting import plotting_utils\n'), ((35232, 35259), 'matplotlib.pyplot.close', 'pyplot.close', (['figure_object'], {}), '(figure_object)\n', (35244, 35259), True, 'import matplotlib.pyplot as pyplot\n'), ((35465, 35614), 'gewittergefahr.plotting.imagemagick_utils.concatenate_images', 'imagemagick_utils.concatenate_images', ([], {'input_file_names': 'panel_file_names', 'output_file_name': 'concat_file_name', 'num_panel_rows': '(2)', 
'num_panel_columns': '(3)'}), '(input_file_names=panel_file_names,\n output_file_name=concat_file_name, num_panel_rows=2, num_panel_columns=3)\n', (35501, 35614), False, 'from gewittergefahr.plotting import imagemagick_utils\n'), ((35633, 35783), 'gewittergefahr.plotting.imagemagick_utils.resize_image', 'imagemagick_utils.resize_image', ([], {'input_file_name': 'concat_file_name', 'output_file_name': 'concat_file_name', 'output_size_pixels': 'CONCAT_FIGURE_SIZE_PX'}), '(input_file_name=concat_file_name,\n output_file_name=concat_file_name, output_size_pixels=CONCAT_FIGURE_SIZE_PX\n )\n', (35663, 35783), False, 'from gewittergefahr.plotting import imagemagick_utils\n'), ((5055, 5227), 'gewittergefahr.gg_utils.polygons.vertex_arrays_to_polygon_object', 'polygons.vertex_arrays_to_polygon_object', ([], {'exterior_x_coords': '(centroid_x_coords[i] + these_x_coords / 2)', 'exterior_y_coords': '(centroid_y_coords[i] + these_y_coords / 2)'}), '(exterior_x_coords=\n centroid_x_coords[i] + these_x_coords / 2, exterior_y_coords=\n centroid_y_coords[i] + these_y_coords / 2)\n', (5095, 5227), False, 'from gewittergefahr.gg_utils import polygons\n'), ((7640, 7812), 'gewittergefahr.gg_utils.polygons.vertex_arrays_to_polygon_object', 'polygons.vertex_arrays_to_polygon_object', ([], {'exterior_x_coords': '(centroid_x_coords[i] + these_x_coords / 2)', 'exterior_y_coords': '(centroid_y_coords[i] + these_y_coords / 2)'}), '(exterior_x_coords=\n centroid_x_coords[i] + these_x_coords / 2, exterior_y_coords=\n centroid_y_coords[i] + these_y_coords / 2)\n', (7680, 7812), False, 'from gewittergefahr.gg_utils import polygons\n'), ((18224, 18360), 'descartes.PolygonPatch', 'PolygonPatch', (['storm_object_table[POLYGON_COLUMN].values[i]'], {'lw': '(0)', 'ec': 'NON_INTERP_COLOUR', 'fc': 'NON_INTERP_COLOUR', 'alpha': 'POLYGON_OPACITY'}), '(storm_object_table[POLYGON_COLUMN].values[i], lw=0, ec=\n NON_INTERP_COLOUR, fc=NON_INTERP_COLOUR, alpha=POLYGON_OPACITY)\n', (18236, 18360), False, 'from 
descartes import PolygonPatch\n'), ((19584, 19641), 'numpy.where', 'numpy.where', (['(storm_times_minutes == previous_time_minutes)'], {}), '(storm_times_minutes == previous_time_minutes)\n', (19595, 19641), False, 'import numpy\n'), ((19685, 19738), 'numpy.where', 'numpy.where', (['(storm_times_minutes == next_time_minutes)'], {}), '(storm_times_minutes == next_time_minutes)\n', (19696, 19738), False, 'import numpy\n'), ((20408, 20453), 'numpy.array', 'numpy.array', (['[midpoint_x_coord, next_x_coord]'], {}), '([midpoint_x_coord, next_x_coord])\n', (20419, 20453), False, 'import numpy\n'), ((20478, 20523), 'numpy.array', 'numpy.array', (['[midpoint_y_coord, next_y_coord]'], {}), '([midpoint_y_coord, next_y_coord])\n', (20489, 20523), False, 'import numpy\n'), ((20834, 20883), 'numpy.array', 'numpy.array', (['[previous_x_coord, midpoint_x_coord]'], {}), '([previous_x_coord, midpoint_x_coord])\n', (20845, 20883), False, 'import numpy\n'), ((20908, 20957), 'numpy.array', 'numpy.array', (['[previous_y_coord, midpoint_y_coord]'], {}), '([previous_y_coord, midpoint_y_coord])\n', (20919, 20957), False, 'import numpy\n'), ((28920, 28970), 'numpy.unique', 'numpy.unique', (['centroid_x_coords'], {'return_index': '(True)'}), '(centroid_x_coords, return_index=True)\n', (28932, 28970), False, 'import numpy\n'), ((6130, 6158), 'numpy.array', 'numpy.array', (['[18]'], {'dtype': 'int'}), '([18], dtype=int)\n', (6141, 6158), False, 'import numpy\n'), ((6186, 6205), 'numpy.array', 'numpy.array', (['[15.0]'], {}), '([15.0])\n', (6197, 6205), False, 'import numpy\n'), ((6232, 6250), 'numpy.array', 'numpy.array', (['[3.2]'], {}), '([3.2])\n', (6243, 6250), False, 'import numpy\n'), ((8715, 8743), 'numpy.array', 'numpy.array', (['[12]'], {'dtype': 'int'}), '([12], dtype=int)\n', (8726, 8743), False, 'import numpy\n'), ((8771, 8789), 'numpy.array', 'numpy.array', (['[9.0]'], {}), '([9.0])\n', (8782, 8789), False, 'import numpy\n'), ((8816, 8834), 'numpy.array', 'numpy.array', 
(['[3.2]'], {}), '([3.2])\n', (8827, 8834), False, 'import numpy\n'), ((17539, 17567), 'numpy.min', 'numpy.min', (['centroid_y_coords'], {}), '(centroid_y_coords)\n', (17548, 17567), False, 'import numpy\n'), ((17598, 17626), 'numpy.max', 'numpy.max', (['centroid_y_coords'], {}), '(centroid_y_coords)\n', (17607, 17626), False, 'import numpy\n'), ((17658, 17686), 'numpy.min', 'numpy.min', (['centroid_x_coords'], {}), '(centroid_x_coords)\n', (17667, 17686), False, 'import numpy\n'), ((17718, 17746), 'numpy.max', 'numpy.max', (['centroid_x_coords'], {}), '(centroid_x_coords)\n', (17727, 17746), False, 'import numpy\n'), ((23774, 23807), 'numpy.round', 'numpy.round', (['tornado_time_minutes'], {}), '(tornado_time_minutes)\n', (23785, 23807), False, 'import numpy\n'), ((25795, 25823), 'numpy.min', 'numpy.min', (['centroid_y_coords'], {}), '(centroid_y_coords)\n', (25804, 25823), False, 'import numpy\n'), ((25854, 25882), 'numpy.max', 'numpy.max', (['centroid_y_coords'], {}), '(centroid_y_coords)\n', (25863, 25882), False, 'import numpy\n'), ((25914, 25942), 'numpy.min', 'numpy.min', (['centroid_x_coords'], {}), '(centroid_x_coords)\n', (25923, 25942), False, 'import numpy\n'), ((25974, 26002), 'numpy.max', 'numpy.max', (['centroid_x_coords'], {}), '(centroid_x_coords)\n', (25983, 26002), False, 'import numpy\n'), ((22316, 22368), 'numpy.array', 'numpy.array', (['interp_polygon_object_xy.exterior.xy[0]'], {}), '(interp_polygon_object_xy.exterior.xy[0])\n', (22327, 22368), False, 'import numpy\n'), ((22431, 22483), 'numpy.array', 'numpy.array', (['interp_polygon_object_xy.exterior.xy[1]'], {}), '(interp_polygon_object_xy.exterior.xy[1])\n', (22442, 22483), False, 'import numpy\n'), ((24003, 24038), 'numpy.round', 'numpy.round', (['storm_times_minutes[i]'], {}), '(storm_times_minutes[i])\n', (24014, 24038), False, 'import numpy\n'), ((29041, 29076), 'numpy.round', 'numpy.round', (['storm_times_minutes[i]'], {}), '(storm_times_minutes[i])\n', (29052, 29076), False, 'import 
numpy\n')] |
from sklearn.neighbors import KernelDensity
from scipy import signal
import networkx as nx
import numpy as np
def ClusterPeak(List,sd):
    """Cluster a 1-D list of coordinates using peaks of a Gaussian KDE.

    A kernel density estimate is fit over the sorted values, local density
    peaks are located, and midpoints between adjacent peaks (plus any gap
    wider than ``sd``) become cluster borders.  Adjacent clusters whose gap
    is below ``sd/2`` are merged afterwards.

    :param List: 1-D iterable of numeric positions to cluster.
    :param sd: spread scale; floored at 10, also controls KDE bandwidth.
    :return: list of clusters, each a sorted list of the input values.
    """
    sd = max(10,sd)
    x=np.array(sorted(List))
    y=[[i] for i in x]
    # Bandwidth grows with sd but is capped at 100 to keep nearby peaks resolvable.
    kde = KernelDensity(kernel='gaussian', bandwidth=min(sd/2,100)).fit(y)
    log_density = kde.score_samples(y)
    est_density = np.exp(log_density)
    # Local maxima of the estimated density, at least 3 samples apart.
    num_peak_3 = signal.find_peaks(est_density,distance=3)
    peaks = [x[i] for i in num_peak_3[0]]
    #print("Peaks:",peaks)
    peak_density = [est_density[i] for i in num_peak_3[0]]
    # Borders: midpoints between neighbouring density peaks.
    border = [(peaks[i]+peaks[i+1])/2 for i in range(len(peaks)-1)]
    ##Those jumping point should also be border
    i = 0
    while i<len(x)-1:
        if x[i+1]-x[i]>sd :
            border.append(x[i]+sd/2)
        i +=1
    clusters = []
    ai = min(x)-1
    border.sort()
    #print("Borders:",border)
    # Sweep the sorted borders, collecting the values in each (ai, bi] slice.
    i = 0
    while i < len(border):
        bi = border[i]
        group = list(x[(ai<x)*(x<=bi)])
        if len(group)>0:
            clusters.append(group)
        ai = bi
        i+=1
    # Values beyond the last border form the final cluster.
    clusters.append(list(x[ai<x]))
    if len(clusters)<2:
        #print('Total:',len(List),'the number of cluster is ' + str(len(clusters)))
        return clusters
    else:
        # Merge neighbouring clusters whose gap is smaller than sd/2; the
        # index only advances when no merge happened at position i.
        i = 0
        while i<len(clusters)-1:
            #print("Diff between clusters:",max(clusters[i]),min(clusters[i+1]))
            if min(clusters[i+1])-max(clusters[i])<sd/2:#100:
                #print("Diff between clusters <sd/2 ! Merge them.",len(clusters))
                clusters[i]+=clusters[i+1]
                del clusters[i+1]
                #print(len(clusters))
            else:
                #print(clusters[i],clusters[i+1])
                i+=1
        #print('Total:',len(List),'the number of cluster is ' + str(len(clusters)))
        return clusters
def Select_Col(select_row,Coff_mat,Weight_mat,current_ctg,truc):
    """Collect the columns touched by the selected rows and check connectivity.

    Builds a graph over the contigs of ``current_ctg`` with one edge per
    selected row of the coefficient matrix (the +1/-1 entries of a row name
    the two contigs it links).  If a single connected component contains the
    first contig and covers all of ``current_ctg``, the set is connected;
    every other component is handed to ``ProRegFunc``.

    NOTE(review): relies on the module-level names ``contig_list`` and
    ``ProRegFunc`` defined elsewhere in this file; ``Weight_mat`` and
    ``truc`` are unused here.
    NOTE(review): ``nx.connected_component_subgraphs`` was removed in
    networkx 2.4, so this function requires networkx < 2.4 — confirm the
    pinned version.
    NOTE(review): the first entry of ``select_row`` is skipped — presumably
    intentional; verify against the caller.

    :return: tuple (IsConnect flag 0/1, list of selected column indices).
    """
    select_col = set()
    G=nx.Graph()
    for i in current_ctg:
        G.add_node(contig_list[i])
    pair = {}
    for row in select_row[1:]:
        line=list(Coff_mat[row,:])
        # Each selected row links exactly one +1 column to one -1 column.
        select_col.add(line.index(1))
        select_col.add(line.index(-1))
        n1=current_ctg[line.index(1)]
        n2=current_ctg[line.index(-1)]
        G.add_edge(contig_list[n1],contig_list[n2])
    allSubG = nx.connected_component_subgraphs(G)
    IsConnect = 0
    for subG in allSubG:
        print("Here subG.size",len(subG.nodes()))
        if contig_list[current_ctg[0]] in subG.nodes() and len(subG.nodes())==len(current_ctg):
            IsConnect = 1
            print("Here IsConnect!")
        else:
            # Disconnected component: process it separately.
            subG_nodes = list(set(subG.nodes()))
            ProRegFunc(subG_nodes)
    return(IsConnect,list(select_col))
def RegProc(Coff_mat,Weight_mat,y,Cov_mat):
    """Weighted least-squares fit of ``Coff_mat @ x = y`` with weights ``Weight_mat``.

    Solves via the normal equations using a pseudo-inverse,
        x = (A^T W A)^+ A^T W y,
    and propagates ``Cov_mat`` through the same linear operator.

    :return: tuple (solution vector, element-wise absolute residual,
        covariance of the solution).
    """
    # Normal-equation matrix A^T W A.
    normal = np.dot(np.dot(Coff_mat.T, Weight_mat), Coff_mat)
    # "Hat" operator mapping observations to parameters: (A^T W A)^+ A^T W.
    hat = np.dot(np.dot(np.linalg.pinv(normal), Coff_mat.T), Weight_mat)
    solution = np.dot(hat, y.T)
    # Residual of the fit in observation space.
    residual = np.dot(Coff_mat, solution) - y
    # Covariance of the solution: H Cov H^T.
    covariance = np.dot(np.dot(hat, Cov_mat), hat.T)
    return (solution, abs(residual), covariance)
def RegProcSol(Coff_mat,Weight_mat,y): #,Cov_mat
    """Weighted least-squares solution only: x = (A^T W A)^+ A^T W y."""
    # Normal-equation matrix A^T W A.
    normal = np.dot(np.dot(Coff_mat.T, Weight_mat), Coff_mat)
    # Observation-to-parameter operator: (A^T W A)^+ A^T W.
    hat = np.dot(np.dot(np.linalg.pinv(normal), Coff_mat.T), Weight_mat)
    return np.dot(hat, y.T)
def SolCovFinal(Coff_mat, Weight_mat, Cov_mat):
    """Covariance of the weighted least-squares solution: H Cov H^T with H = (A^T W A)^+ A^T W."""
    # Normal-equation matrix A^T W A.
    normal = np.dot(np.dot(Coff_mat.T, Weight_mat), Coff_mat)
    # Observation-to-parameter operator H.
    hat = np.dot(np.dot(np.linalg.pinv(normal), Coff_mat.T), Weight_mat)
    return np.dot(np.dot(hat, Cov_mat), hat.T)
def findDemarcation(r_list,maxerror):
    """Pick a cutoff separating typical values from outliers in ``r_list``.

    Scans the upper quartile of the sorted values for a jump where the next
    value exceeds the current one by more than a factor of 4 (and is above
    1000); the value before the jump becomes the cutoff.  Falls back to a
    high percentile minus one when no such jump exists.
    """
    size = len(r_list)
    if size < 3:
        return min(r_list)
    ordered = sorted(r_list)
    start = int(size * 3 / 4) + 1
    for k in range(start, size - 1):
        current, following = ordered[k], ordered[k + 1]
        # Values below the error floor cannot be a meaningful cutoff.
        if current < maxerror:
            continue
        # A >3x relative jump into the >1000 range marks the demarcation.
        if (following - current) / current > 3 and following > 1000:
            print("mid", start - 1, "Found Demarcation:", size, k, current, following)
            return current
    # No jump found: use a robust high percentile as the cutoff.
    if size > 200:
        return np.percentile(r_list, 95) - 1
    return np.percentile(r_list, 98) - 1
def FindDemarcation(r_list,maxerror):
    """Variant of findDemarcation that walks every value above the 75th percentile.

    Returns the last value before a >3x jump into the >1000 range, or a high
    percentile of the list when no such jump is found.  ``maxerror`` is kept
    for interface compatibility but not used by this variant.
    """
    size = len(r_list)
    if size < 3:
        return min(r_list)
    ordered = sorted(r_list)
    start = int(size * 3 / 4) + 1
    prev = ordered[start - 1]
    for value in ordered[start:]:
        if prev == 0:
            # Cannot compute a relative jump from zero; just advance.
            prev = value
            continue
        if (value - prev) / prev > 3 and value > 1000:
            print("Found Demarcation:", size, ordered.index(prev), prev, value)
            return prev
        prev = value
    # No jump found: fall back to a robust high percentile.
    if size > 200:
        return np.percentile(r_list, 95)
    return np.percentile(r_list, 98)
def TrimMean(x):
    """Median of ``x`` after discarding values outside the 5th–95th percentile range.

    Falls back to the median of the full input when fewer than two values
    survive the trimming.
    """
    lo, hi = np.percentile(x, 5), np.percentile(x, 95)
    trimmed = [v for v in x if lo <= v <= hi]
    if len(trimmed) > 1:
        return np.median(trimmed)
    return np.median(x)
def MADN(list):
    """Normalized median absolute deviation (MAD / 0.675), a robust sigma estimate."""
    values = np.array(list)
    # Median of absolute deviations from the median.
    mad = np.median(np.abs(values - np.median(values)))
    return mad / 0.675
def MedianStd(list):
    """Spread estimate around the median: sqrt of half the mean squared deviation.

    Returns 0 for inputs with fewer than two elements.
    """
    values = np.array(list)
    size = len(values)
    if size < 2: return 0
    center = np.median(values)
    # Mean squared deviation from the median.
    mean_sq_dev = sum((values - center * np.ones(size)) ** 2) / size
    return sqrt(mean_sq_dev / 2)
def del_col_and_update(Coff_mat,Weight_mat,y,Cov_mat,subG,current_ctg):
    """Restrict the regression system to the contigs of one subgraph.

    Keeps only the coefficient-matrix columns whose contigs appear in
    ``subG``, then drops every row that no longer links a +1 column to a -1
    column, removing the matching entries of ``y`` and the matching
    rows/columns of ``Cov_mat`` and ``Weight_mat``.

    NOTE(review): uses the module global ``contig_list``; row 0 is never
    examined by the pruning loop (j starts at 1) — confirm that is intended.
    """
    global contig_list
    G_col = []
    new_current = []
    # Map each subgraph node back to its column index within current_ctg.
    for node in subG.nodes():
        id_in_contigl = contig_list.index(node)
        id_in_current = current_ctg.index(id_in_contigl)
        G_col.append(id_in_current)
        new_current.append(id_in_contigl)
    Coff_mat = Coff_mat[:,G_col]
    j=1
    # Drop rows that lost their +1 or -1 entry when columns were removed.
    # j only advances when the current row survives, because deleting a row
    # shifts the next one into position j.
    while j <= Coff_mat.shape[0]-1:
        line=list(Coff_mat[j,:])
        if (1 not in line) or (-1 not in line): #row line should be deleted
            Coff_mat=np.delete(Coff_mat,j,axis=0)
            y=np.delete(y,j)
            Cov_mat=np.delete(Cov_mat,j,axis=0)
            Cov_mat=np.delete(Cov_mat,j,axis=1)
            Weight_mat=np.delete(Weight_mat,j,axis=0)
            Weight_mat=np.delete(Weight_mat,j,axis=1)
        else : j +=1
return(Coff_mat,Cov_mat,Weight_mat,new_current,y) | [
"numpy.median",
"networkx.connected_component_subgraphs",
"numpy.linalg.pinv",
"numpy.ones",
"numpy.delete",
"networkx.Graph",
"numpy.exp",
"numpy.array",
"numpy.dot",
"scipy.signal.find_peaks",
"numpy.percentile"
] | [((334, 353), 'numpy.exp', 'np.exp', (['log_density'], {}), '(log_density)\n', (340, 353), True, 'import numpy as np\n'), ((369, 411), 'scipy.signal.find_peaks', 'signal.find_peaks', (['est_density'], {'distance': '(3)'}), '(est_density, distance=3)\n', (386, 411), False, 'from scipy import signal\n'), ((1692, 1702), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1700, 1702), True, 'import networkx as nx\n'), ((2021, 2056), 'networkx.connected_component_subgraphs', 'nx.connected_component_subgraphs', (['G'], {}), '(G)\n', (2053, 2056), True, 'import networkx as nx\n'), ((2460, 2490), 'numpy.dot', 'np.dot', (['Coff_mat.T', 'Weight_mat'], {}), '(Coff_mat.T, Weight_mat)\n', (2466, 2490), True, 'import numpy as np\n'), ((2505, 2535), 'numpy.dot', 'np.dot', (['doubleCoff_0', 'Coff_mat'], {}), '(doubleCoff_0, Coff_mat)\n', (2511, 2535), True, 'import numpy as np\n'), ((2614, 2648), 'numpy.dot', 'np.dot', (['Solution_mat_0', 'Weight_mat'], {}), '(Solution_mat_0, Weight_mat)\n', (2620, 2648), True, 'import numpy as np\n'), ((2665, 2687), 'numpy.dot', 'np.dot', (['Lamba_mat', 'y.T'], {}), '(Lamba_mat, y.T)\n', (2671, 2687), True, 'import numpy as np\n'), ((2702, 2732), 'numpy.dot', 'np.dot', (['Coff_mat', 'Solution_mat'], {}), '(Coff_mat, Solution_mat)\n', (2708, 2732), True, 'import numpy as np\n'), ((2962, 2992), 'numpy.dot', 'np.dot', (['Coff_mat.T', 'Weight_mat'], {}), '(Coff_mat.T, Weight_mat)\n', (2968, 2992), True, 'import numpy as np\n'), ((3007, 3037), 'numpy.dot', 'np.dot', (['doubleCoff_0', 'Coff_mat'], {}), '(doubleCoff_0, Coff_mat)\n', (3013, 3037), True, 'import numpy as np\n'), ((3116, 3150), 'numpy.dot', 'np.dot', (['Solution_mat_0', 'Weight_mat'], {}), '(Solution_mat_0, Weight_mat)\n', (3122, 3150), True, 'import numpy as np\n'), ((3167, 3189), 'numpy.dot', 'np.dot', (['Lamba_mat', 'y.T'], {}), '(Lamba_mat, y.T)\n', (3173, 3189), True, 'import numpy as np\n'), ((3361, 3391), 'numpy.dot', 'np.dot', (['Coff_mat.T', 'Weight_mat'], {}), '(Coff_mat.T, 
Weight_mat)\n', (3367, 3391), True, 'import numpy as np\n'), ((3406, 3436), 'numpy.dot', 'np.dot', (['doubleCoff_0', 'Coff_mat'], {}), '(doubleCoff_0, Coff_mat)\n', (3412, 3436), True, 'import numpy as np\n'), ((3515, 3549), 'numpy.dot', 'np.dot', (['Solution_mat_0', 'Weight_mat'], {}), '(Solution_mat_0, Weight_mat)\n', (3521, 3549), True, 'import numpy as np\n'), ((5330, 5349), 'numpy.percentile', 'np.percentile', (['x', '(5)'], {}), '(x, 5)\n', (5343, 5349), True, 'import numpy as np\n'), ((5365, 5385), 'numpy.percentile', 'np.percentile', (['x', '(95)'], {}), '(x, 95)\n', (5378, 5385), True, 'import numpy as np\n'), ((5629, 5643), 'numpy.array', 'np.array', (['list'], {}), '(list)\n', (5637, 5643), True, 'import numpy as np\n'), ((5740, 5754), 'numpy.array', 'np.array', (['list'], {}), '(list)\n', (5748, 5754), True, 'import numpy as np\n'), ((5826, 5841), 'numpy.median', 'np.median', (['list'], {}), '(list)\n', (5835, 5841), True, 'import numpy as np\n'), ((2561, 2587), 'numpy.linalg.pinv', 'np.linalg.pinv', (['doubleCoff'], {}), '(doubleCoff)\n', (2575, 2587), True, 'import numpy as np\n'), ((2807, 2833), 'numpy.dot', 'np.dot', (['Lamba_mat', 'Cov_mat'], {}), '(Lamba_mat, Cov_mat)\n', (2813, 2833), True, 'import numpy as np\n'), ((3063, 3089), 'numpy.linalg.pinv', 'np.linalg.pinv', (['doubleCoff'], {}), '(doubleCoff)\n', (3077, 3089), True, 'import numpy as np\n'), ((3462, 3488), 'numpy.linalg.pinv', 'np.linalg.pinv', (['doubleCoff'], {}), '(doubleCoff)\n', (3476, 3488), True, 'import numpy as np\n'), ((3573, 3599), 'numpy.dot', 'np.dot', (['Lamba_mat', 'Cov_mat'], {}), '(Lamba_mat, Cov_mat)\n', (3579, 3599), True, 'import numpy as np\n'), ((5212, 5237), 'numpy.percentile', 'np.percentile', (['r_list', '(95)'], {}), '(r_list, 95)\n', (5225, 5237), True, 'import numpy as np\n'), ((5268, 5293), 'numpy.percentile', 'np.percentile', (['r_list', '(98)'], {}), '(r_list, 98)\n', (5281, 5293), True, 'import numpy as np\n'), ((5544, 5561), 'numpy.median', 'np.median', 
(['r_list'], {}), '(r_list)\n', (5553, 5561), True, 'import numpy as np\n'), ((5589, 5601), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (5598, 5601), True, 'import numpy as np\n'), ((5858, 5871), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (5865, 5871), True, 'import numpy as np\n'), ((4201, 4226), 'numpy.percentile', 'np.percentile', (['r_list', '(95)'], {}), '(r_list, 95)\n', (4214, 4226), True, 'import numpy as np\n'), ((4257, 4282), 'numpy.percentile', 'np.percentile', (['r_list', '(98)'], {}), '(r_list, 98)\n', (4270, 4282), True, 'import numpy as np\n'), ((6429, 6459), 'numpy.delete', 'np.delete', (['Coff_mat', 'j'], {'axis': '(0)'}), '(Coff_mat, j, axis=0)\n', (6438, 6459), True, 'import numpy as np\n'), ((6464, 6479), 'numpy.delete', 'np.delete', (['y', 'j'], {}), '(y, j)\n', (6473, 6479), True, 'import numpy as np\n'), ((6491, 6520), 'numpy.delete', 'np.delete', (['Cov_mat', 'j'], {'axis': '(0)'}), '(Cov_mat, j, axis=0)\n', (6500, 6520), True, 'import numpy as np\n'), ((6531, 6560), 'numpy.delete', 'np.delete', (['Cov_mat', 'j'], {'axis': '(1)'}), '(Cov_mat, j, axis=1)\n', (6540, 6560), True, 'import numpy as np\n'), ((6574, 6606), 'numpy.delete', 'np.delete', (['Weight_mat', 'j'], {'axis': '(0)'}), '(Weight_mat, j, axis=0)\n', (6583, 6606), True, 'import numpy as np\n'), ((6620, 6652), 'numpy.delete', 'np.delete', (['Weight_mat', 'j'], {'axis': '(1)'}), '(Weight_mat, j, axis=1)\n', (6629, 6652), True, 'import numpy as np\n'), ((5671, 5686), 'numpy.median', 'np.median', (['list'], {}), '(list)\n', (5680, 5686), True, 'import numpy as np\n')] |
import numpy as np
# Training data: 3 samples x 2 features (domain meaning of the columns is
# not shown here -- presumably a toy regression example; confirm if needed).
X = np.array(([2, 9], [1, 5], [3, 6]), dtype=float)
y = np.array(([92], [86], [89]), dtype=float)
X = X/np.amax(X,axis=0) # maximum of X array longitudinally: scales each feature column into [0, 1]
y = y/100  # scale targets into [0, 1] so sigmoid outputs can match them
#Sigmoid Function
def sigmoid (x):
    # Logistic activation: maps any real input into the open interval (0, 1).
    denominator = 1 + np.exp(-x)
    return 1 / denominator
#Derivative of Sigmoid Function
def derivatives_sigmoid(x):
    # Derivative of the logistic function, expressed in terms of its output x = sigmoid(z).
    return (1 - x) * x
# Hyper-parameters for plain batch gradient descent.
epoch=7000 #Setting training iterations
lr=0.1 #Setting learning rate
# Network shape: 2 inputs -> 3 hidden units -> 1 output.
inputlayer_neurons = 2 #number of features in data set
hiddenlayer_neurons = 3 #number of hidden layers neurons
output_neurons = 1 #number of neurons at output layer
# Weight and bias initialization: uniform random in [0, 1).
# NOTE(review): only wh and wout are updated in the loop below; the biases
# bh and bout keep their random initial values throughout training.
wh=np.random.uniform(size=(inputlayer_neurons,hiddenlayer_neurons))
bh=np.random.uniform(size=(1,hiddenlayer_neurons))
wout=np.random.uniform(size=(hiddenlayer_neurons,output_neurons))
bout=np.random.uniform(size=(1,output_neurons))
#draws a random range of numbers uniformly of dim x*y
for i in range(epoch):
    # Forward propagation: input -> hidden -> output with sigmoid at each layer.
    hinp1=np.dot(X,wh)
    hinp=hinp1 + bh
    hlayer_act = sigmoid(hinp)
    outinp1=np.dot(hlayer_act,wout)
    outinp= outinp1+ bout
    output = sigmoid(outinp)
    # Backpropagation: error times sigmoid gradient at each layer.
    EO = y-output
    outgrad = derivatives_sigmoid(output)
    d_output = EO * outgrad
    EH = d_output.dot(wout.T)
    hiddengrad = derivatives_sigmoid(hlayer_act) #how much hidden layer wts contributed to err
    d_hiddenlayer = EH * hiddengrad
    # Gradient-descent weight updates (biases are not updated).
    wout += hlayer_act.T.dot(d_output) * lr# dotproduct of nextlayererror and #currentlayerop
    wh += X.T.dot(d_hiddenlayer) * lr
print("Input: \n" + str(X))
print("Actual Output: \n" + str(y))
print("Predicted Output: \n" ,output)
| [
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.random.uniform",
"numpy.amax"
] | [((24, 71), 'numpy.array', 'np.array', (['([2, 9], [1, 5], [3, 6])'], {'dtype': 'float'}), '(([2, 9], [1, 5], [3, 6]), dtype=float)\n', (32, 71), True, 'import numpy as np\n'), ((77, 118), 'numpy.array', 'np.array', (['([92], [86], [89])'], {'dtype': 'float'}), '(([92], [86], [89]), dtype=float)\n', (85, 118), True, 'import numpy as np\n'), ((651, 716), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(inputlayer_neurons, hiddenlayer_neurons)'}), '(size=(inputlayer_neurons, hiddenlayer_neurons))\n', (668, 716), True, 'import numpy as np\n'), ((720, 768), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1, hiddenlayer_neurons)'}), '(size=(1, hiddenlayer_neurons))\n', (737, 768), True, 'import numpy as np\n'), ((774, 835), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(hiddenlayer_neurons, output_neurons)'}), '(size=(hiddenlayer_neurons, output_neurons))\n', (791, 835), True, 'import numpy as np\n'), ((841, 884), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1, output_neurons)'}), '(size=(1, output_neurons))\n', (858, 884), True, 'import numpy as np\n'), ((126, 144), 'numpy.amax', 'np.amax', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (133, 144), True, 'import numpy as np\n'), ((995, 1008), 'numpy.dot', 'np.dot', (['X', 'wh'], {}), '(X, wh)\n', (1001, 1008), True, 'import numpy as np\n'), ((1065, 1089), 'numpy.dot', 'np.dot', (['hlayer_act', 'wout'], {}), '(hlayer_act, wout)\n', (1071, 1089), True, 'import numpy as np\n'), ((246, 256), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (252, 256), True, 'import numpy as np\n')] |
import torch
import sys
import os
import numpy as np
from abc import ABC
from transformers import BertTokenizer
import transformers
from typing import List
# Make the sibling 'stel' package importable before loading set_for_global.
sys.path.append(os.path.join('..', 'stel'))
from set_for_global import set_global_seed, set_torch_device, set_logging, EVAL_BATCH_SIZE
# Cap on whitespace-separated words fed to BERT per input segment.
BERT_MAX_WORDS = 250
# OPTIONAL: if you want to have more information on what's happening under the hood, activate the logger as follows
import logging
set_logging()
transformers.logging.set_verbosity_info()
# Module-level side effects: pick the torch device and fix the RNG seeds.
device = set_torch_device()
BATCH_SIZE = 16
set_global_seed()
# Checkpoint names for the base models used throughout this module.
BERT_CASED_BASE_MODEL = "bert-base-cased"
BERT_UNCASED_BASE_MODEL = "bert-base-uncased"
ROBERTA_BASE = 'roberta-base'
# Shared tokenizers, loaded eagerly at import time.
UNCASED_TOKENIZER = BertTokenizer.from_pretrained(BERT_UNCASED_BASE_MODEL) # , do_lower_case=True)
CASED_TOKENIZER = BertTokenizer.from_pretrained(BERT_CASED_BASE_MODEL)
# ---------------------------------------------- CODE -------------------------------------------------
# Load pre-trained model tokenizer (vocabulary)
# tokenizer = BertTokenizer.from_pretrained('tuned_bert_path-base-uncased')
# Tokenize input
# text = "[CLS] Who was <NAME> ? [SEP] <NAME> was a puppeteer [SEP]"
# tokenized_text = tokenizer.tokenize(text)
class TransformersModel(ABC):
    """
    Base wrapper around a pretrained huggingface transformer.

    Loads a model + tokenizer from ``model_path`` and exposes mean-pooled
    last-hidden-state embeddings via :meth:`forward` / :meth:`forward_batch`.
    """

    def __init__(self, model_path="", tokenizer_path=None):
        self.model, self.tokenizer = self._load_model(model_path, tokenizer_path)
        self.model.to(device)

    def _load_model(self, model_path, tokenizer_path=None):
        """Load model and tokenizer; subclasses override for concrete architectures.

        Bug fix: uses the Auto* factories. ``PreTrainedModel.from_pretrained``
        cannot be called on the abstract base class, and the previous
        ``transformers.PreTrainedTokenizer(model_path)`` constructed a bare
        tokenizer object (passing the path as an init argument) instead of
        loading the pretrained vocabulary.
        """
        model = transformers.AutoModel.from_pretrained(model_path)
        if tokenizer_path is None:
            tokenizer = transformers.AutoTokenizer.from_pretrained(model_path)
        else:
            tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_path)
        return model, tokenizer

    def forward(self, text):
        """Return the mean-pooled last-hidden-state embedding for a single text.

        https://huggingface.co/transformers/model_doc/bert.html#bertmodel
        Bug fix: the previous ``np.array([...]).mean()`` collapsed the whole
        embedding to a single scalar (and fails outright on CUDA tensors);
        the token axis is now averaged, matching :meth:`forward_batch`.

        :param text: a single input string.
        :return: 1-D tensor of size hidden_dim.
        """
        self.model.eval()
        encoded_dict = tokenize_sentence(text, self.tokenizer)
        encoded_dict.to(device)
        with torch.no_grad():
            predictions = self.model(**encoded_dict, return_dict=True)
        # (1, seq_len, hidden) -> (hidden,): average over the token dimension.
        return predictions.last_hidden_state[0].mean(dim=0)

    def forward_batch(self, u1s, batch_size=EVAL_BATCH_SIZE):
        """Return a list of mean-pooled embeddings, one per input string.

        NOTE: the mean includes padding positions, as in the original code.
        """
        self.model.eval()
        chunks = (len(u1s) - 1) // batch_size + 1
        avg_embeds = []
        for i in range(chunks):
            logging.info("at batch number {}".format(i))
            batch_u1s = u1s[i * batch_size:min((i + 1) * batch_size, len(u1s))]
            encoded_dict = tokenize_sentences(batch_u1s, self.tokenizer)
            encoded_dict.to(device)
            with torch.no_grad():
                outputs = self.model(**encoded_dict, return_dict=True)
            # Mean over the token dimension of each sequence's hidden states.
            avg_embed = [emb.mean(dim=0).to(device) for emb in outputs.last_hidden_state.data]
            avg_embeds = avg_embeds + avg_embed
            # Free the per-batch tensors eagerly to keep GPU memory bounded.
            del encoded_dict
            del outputs
            del batch_u1s
            torch.cuda.empty_cache()
        return avg_embeds
class RoBERTaModel(TransformersModel):
    """RoBERTa-backed embedding model (https://huggingface.co/roberta-base)."""

    def __init__(self, model_path=ROBERTA_BASE, tokenizer_path=None):
        super(RoBERTaModel, self).__init__(model_path, tokenizer_path)

    def _load_model(self, model_path, tokenizer_path=None):
        """Load a RoBERTa model plus its tokenizer (from model_path when no tokenizer_path is given)."""
        model = transformers.RobertaModel.from_pretrained(model_path)
        source = model_path if tokenizer_path is None else tokenizer_path
        tokenizer = transformers.RobertaTokenizer.from_pretrained(source)
        return model, tokenizer
# TODO improve? ...
# https://huggingface.co/transformers/quicktour.html
# You can pass a list of sentences directly to your tokenizer.
# If your goal is to send them through your model as a batch, you probably want to pad them all to the same length,
# truncate them to the maximum length the model can accept and get tensors back.
# You can specify all of that to the tokenizer:
class BertModel(TransformersModel):
    """BERT-backed embedding model; optionally put into eval mode on construction."""

    def __init__(self, model_path=BERT_UNCASED_BASE_MODEL, tokenizer_path=None, in_eval_mode=True):
        super(BertModel, self).__init__(model_path, tokenizer_path)
        self.model.to(device)
        if in_eval_mode:
            self.model.eval()

    def _load_model(self, model_path, tokenizer_path=None):
        """Load a plain BertModel together with the matching tokenizer."""
        return (transformers.BertModel.from_pretrained(model_path),
                self._set_tokenizer(model_path, tokenizer_path))

    def _set_tokenizer(self, model_path, tokenizer_path):
        """Tokenizer comes from tokenizer_path when given, otherwise from model_path."""
        source = model_path if tokenizer_path is None else tokenizer_path
        return transformers.BertTokenizer.from_pretrained(source)
class BertForTwoSentencePredictionModel(BertModel):
    """BERT model applied to sentence *pairs*; exposes the raw head logits."""
    def __init__(self, model_path=BERT_UNCASED_BASE_MODEL, tokenizer_path=None, in_eval_mode=True):
        super(BertForTwoSentencePredictionModel, self).__init__(model_path, tokenizer_path, in_eval_mode=in_eval_mode)
    def forward_two(self, utt1, utt2):
        """
        Returns the logits of the next-sentence-prediction head for one pair.

        :param utt1: first utterance (string)
        :param utt2: second utterance (string)
        :return: logit tensor for the pair (first element of the model output)
        """
        self.model.eval()
        encoded_dict = tokenize_sentence_pair(utt1, utt2, self.tokenizer)
        encoded_dict.to(device)
        with torch.no_grad():
            predictions = self.model(**encoded_dict)
        # predictions[0, 0] is the score of Next sentence being True and predictions[0, 1] is the score of
        # Next sentence being False
        # https://github.com/huggingface/transformers/issues/48
        return predictions[0]
    def forward_two_batch(self, u1s, u2s, batch_size=EVAL_BATCH_SIZE):
        """Batched variant of forward_two; concatenates per-batch logits.

        NOTE(review): the u2s slice bounds use len(u1s), so this assumes
        len(u1s) == len(u2s) -- confirm at the call sites.
        """
        self.model.eval()
        # https://stackoverflow.com/questions/41868890/how-to-loop-through-a-python-list-in-batch
        chunks = (len(u1s) - 1) // batch_size + 1
        logits = torch.tensor([], device=device)
        for i in range(chunks):
            logging.info("at batch number {}".format(i))
            batch_u1s = u1s[i * batch_size:min((i + 1) * batch_size, len(u1s))]
            batch_u2s = u2s[i * batch_size:min((i + 1) * batch_size, len(u1s))]
            encoded_dict = tokenize_sentence_pairs(batch_u1s, batch_u2s, self.tokenizer)
            with torch.no_grad():
                outputs = self.model(**encoded_dict, return_dict=True)
            logit_output = outputs["logits"]
            logit_output.to(device)
            # logging.info("current logit output is at {}".format(logit_output))
            # logging.info("logit_output is on cuda: {}".format(logit_output.is_cuda))
            logits.to(device)
            # logging.info("logits is on cuda: {}".format(logits.is_cuda))
            # logging.info("device is {}".format(device))
            logits = torch.cat((logits, logit_output))
            # Free the per-batch tensors eagerly to keep GPU memory bounded.
            del encoded_dict["input_ids"]
            del encoded_dict["token_type_ids"]
            del encoded_dict["attention_mask"]
            del encoded_dict
            del outputs
            del batch_u1s
            del batch_u2s
            torch.cuda.empty_cache()
        return logits
class SoftmaxTwoBertModel(BertForTwoSentencePredictionModel):
    """Turns two-sentence classification logits into similarity scores.

    Class 0 is interpreted as "same style" (originally: sentence B continues
    sentence A), so the similarity is the softmax probability of class 0.
    """

    def __init__(self, model_path=BERT_UNCASED_BASE_MODEL, tokenizer_path=None, in_eval_mode=True):
        super().__init__(model_path=model_path, tokenizer_path=tokenizer_path, in_eval_mode=in_eval_mode)

    def similarity(self, utt1, utt2):
        """Similarity score in [0, 1] for a single utterance pair."""
        logit = self.forward_two(utt1, utt2)
        return self.get_sim_from_logit(logit)

    def similarities(self, u1s: List[str], u2s: List[str]):
        """
        :param u1s: first utterances
        :param u2s: second utterances (paired element-wise with u1s)
        :return: tensor with one similarity value per pair
        """
        logits = self.forward_two_batch(u1s, u2s)
        return self.get_sim_from_logit(logits, dim=0)

    @staticmethod
    def get_sim_from_logit(logit, dim=None):
        """
        :param logit: tensor of logits, e.g. tensor([[0.5, 0.5]])
        :param dim: None for a single pair (returns a float); anything else
            returns the class-0 probability column for a batch
        :return: probability of label 0, i.e. close to 1 if same, 0 if not
        """
        softmax = torch.nn.functional.softmax(logit, dim=1)
        if dim is None:
            # Single pair: unwrap the scalar probability of class 0.
            return softmax.data[0][0].item()
        # Batch: probability of class 0 for every row.
        return softmax.data[:, 0]
class SoftmaxNextBertModel(SoftmaxTwoBertModel):
    """Softmax similarity model backed by BERT's next-sentence-prediction head."""

    def __init__(self, model_path=BERT_UNCASED_BASE_MODEL, tokenizer_path=None, in_eval_mode=True):
        super().__init__(model_path=model_path, tokenizer_path=tokenizer_path, in_eval_mode=in_eval_mode)

    def _load_model(self, model_path, tokenizer_path=None):
        """Load BertForNextSentencePrediction instead of the bare encoder."""
        tokenizer = self._set_tokenizer(model_path, tokenizer_path)
        model = transformers.BertForNextSentencePrediction.from_pretrained(model_path)
        return model, tokenizer
class UncasedBertForNextSentencePredictionmodel(SoftmaxNextBertModel):
    # Convenience subclass pinning both model and tokenizer to the uncased BERT base checkpoint.
    # NOTE(review): super(SoftmaxNextBertModel, self) skips SoftmaxNextBertModel.__init__
    # and starts the chain at SoftmaxTwoBertModel; the net effect is the same here since
    # every __init__ in the chain only forwards its arguments -- confirm this is intended.
    def __init__(self, model_path=BERT_UNCASED_BASE_MODEL, tokenizer_path=BERT_UNCASED_BASE_MODEL):
        super(SoftmaxNextBertModel, self).__init__(model_path, tokenizer_path)
class CasedBertForNextSentencePredictionModel(SoftmaxNextBertModel):
    """Next-sentence-prediction similarity model using the *cased* BERT base checkpoint.

    Bug fix: the defaults previously pointed at BERT_UNCASED_BASE_MODEL
    (copy-paste from the uncased variant), so this "cased" class silently
    loaded uncased weights and an uncased tokenizer by default.
    """

    def __init__(self, model_path=BERT_CASED_BASE_MODEL, tokenizer_path=BERT_CASED_BASE_MODEL):
        # Start the super() chain above SoftmaxNextBertModel, mirroring the uncased variant.
        super(SoftmaxNextBertModel, self).__init__(model_path, tokenizer_path)

    def _load_model(self, model_path, tokenizer_path=None):
        """Load the NSP-head model; tokenizer_path is always supplied by __init__."""
        model = transformers.BertForNextSentencePrediction.from_pretrained(model_path)
        tokenizer = transformers.BertTokenizer.from_pretrained(tokenizer_path)
        return model, tokenizer
class SoftmaxSeqBertModel(SoftmaxTwoBertModel):
    """Softmax similarity model backed by a fine-tuned sequence-classification head."""

    def __init__(self, model_path, tokenizer_path=BERT_CASED_BASE_MODEL):
        super(SoftmaxSeqBertModel, self).__init__(model_path, tokenizer_path)

    def _load_model(self, model_path, tokenizer_path=None):
        """Load BertForSequenceClassification and a tokenizer from tokenizer_path."""
        classifier = transformers.BertForSequenceClassification.from_pretrained(model_path)
        vocab = transformers.BertTokenizer.from_pretrained(tokenizer_path)
        return classifier, vocab
# ________________________________________ Preparing Data ____________________________________________
def tokenize_sentences(texts: List[str], tokenizer):
    """Batch-tokenize texts (each pre-trimmed to 512 whitespace words) with padding and truncation."""
    # Rough pre-truncation on whitespace keeps very long inputs cheap to tokenize.
    trimmed = [' '.join(text.split(' ')[:512]) for text in texts]
    # Result carries input_ids, token_type_ids and attention_mask.
    return tokenizer(trimmed,
                     return_tensors='pt',
                     truncation=True,
                     padding=True)
def tokenize_sentence(text, tokenizer):
    """Tokenize a single text, padded/truncated to exactly 512 tokens."""
    # Pre-trim on whitespace so the tokenizer never sees an extremely long input.
    trimmed = ' '.join(text.split(' ')[:512])
    # Fixed-length encoding: input_ids, token_type_ids, attention_mask.
    return tokenizer(trimmed,
                     return_tensors='pt',
                     max_length=512,
                     padding="max_length",
                     truncation="longest_first")
def tokenize_sentence_pair(u1, u2, tokenizer):
    """Encode an utterance pair for BERT, padded/truncated to 512 tokens.

    To respect the model's length limit, u1 is trimmed to its *last*
    BERT_MAX_WORDS whitespace words and u2 to its *first* BERT_MAX_WORDS,
    i.e. the pairing compares the end of u1 with the start of u2.
    """
    tail_of_first = ' '.join(u1.split(' ')[-BERT_MAX_WORDS:])
    head_of_second = ' '.join(u2.split(' ')[:BERT_MAX_WORDS])
    # Returns input_ids, token_type_ids and attention_mask.
    return tokenizer(tail_of_first, head_of_second,
                     return_tensors='pt',
                     max_length=512,
                     padding="max_length",
                     truncation="longest_first")
def tokenize_sentence_pairs(u1s, u2s, tokenizer):
    """Encode a batch of utterance pairs by stacking per-pair encodings.

    Each pair is encoded to a fixed 512-token length by
    tokenize_sentence_pair, so the per-pair tensors can simply be
    concatenated along the batch dimension.

    :return: dict with stacked ``input_ids``, ``token_type_ids`` and
        ``attention_mask`` tensors on the global ``device``.
    """
    input_ids = []
    attention_masks = []
    token_type_ids = []
    for u1, u2 in zip(u1s, u2s):
        encoded_dict = tokenize_sentence_pair(u1, u2, tokenizer)
        encoded_dict.to(device)
        # Collect the encoded sentence pair.
        input_ids.append(encoded_dict['input_ids'])
        # Attention mask distinguishes real tokens from padding.
        # (Bug fix: a stray trailing backslash after this append previously
        # line-joined the following comment into the statement; removing a
        # comment there would have broken the code.)
        attention_masks.append(encoded_dict['attention_mask'])
        # Token type ids mark whether a token belongs to u1 or u2.
        token_type_ids.append(encoded_dict['token_type_ids'])
    # Stack the per-pair tensors into batch tensors.
    input_ids = torch.cat(input_ids, dim=0)
    attention_masks = torch.cat(attention_masks, dim=0)
    token_type_ids = torch.cat(token_type_ids, dim=0)
    # Bug fix: Tensor.to returns a new tensor and is not in-place; the
    # previous code discarded the results of these three calls.
    input_ids = input_ids.to(device)
    attention_masks = attention_masks.to(device)
    token_type_ids = token_type_ids.to(device)
    encoded_dicts = {"token_type_ids": token_type_ids,
                     "input_ids": input_ids,
                     "attention_mask": attention_masks}
    return encoded_dicts
| [
"transformers.RobertaTokenizer.from_pretrained",
"numpy.array",
"torch.nn.functional.softmax",
"transformers.BertForNextSentencePrediction.from_pretrained",
"transformers.BertModel.from_pretrained",
"set_for_global.set_global_seed",
"set_for_global.set_torch_device",
"transformers.PreTrainedModel.from... | [((447, 460), 'set_for_global.set_logging', 'set_logging', ([], {}), '()\n', (458, 460), False, 'from set_for_global import set_global_seed, set_torch_device, set_logging, EVAL_BATCH_SIZE\n'), ((461, 502), 'transformers.logging.set_verbosity_info', 'transformers.logging.set_verbosity_info', ([], {}), '()\n', (500, 502), False, 'import transformers\n'), ((512, 530), 'set_for_global.set_torch_device', 'set_torch_device', ([], {}), '()\n', (528, 530), False, 'from set_for_global import set_global_seed, set_torch_device, set_logging, EVAL_BATCH_SIZE\n'), ((549, 566), 'set_for_global.set_global_seed', 'set_global_seed', ([], {}), '()\n', (564, 566), False, 'from set_for_global import set_global_seed, set_torch_device, set_logging, EVAL_BATCH_SIZE\n'), ((705, 759), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['BERT_UNCASED_BASE_MODEL'], {}), '(BERT_UNCASED_BASE_MODEL)\n', (734, 759), False, 'from transformers import BertTokenizer\n'), ((803, 855), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['BERT_CASED_BASE_MODEL'], {}), '(BERT_CASED_BASE_MODEL)\n', (832, 855), False, 'from transformers import BertTokenizer\n'), ((173, 199), 'os.path.join', 'os.path.join', (['""".."""', '"""stel"""'], {}), "('..', 'stel')\n", (185, 199), False, 'import os\n'), ((14070, 14097), 'torch.cat', 'torch.cat', (['input_ids'], {'dim': '(0)'}), '(input_ids, dim=0)\n', (14079, 14097), False, 'import torch\n'), ((14120, 14153), 'torch.cat', 'torch.cat', (['attention_masks'], {'dim': '(0)'}), '(attention_masks, dim=0)\n', (14129, 14153), False, 'import torch\n'), ((14175, 14207), 'torch.cat', 'torch.cat', (['token_type_ids'], {'dim': '(0)'}), '(token_type_ids, dim=0)\n', (14184, 14207), False, 'import torch\n'), ((1600, 1656), 'transformers.PreTrainedModel.from_pretrained', 'transformers.PreTrainedModel.from_pretrained', (['model_path'], {}), '(model_path)\n', (1644, 1656), False, 'import 
transformers\n'), ((3812, 3865), 'transformers.RobertaModel.from_pretrained', 'transformers.RobertaModel.from_pretrained', (['model_path'], {}), '(model_path)\n', (3853, 3865), False, 'import transformers\n'), ((4912, 4962), 'transformers.BertModel.from_pretrained', 'transformers.BertModel.from_pretrained', (['model_path'], {}), '(model_path)\n', (4950, 4962), False, 'import transformers\n'), ((6536, 6567), 'torch.tensor', 'torch.tensor', (['[]'], {'device': 'device'}), '([], device=device)\n', (6548, 6567), False, 'import torch\n'), ((9287, 9328), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['logit'], {'dim': '(1)'}), '(logit, dim=1)\n', (9314, 9328), False, 'import torch\n'), ((9975, 10045), 'transformers.BertForNextSentencePrediction.from_pretrained', 'transformers.BertForNextSentencePrediction.from_pretrained', (['model_path'], {}), '(model_path)\n', (10033, 10045), False, 'import transformers\n'), ((10725, 10795), 'transformers.BertForNextSentencePrediction.from_pretrained', 'transformers.BertForNextSentencePrediction.from_pretrained', (['model_path'], {}), '(model_path)\n', (10783, 10795), False, 'import transformers\n'), ((10816, 10874), 'transformers.BertTokenizer.from_pretrained', 'transformers.BertTokenizer.from_pretrained', (['tokenizer_path'], {}), '(tokenizer_path)\n', (10858, 10874), False, 'import transformers\n'), ((11186, 11256), 'transformers.BertForSequenceClassification.from_pretrained', 'transformers.BertForSequenceClassification.from_pretrained', (['model_path'], {}), '(model_path)\n', (11244, 11256), False, 'import transformers\n'), ((11277, 11335), 'transformers.BertTokenizer.from_pretrained', 'transformers.BertTokenizer.from_pretrained', (['tokenizer_path'], {}), '(tokenizer_path)\n', (11319, 11335), False, 'import transformers\n'), ((1716, 1760), 'transformers.PreTrainedTokenizer', 'transformers.PreTrainedTokenizer', (['model_path'], {}), '(model_path)\n', (1748, 1760), False, 'import transformers\n'), ((1799, 1847), 
'transformers.PreTrainedTokenizer', 'transformers.PreTrainedTokenizer', (['tokenizer_path'], {}), '(tokenizer_path)\n', (1831, 1847), False, 'import transformers\n'), ((2184, 2199), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2197, 2199), False, 'import torch\n'), ((3459, 3483), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3481, 3483), False, 'import torch\n'), ((3925, 3982), 'transformers.RobertaTokenizer.from_pretrained', 'transformers.RobertaTokenizer.from_pretrained', (['model_path'], {}), '(model_path)\n', (3970, 3982), False, 'import transformers\n'), ((4021, 4082), 'transformers.RobertaTokenizer.from_pretrained', 'transformers.RobertaTokenizer.from_pretrained', (['tokenizer_path'], {}), '(tokenizer_path)\n', (4066, 4082), False, 'import transformers\n'), ((5181, 5235), 'transformers.BertTokenizer.from_pretrained', 'transformers.BertTokenizer.from_pretrained', (['model_path'], {}), '(model_path)\n', (5223, 5235), False, 'import transformers\n'), ((5274, 5332), 'transformers.BertTokenizer.from_pretrained', 'transformers.BertTokenizer.from_pretrained', (['tokenizer_path'], {}), '(tokenizer_path)\n', (5316, 5332), False, 'import transformers\n'), ((5964, 5979), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5977, 5979), False, 'import torch\n'), ((7444, 7477), 'torch.cat', 'torch.cat', (['(logits, logit_output)'], {}), '((logits, logit_output))\n', (7453, 7477), False, 'import torch\n'), ((7876, 7900), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7898, 7900), False, 'import torch\n'), ((2287, 2343), 'numpy.array', 'np.array', (['[emb for emb in predictions.last_hidden_state]'], {}), '([emb for emb in predictions.last_hidden_state])\n', (2295, 2343), True, 'import numpy as np\n'), ((2844, 2859), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2857, 2859), False, 'import torch\n'), ((6923, 6938), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6936, 6938), False, 'import torch\n')] |
import pytest
import warnings
warnings.filterwarnings('ignore')
@pytest.mark.basic
def test_resize_ratio():
"""
Testing the resize_ratio function
Returns: Nothing
"""
import numpy as np
from deep_utils import resize_ratio
dummy_images = [np.random.randint(0, 255, (1200, 900, 3), dtype=np.uint8),
np.random.randint(0, 255, (800, 1200, 3), dtype=np.uint8),
np.random.randint(0, 255, (550, 350, 3), dtype=np.uint8),
np.random.randint(0, 255, (900, 900, 3), dtype=np.uint8),
]
preferred_outputs = [
(900, 675, 3),
(600, 900, 3),
(900, 572, 3),
(900, 900, 3),
]
for dummy_img, preferred_output in zip(dummy_images, preferred_outputs):
out = resize_ratio(dummy_img, 900)
assert out.shape == preferred_output, f"resize_ratio failed for input img.shape={dummy_img.shape}"
| [
"numpy.random.randint",
"deep_utils.resize_ratio",
"warnings.filterwarnings"
] | [((31, 64), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (54, 64), False, 'import warnings\n'), ((269, 326), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(1200, 900, 3)'], {'dtype': 'np.uint8'}), '(0, 255, (1200, 900, 3), dtype=np.uint8)\n', (286, 326), True, 'import numpy as np\n'), ((348, 405), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(800, 1200, 3)'], {'dtype': 'np.uint8'}), '(0, 255, (800, 1200, 3), dtype=np.uint8)\n', (365, 405), True, 'import numpy as np\n'), ((427, 483), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(550, 350, 3)'], {'dtype': 'np.uint8'}), '(0, 255, (550, 350, 3), dtype=np.uint8)\n', (444, 483), True, 'import numpy as np\n'), ((505, 561), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(900, 900, 3)'], {'dtype': 'np.uint8'}), '(0, 255, (900, 900, 3), dtype=np.uint8)\n', (522, 561), True, 'import numpy as np\n'), ((801, 829), 'deep_utils.resize_ratio', 'resize_ratio', (['dummy_img', '(900)'], {}), '(dummy_img, 900)\n', (813, 829), False, 'from deep_utils import resize_ratio\n')] |
import os
import numpy as np
from azureml.monitoring import ModelDataCollector
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.schema_decorators import input_schema, output_schema
# sklearn.externals.joblib is removed in 0.23
from sklearn import __version__ as sklearnver
from packaging.version import Version
if Version(sklearnver) < Version("0.23.0"):
from sklearn.externals import joblib
else:
import joblib
def init():
global model
global inputs_dc
inputs_dc = ModelDataCollector('elevation-regression-model.pkl', designation='inputs',
feature_names=['latitude', 'longitude', 'temperature', 'windAngle', 'windSpeed'])
# note here "elevation-regression-model.pkl" is the name of the model registered under
# this is a different behavior than before when the code is run locally, even though the code is the same.
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'elevation-regression-model.pkl')
model = joblib.load(model_path)
input_sample = np.array([[30, -85, 21, 150, 6]])
output_sample = np.array([8.995])
@input_schema('data', NumpyParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
try:
inputs_dc.collect(data)
result = model.predict(data)
# you can return any datatype as long as it is JSON-serializable
return result.tolist()
except Exception as e:
error = str(e)
return error
| [
"os.getenv",
"azureml.monitoring.ModelDataCollector",
"inference_schema.parameter_types.numpy_parameter_type.NumpyParameterType",
"numpy.array",
"packaging.version.Version",
"joblib.load"
] | [((1350, 1383), 'numpy.array', 'np.array', (['[[30, -85, 21, 150, 6]]'], {}), '([[30, -85, 21, 150, 6]])\n', (1358, 1383), True, 'import numpy as np\n'), ((1400, 1417), 'numpy.array', 'np.array', (['[8.995]'], {}), '([8.995])\n', (1408, 1417), True, 'import numpy as np\n'), ((373, 392), 'packaging.version.Version', 'Version', (['sklearnver'], {}), '(sklearnver)\n', (380, 392), False, 'from packaging.version import Version\n'), ((395, 412), 'packaging.version.Version', 'Version', (['"""0.23.0"""'], {}), "('0.23.0')\n", (402, 412), False, 'from packaging.version import Version\n'), ((547, 711), 'azureml.monitoring.ModelDataCollector', 'ModelDataCollector', (['"""elevation-regression-model.pkl"""'], {'designation': '"""inputs"""', 'feature_names': "['latitude', 'longitude', 'temperature', 'windAngle', 'windSpeed']"}), "('elevation-regression-model.pkl', designation='inputs',\n feature_names=['latitude', 'longitude', 'temperature', 'windAngle',\n 'windSpeed'])\n", (565, 711), False, 'from azureml.monitoring import ModelDataCollector\n'), ((1309, 1332), 'joblib.load', 'joblib.load', (['model_path'], {}), '(model_path)\n', (1320, 1332), False, 'import joblib\n'), ((1442, 1474), 'inference_schema.parameter_types.numpy_parameter_type.NumpyParameterType', 'NumpyParameterType', (['input_sample'], {}), '(input_sample)\n', (1460, 1474), False, 'from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType\n'), ((1491, 1524), 'inference_schema.parameter_types.numpy_parameter_type.NumpyParameterType', 'NumpyParameterType', (['output_sample'], {}), '(output_sample)\n', (1509, 1524), False, 'from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType\n'), ((1231, 1261), 'os.getenv', 'os.getenv', (['"""AZUREML_MODEL_DIR"""'], {}), "('AZUREML_MODEL_DIR')\n", (1240, 1261), False, 'import os\n')] |
#!/usr/bin/env python3
########################################################################
# File: compareSampleSets.py
# executable:
# Purpose:
#
#
# Author: <NAME>
# History: cms 01/08/2020 Created
#
########################################################################
########################################################################
# Hot Imports & Global Variable
########################################################################
import os, sys
import numpy as np
from scipy.stats import ranksums
from statsmodels.stats.multitest import multipletests
########################################################################
# CommandLine
########################################################################
class CommandLine(object) :
'''
Handle the command line, usage and help requests.
CommandLine uses argparse, now standard in 2.7 and beyond.
it implements a standard command line argument parser with various argument options,
and a standard usage and help,
attributes:
myCommandLine.args is a dictionary which includes each of the available command line arguments as
myCommandLine.args['option']
methods:
'''
def __init__(self, inOpts=None) :
'''
CommandLine constructor.
Implements a parser to interpret the command line argv string using argparse.
'''
import argparse
self.parser = argparse.ArgumentParser(description = 'TBD',
epilog = 'Please feel free to forward any usage questions or concerns',
add_help = True, #default is True
prefix_chars = '-',
usage = '%(prog)s -m1 manifest1.txt -m2 manifest2.txt')
# Add args
self.parser.add_argument('--psiMESA', type=str, action = 'store', required=True, help='Compressed NPZ formatted PSI matrix from quantMESA.')
self.parser.add_argument('-m1', '--manifest1', type=str, action = 'store', required=True, help='Manifest containing samples for sample set group1')
self.parser.add_argument('-m2', '--manifest2' , type=str, action = 'store', required=True, help='Manifest containing samples for sample set group2')
self.parser.add_argument('-o', '--out_prefix' , type=str, action = 'store', required=False, help='Prefix for output file.')
if inOpts is None :
self.args = vars(self.parser.parse_args())
else :
self.args = vars(self.parser.parse_args(inOpts))
########################################################################
# Helper Functions
#
#
########################################################################
def loadNPZ(x):
'''
takes in npz formatted matrix.
'''
try:
data = np.load(x)
except:
print("ERR ** Cannot load matrix %s. Check path or format." % x)
sys.exit(1)
return data
def getColIndexFromArray(x,y):
'''
takes in list of strings = x
and finds list index in array = y
'''
return np.nonzero(np.isin(y,x))
def returnSamplesFromManifest(x):
'''
reads in mesa formatted manifest
returns list of samples
'''
s = list()
with open(x) as fin:
for i in fin:
s.append(i.split()[0])
return s
########################################################################
# MAINE
#
#
########################################################################
def main():
'''
A workflow to compute the significance difference
between two distributions of PSI values.
Values are assumed to not be normall distributed, thus
we invoke the wilcoxon ranksum test as the statistical analysis.
'''
myCommandLine = CommandLine()
# args
pmesa = myCommandLine.args["psiMESA"]
group1 = myCommandLine.args["manifest1"]
group2 = myCommandLine.args["manifest2"]
prefix = myCommandLine.args['out_prefix']
# get sample lists
g1 = returnSamplesFromManifest(group1)
g2 = returnSamplesFromManifest(group2)
if len(g1) < 3 or len(g2) < 3:
print("Cannot conduct wilcoxon with less than 3 samples in either group. Exit.", file=sys.stderr)
sys.exit(1)
#load psi
data = loadNPZ(pmesa)
#table has 3 arrays, cols, rows and data
cols, rows, matrix = data['cols'], data['rows'], data['data']
# get sample indices
g1Indices = getColIndexFromArray(g1,cols)
g2Indices = getColIndexFromArray(g2,cols)
# do the math
pvals = list()
testedEvents = list()
for n,event in enumerate(matrix):
d1, d2 = event[g1Indices], event[g2Indices]
nonans1 = np.invert(np.isnan(d1))
nonans2 = np.invert(np.isnan(d2))
data1 = d1[nonans1]
data2 = d2[nonans2]
if len(data1) < 3 or len(data2) < 3:
continue
D, pval = ranksums(d1, d2)
testedEvents.append((rows[n],np.mean(data1)-np.mean(data2)))
pvals.append(pval)
# correct pvals
corrected = multipletests(pvals,method="fdr_bh")[1]
for n,i in enumerate(testedEvents):
print(pvals[n],corrected[n],i[0],i[1])
if __name__ == "__main__":
main()
| [
"numpy.mean",
"argparse.ArgumentParser",
"statsmodels.stats.multitest.multipletests",
"numpy.isin",
"numpy.isnan",
"scipy.stats.ranksums",
"sys.exit",
"numpy.load"
] | [((1455, 1669), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""TBD"""', 'epilog': '"""Please feel free to forward any usage questions or concerns"""', 'add_help': '(True)', 'prefix_chars': '"""-"""', 'usage': '"""%(prog)s -m1 manifest1.txt -m2 manifest2.txt"""'}), "(description='TBD', epilog=\n 'Please feel free to forward any usage questions or concerns', add_help\n =True, prefix_chars='-', usage=\n '%(prog)s -m1 manifest1.txt -m2 manifest2.txt')\n", (1478, 1669), False, 'import argparse\n'), ((2912, 2922), 'numpy.load', 'np.load', (['x'], {}), '(x)\n', (2919, 2922), True, 'import numpy as np\n'), ((3186, 3199), 'numpy.isin', 'np.isin', (['y', 'x'], {}), '(y, x)\n', (3193, 3199), True, 'import numpy as np\n'), ((4340, 4351), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4348, 4351), False, 'import os, sys\n'), ((5014, 5030), 'scipy.stats.ranksums', 'ranksums', (['d1', 'd2'], {}), '(d1, d2)\n', (5022, 5030), False, 'from scipy.stats import ranksums\n'), ((5169, 5206), 'statsmodels.stats.multitest.multipletests', 'multipletests', (['pvals'], {'method': '"""fdr_bh"""'}), "(pvals, method='fdr_bh')\n", (5182, 5206), False, 'from statsmodels.stats.multitest import multipletests\n'), ((3016, 3027), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3024, 3027), False, 'import os, sys\n'), ((4807, 4819), 'numpy.isnan', 'np.isnan', (['d1'], {}), '(d1)\n', (4815, 4819), True, 'import numpy as np\n'), ((4849, 4861), 'numpy.isnan', 'np.isnan', (['d2'], {}), '(d2)\n', (4857, 4861), True, 'import numpy as np\n'), ((5068, 5082), 'numpy.mean', 'np.mean', (['data1'], {}), '(data1)\n', (5075, 5082), True, 'import numpy as np\n'), ((5083, 5097), 'numpy.mean', 'np.mean', (['data2'], {}), '(data2)\n', (5090, 5097), True, 'import numpy as np\n')] |
import os
import numpy as np
import json
import random
import jieba
import collections
from tqdm import tqdm
import config.args as args
from util.Logginger import init_logger
from pytorch_pretrained_bert.tokenization import BertTokenizer
logger = init_logger("QA", logging_path=args.log_path)
with open('TC/pybert/io/PMI_word.json','r',encoding='utf-8') as f:
PMI_word = json.load(f)
class InputExample(object):
"Template for a single data"
def __init__(self,
qas_id, # question id
question_text, # question text
doc_tokens, # context
orig_answer_text=None, # answer text
start_position=None, # For Yes, No & no-answer, start_position = 0
end_position=None, # For Yes, No & no-answer, start_position = 0
answer_type=None # We denote answer type as Yes: 0 No: 1 no-answer: 2 long-answer: 3
):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.answer_type = answer_type
class InputFeatures(object):
"Feature to feed into model"
def __init__(self,
unique_id, # feature id
example_index, # example index, note this is different from qas_id
doc_span_index, # split context index
tokens, # question token + context + flag character
adj,
token_to_orig_map, # token index before BertTokenize
token_is_max_context,
input_ids, # model input, the id of tokens
input_mask,
segment_ids, # For distinguishing question & context
start_position=None,
end_position=None,
answer_type=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.adj = adj,
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.answer_type = answer_type
def train_val_split(X, y, valid_size=0.25, random_state=2019, shuffle=True):
"""
训练集验证集分割
:param X: sentences
:param y: labels
:param random_state: 随机种子
"""
logger.info('train val split')
train, valid = [], []
bucket = [[] for _ in [i for i in range(len(args.answer_type))]]
for data_x, data_y in tqdm(zip(X, y), desc='bucket'):
bucket[int(data_y)].append((data_x, data_y))
del X, y
for bt in tqdm(bucket, desc='split'):
N = len(bt)
if N == 0:
continue
test_size = int(N * valid_size)
if shuffle:
random.seed(random_state)
random.shuffle(bt)
valid.extend(bt[:test_size])
train.extend(bt[test_size:])
if shuffle:
random.seed(random_state)
random.shuffle(valid)
random.shuffle(train)
return train, valid
def read_squad_data(raw_data, save_dir, is_training=True):
logger.info("Read raw squad data...")
logger.info("train_dev_split is %s" % str(is_training))
logger.info("test data path is %s" % raw_data)
with open(raw_data, "r", encoding="utf-8") as fr:
data = json.load(fr)
data = data["data"]
samples = []
for e in data:
paragraphs = e["paragraphs"]
# For small train, we just observed one paragraph in the paragraph list
for paragraph in paragraphs:
context = paragraph["context"]
qas = paragraph["qas"]
for qa in qas:
question = qa["question"]
answers = qa["answers"]
qid = qa["id"]
start_position = int(answers[0]["answer_start"])
end_position = int(answers[0]["answer_end"])
answer_text = answers[0]["text"]
answer_type = answers[0]["answer_type"]
assert len(answers) <= 1, "Found more than one answer for one question"
sample = {"qid": qid, "context": context,
"question": question, "answer_type": answer_type, "answer_text": answer_text,
"start_position": start_position, "end_position": end_position}
samples.append(sample)
if is_training:
y = [args.answer_type[sample["answer_type"]] for sample in samples]
train, valid = train_val_split(samples, y)
logger.info("Train set size is %d" % len(train))
logger.info("Dev set size is %d" % len(valid))
with open(os.path.join(save_dir, "train.json"), 'w') as fr:
for t in train:
print(json.dumps(t[0], ensure_ascii=False), file=fr)
with open(os.path.join(save_dir, "dev.json"), 'w') as fr:
for v in valid:
print(json.dumps(v[0], ensure_ascii=False), file=fr)
else:
with open(os.path.join(save_dir, "test.json"), 'w') as fr:
logger.info("Test set size is %d" %len(samples))
for sample in samples:
print(json.dumps(sample,ensure_ascii=False), file=fr)
def read_qa_examples(data_dir, corpus_type):
assert corpus_type in ["train", "dev", "test"], "Unknown corpus type"
examples = []
with open(os.path.join(data_dir, corpus_type +'.json'), 'r',encoding='utf-8') as fr:
for i, data in enumerate(fr):
data = json.loads(data.strip("\n"))
example = InputExample(qas_id=data["qid"],
question_text=data["question"],
doc_tokens=data["context"],
orig_answer_text=data["answer_text"],
start_position=data["start_position"],
end_position=data["end_position"],
answer_type=data["answer_type"])
examples.append(example)
return examples
def make_adj(text, tokens,max_seq_len):
# print(jieba.lcut(text,cut_all=True))
# print(tokens)
# print(max_seq_len)
print('-------------------------ningyx---------------------')
adj = np.zeros((max_seq_len,max_seq_len))
texts = jieba.lcut(text,cut_all=True)
i = 0
text_id = dict()
while(i < len(texts)):
if len(texts[i]) == 1:
i += 1
else:
l,r = [0,0]
j = 1
flag = False
while j < len(tokens)-1:
if not flag and tokens[j] in texts[i] and tokens[j+1] in texts[i]:
l = j
j += 1
flag = True
elif flag and tokens[j+1] not in texts[i]:
r = j
break
else:
j += 1
adj[l,r] = 1
adj[r,l] = 1
text_id[texts[i]] = (l,r)
i += 1
# print(text_id)
# edge of PMI
for i in range(len(texts)-1):
if texts[i] in PMI_word.keys():
for j in range(i,len(texts)):
if texts[j] in PMI_word.keys():
adj[text_id[texts[i]][1],text_id[texts[j]][0]] = 1
adj[text_id[texts[j]][0],text_id[texts[i]][1]] = 1
for i in range(len(tokens)):
adj[0,i] = 1
adj[i,0] = 1
adj[i,i] = 1
# print(adj[0,:])
return adj
def convert_examples_to_features(examples,
tokenizer,
example_type,
max_seq_length,
doc_stride,
max_query_length,
is_training):
unique_id = 10000000
features = []
all_adj = []
if os.path.isfile(example_type + '_adj.npy'):
all_adj = np.load(example_type + '_adj.npy')
adj_flag = True
else:
adj_flag = False
for example_index, example in tqdm(enumerate(examples)):
if example_index > 1000:
break
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
#quesiton index
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
if adj_flag:
adj = all_adj[doc_span_index,:,:]
else:
adj = make_adj(example.doc_tokens,tokens,max_seq_length)
all_adj.append(adj)
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
answer_type = None
if is_training:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
if example.answer_type != "no-answer":
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
answer_type = "no-answer"
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
answer_type = example.answer_type
else:
start_position = 0
end_position = 0
answer_type = "no-answer"
answer_type = args.answer_type[answer_type]
if example_index < 20:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (answer_text))
logger.info("answer_type: %s" %answer_type)
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
adj = adj,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
answer_type=answer_type))
unique_id += 1
if not adj_flag:
np.save(example_type + '_adj.npy',all_adj)
return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
if __name__ == '__main__':
#read_squad_data("data/test.json", "data/")
examples = read_qa_examples("data/", "train")
convert_examples_to_features(examples,
tokenizer=BertTokenizer("../pretrained_model/Bert-wwm-ext/bert_vocab.txt"),
max_seq_length=512,
doc_stride=500,
max_query_length=32,
is_training=True)
| [
"jieba.lcut",
"collections.namedtuple",
"random.shuffle",
"tqdm.tqdm",
"os.path.join",
"json.dumps",
"random.seed",
"util.Logginger.init_logger",
"os.path.isfile",
"pytorch_pretrained_bert.tokenization.BertTokenizer",
"numpy.zeros",
"json.load",
"numpy.load",
"numpy.save"
] | [((248, 293), 'util.Logginger.init_logger', 'init_logger', (['"""QA"""'], {'logging_path': 'args.log_path'}), "('QA', logging_path=args.log_path)\n", (259, 293), False, 'from util.Logginger import init_logger\n'), ((377, 389), 'json.load', 'json.load', (['f'], {}), '(f)\n', (386, 389), False, 'import json\n'), ((3120, 3146), 'tqdm.tqdm', 'tqdm', (['bucket'], {'desc': '"""split"""'}), "(bucket, desc='split')\n", (3124, 3146), False, 'from tqdm import tqdm\n'), ((6799, 6835), 'numpy.zeros', 'np.zeros', (['(max_seq_len, max_seq_len)'], {}), '((max_seq_len, max_seq_len))\n', (6807, 6835), True, 'import numpy as np\n'), ((6847, 6877), 'jieba.lcut', 'jieba.lcut', (['text'], {'cut_all': '(True)'}), '(text, cut_all=True)\n', (6857, 6877), False, 'import jieba\n'), ((8440, 8481), 'os.path.isfile', 'os.path.isfile', (["(example_type + '_adj.npy')"], {}), "(example_type + '_adj.npy')\n", (8454, 8481), False, 'import os\n'), ((3438, 3463), 'random.seed', 'random.seed', (['random_state'], {}), '(random_state)\n', (3449, 3463), False, 'import random\n'), ((3472, 3493), 'random.shuffle', 'random.shuffle', (['valid'], {}), '(valid)\n', (3486, 3493), False, 'import random\n'), ((3502, 3523), 'random.shuffle', 'random.shuffle', (['train'], {}), '(train)\n', (3516, 3523), False, 'import random\n'), ((3832, 3845), 'json.load', 'json.load', (['fr'], {}), '(fr)\n', (3841, 3845), False, 'import json\n'), ((8501, 8535), 'numpy.load', 'np.load', (["(example_type + '_adj.npy')"], {}), "(example_type + '_adj.npy')\n", (8508, 8535), True, 'import numpy as np\n'), ((10005, 10059), 'collections.namedtuple', 'collections.namedtuple', (['"""DocSpan"""', "['start', 'length']"], {}), "('DocSpan', ['start', 'length'])\n", (10027, 10059), False, 'import collections\n'), ((15832, 15875), 'numpy.save', 'np.save', (["(example_type + '_adj.npy')", 'all_adj'], {}), "(example_type + '_adj.npy', all_adj)\n", (15839, 15875), True, 'import numpy as np\n'), ((3281, 3306), 'random.seed', 'random.seed', 
(['random_state'], {}), '(random_state)\n', (3292, 3306), False, 'import random\n'), ((3319, 3337), 'random.shuffle', 'random.shuffle', (['bt'], {}), '(bt)\n', (3333, 3337), False, 'import random\n'), ((5876, 5921), 'os.path.join', 'os.path.join', (['data_dir', "(corpus_type + '.json')"], {}), "(data_dir, corpus_type + '.json')\n", (5888, 5921), False, 'import os\n'), ((18131, 18195), 'pytorch_pretrained_bert.tokenization.BertTokenizer', 'BertTokenizer', (['"""../pretrained_model/Bert-wwm-ext/bert_vocab.txt"""'], {}), "('../pretrained_model/Bert-wwm-ext/bert_vocab.txt')\n", (18144, 18195), False, 'from pytorch_pretrained_bert.tokenization import BertTokenizer\n'), ((5170, 5206), 'os.path.join', 'os.path.join', (['save_dir', '"""train.json"""'], {}), "(save_dir, 'train.json')\n", (5182, 5206), False, 'import os\n'), ((5335, 5369), 'os.path.join', 'os.path.join', (['save_dir', '"""dev.json"""'], {}), "(save_dir, 'dev.json')\n", (5347, 5369), False, 'import os\n'), ((5508, 5543), 'os.path.join', 'os.path.join', (['save_dir', '"""test.json"""'], {}), "(save_dir, 'test.json')\n", (5520, 5543), False, 'import os\n'), ((5270, 5306), 'json.dumps', 'json.dumps', (['t[0]'], {'ensure_ascii': '(False)'}), '(t[0], ensure_ascii=False)\n', (5280, 5306), False, 'import json\n'), ((5433, 5469), 'json.dumps', 'json.dumps', (['v[0]'], {'ensure_ascii': '(False)'}), '(v[0], ensure_ascii=False)\n', (5443, 5469), False, 'import json\n'), ((5675, 5713), 'json.dumps', 'json.dumps', (['sample'], {'ensure_ascii': '(False)'}), '(sample, ensure_ascii=False)\n', (5685, 5713), False, 'import json\n')] |
import optuna
import json
import numpy as np
import argparse
import os
from optuna.visualization import plot_optimization_history, plot_param_importances
parser = argparse.ArgumentParser()
parser.add_argument("--study-name", help="Study name used during hyperparameter optimization", type=str, default=None)
parser.add_argument("--storage", help="Database storage path used during hyperparameter optimization", type=str, default=None)
parser.add_argument("--save-n-best-hyperparameters", help="Save the hyperparameters for the n best trials that resulted in the best returns", type=int, default=0)
parser.add_argument("--visualize", help="Visualize the study results", type=bool, default=True)
args = parser.parse_args()
output_dir = "./indicator_hyperparameters/" + args.study_name
os.makedirs(output_dir, exist_ok=True)
study = optuna.create_study(study_name=args.study_name, storage=args.storage, load_if_exists=True, direction="maximize")
if args.visualize:
fig1 = plot_optimization_history(study)
fig2 = plot_param_importances(study)
fig1.write_image(output_dir + "/optimization_history.png")
fig2.write_image(output_dir + "/param_importances.png")
values = []
for trial in study.trials:
values.append(trial.value)
scratch_values = [-np.inf if i is None else i for i in values]
ordered_indices = np.argsort(scratch_values)[::-1]
for i in range(args.save_n_best_hyperparameters):
params = study.trials[ordered_indices[i]].params
text = json.dumps(params)
jsonFile = open(output_dir + '/hyperparameters_' + str(i) + ".json", "w+")
jsonFile.write(text)
jsonFile.close() | [
"os.makedirs",
"argparse.ArgumentParser",
"optuna.visualization.plot_param_importances",
"json.dumps",
"numpy.argsort",
"optuna.visualization.plot_optimization_history",
"optuna.create_study"
] | [((165, 190), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (188, 190), False, 'import argparse\n'), ((786, 824), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (797, 824), False, 'import os\n'), ((834, 950), 'optuna.create_study', 'optuna.create_study', ([], {'study_name': 'args.study_name', 'storage': 'args.storage', 'load_if_exists': '(True)', 'direction': '"""maximize"""'}), "(study_name=args.study_name, storage=args.storage,\n load_if_exists=True, direction='maximize')\n", (853, 950), False, 'import optuna\n'), ((977, 1009), 'optuna.visualization.plot_optimization_history', 'plot_optimization_history', (['study'], {}), '(study)\n', (1002, 1009), False, 'from optuna.visualization import plot_optimization_history, plot_param_importances\n'), ((1021, 1050), 'optuna.visualization.plot_param_importances', 'plot_param_importances', (['study'], {}), '(study)\n', (1043, 1050), False, 'from optuna.visualization import plot_optimization_history, plot_param_importances\n'), ((1328, 1354), 'numpy.argsort', 'np.argsort', (['scratch_values'], {}), '(scratch_values)\n', (1338, 1354), True, 'import numpy as np\n'), ((1476, 1494), 'json.dumps', 'json.dumps', (['params'], {}), '(params)\n', (1486, 1494), False, 'import json\n')] |
import json
import random
from argparse import ArgumentParser
from numpy.random import default_rng

# Command-line interface: input SQuAD-style dataset plus output paths for the
# randomly sampled dev split and the remaining train split.
parser = ArgumentParser()
parser.add_argument("--in_file", type=str, default="data/NewsQA.train.json")
# fix: the dev default was missing the directory separator ("dataNewsQA...")
parser.add_argument("--out_file_dev", type=str, default="data/NewsQA.sample.dev.json")
parser.add_argument("--out_file_train", type=str, default="data/NewsQA.sample.train.json")
parser.add_argument("--num", type=int, default=1000, required=False)
parser.add_argument("--seed", type=int, default=42, required=False)
# NOTE: arguments are parsed only under the __main__ guard below; the original
# also called parse_args() at import time, which made the module unimportable
# as a library (it would consume the importing process's argv).
def subsample_dataset_random(data_json, sample_num=1000, seed=55):
    """Randomly split a SQuAD-style dataset into a dev sample and the remaining train set.

    :param data_json: dict with SQuAD layout:
        {'data': [{'title', 'paragraphs': [{'context', 'qas': [{'id', ...}]}]}], 'version'}
    :param sample_num: number of QA pairs to draw (without replacement) for the dev split
    :param seed: RNG seed; the same seed now reproduces the same split
    :return: (train_data_json, dev_data_json), both with the same layout as the input
    """
    def _report(dataset, tag):
        # Count QA pairs and contexts and print a one-line summary.
        total = 0
        context_num = 0
        for article in dataset['data']:
            for para in article['paragraphs']:
                context_num += 1
                total += len(para['qas'])
        print('%s QA Num: %d, Total Context: %d' % (tag, total, context_num))

    # Gather every question id once; this is the sampling population.
    id_lists = [qa['id']
                for article in data_json['data']
                for para in article['paragraphs']
                for qa in para['qas']]
    _report(data_json, 'Total')

    # fix: seed the generator. The original seeded the stdlib `random` module but
    # created an *unseeded* default_rng(), so the `seed` argument had no effect
    # and the split was not reproducible.
    rng = default_rng(seed)
    sampled_ids = set(rng.choice(id_lists, size=sample_num, replace=False))

    new_passages_dev = []
    new_passages_train = []
    for passages in data_json['data']:
        new_paras_dev = []
        new_paras_train = []
        for para in passages['paragraphs']:
            context = para['context']
            new_qas_dev = [qa for qa in para['qas'] if qa['id'] in sampled_ids]
            new_qas_train = [qa for qa in para['qas'] if qa['id'] not in sampled_ids]
            if len(new_qas_dev) > 0:
                new_paras_dev.append({'context': context, 'qas': new_qas_dev})
            if len(new_qas_train) > 0:
                new_paras_train.append({'context': context, 'qas': new_qas_train})
        if len(new_paras_dev) > 0:
            new_passages_dev.append({'title': passages['title'], 'paragraphs': new_paras_dev})
        if len(new_paras_train) > 0:
            new_passages_train.append({'title': passages['title'], 'paragraphs': new_paras_train})

    dev_data_json = {'data': new_passages_dev, 'version': data_json['version']}
    train_data_json = {'data': new_passages_train, 'version': data_json['version']}
    _report(dev_data_json, 'Sample Dev')
    _report(train_data_json, 'Sample Train')
    return train_data_json, dev_data_json
def main(args):
    """Load the input dataset, subsample a dev split, and write both splits to disk.

    :param args: parsed argparse namespace with in_file, out_file_train,
                 out_file_dev, num, and seed attributes
    """
    with open(args.in_file, 'r') as fh:
        dataset = json.load(fh)
    train_data_json, dev_data_json = subsample_dataset_random(dataset, args.num, args.seed)
    # fix: close the output files deterministically; the original passed bare
    # open(...) handles to json.dump and leaked them.
    with open(args.out_file_train, 'w') as fh:
        json.dump(train_data_json, fh)
    with open(args.out_file_dev, 'w') as fh:
        json.dump(dev_data_json, fh)
if __name__ == '__main__':
    # Parse CLI options, seed the stdlib RNG, and run the subsampling pipeline.
    args = parser.parse_args()
    random.seed(args.seed)
    main(args)
| [
"numpy.random.default_rng",
"argparse.ArgumentParser",
"random.seed"
] | [((114, 130), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (128, 130), False, 'from argparse import ArgumentParser\n'), ((994, 1011), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1005, 1011), False, 'import random\n'), ((1023, 1036), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (1034, 1036), False, 'from numpy.random import default_rng\n'), ((3409, 3431), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (3420, 3431), False, 'import random\n')] |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pandas as pd
import numba as nb
import time
from warnings import warn
import scipy.sparse as sp
from scipy.sparse import coo_matrix, csc_matrix
from scipy.sparse import hstack as hs, vstack as vs
from scipy.sparse.linalg import factorized, spsolve, inv
from matplotlib import pyplot as plt
from GridCal.Engine import *
def SysMat(Y, Ys, pq, pvpq):
    """
    Build the linearised system matrix in polar coordinates.
    :param Y: full admittance matrix (sparse, complex)
    :param Ys: series admittance matrix (sparse, complex)
    :param pq: indices of the PQ buses
    :param pvpq: indices of the PV and PQ buses
    :return: system matrix in CSC format, shape (npvpq + npq, npvpq + npq)
    """
    # Top block row: angle-related terms; bottom block row: magnitude-related terms.
    top = sp.hstack([-Ys.imag[np.ix_(pvpq, pvpq)], Y.real[np.ix_(pvpq, pq)]])
    bottom = sp.hstack([-Ys.real[np.ix_(pq, pvpq)], -Y.imag[np.ix_(pq, pq)]])
    return sp.vstack([top, bottom], format="csc")
def compute_acptdf(Ybus, Yseries, Yf, Yt, Cf, V, pq, pv, distribute_slack):
    """
    Compute the AC-PTDF (power transfer distribution factors linearised at V)
    :param Ybus: full admittance matrix
    :param Yseries: series admittance matrix used to build the system matrix
    :param Yf: Admittance matrix of the buses "from"
    :param Yt: Admittance matrix of the buses "to"
    :param Cf: Connectivity branch - bus "from"
    :param V: voltages array (linearisation point, typically a solved power flow)
    :param pq: array of pq node indices
    :param pv: array of pv node indices
    :param distribute_slack: if True each unit injection is compensated equally
                             by all other buses instead of by the slack alone
    :return: AC-PTDF matrix (branches, buses)
    """
    n = len(V)
    pvpq = np.r_[pv, pq]
    npq = len(pq)
    # compute the Jacobian
    J = SysMat(Ybus, Yseries, pq, pvpq)
    if distribute_slack:
        # distributed slack: -1/(n-1) off the diagonal, 1 on it (columns sum to 0)
        dP = np.ones((n, n)) * (-1 / (n - 1))
        for i in range(n):
            dP[i, i] = 1.0
    else:
        dP = np.eye(n, n)
    # compose the compatible array (the Q increments are considered zero)
    dQ = np.zeros((npq, n))
    # dQ = np.eye(n, n)[pq, :]
    dS = np.r_[dP[pvpq, :], dQ]
    # solve the voltage increments
    dx = spsolve(J, dS)
    # compute branch derivatives
    If = Yf * V
    E = V / np.abs(V)  # unit-magnitude voltage phasors
    Vdiag = sp.diags(V)
    Vdiag_conj = sp.diags(np.conj(V))
    Ediag = sp.diags(E)
    Ediag_conj = sp.diags(np.conj(E))
    If_diag_conj = sp.diags(np.conj(If))
    Yf_conj = Yf.copy()
    Yf_conj.data = np.conj(Yf_conj.data)
    # NOTE(review): Yt_conj is computed but never used below
    Yt_conj = Yt.copy()
    Yt_conj.data = np.conj(Yt_conj.data)
    # derivatives of the "from"-side power w.r.t. voltage angle and magnitude
    dSf_dVa = 1j * (If_diag_conj * Cf * Vdiag - sp.diags(Cf * V) * Yf_conj * Vdiag_conj)
    dSf_dVm = If_diag_conj * Cf * Ediag - sp.diags(Cf * V) * Yf_conj * Ediag_conj
    # compose the final AC-PTDF from the real parts of the derivatives
    dPf_dVa = dSf_dVa.real[:, pvpq]
    dPf_dVm = dSf_dVm.real[:, pq]
    PTDF = sp.hstack((dPf_dVa, dPf_dVm)) * dx
    return PTDF
def make_lodf(circuit: SnapshotCircuit, PTDF, correct_values=True):
    """
    Compute the Line Outage Distribution Factors from the PTDF.
    :param circuit: snapshot circuit (provides branch-bus connectivity and nbr)
    :param PTDF: PTDF matrix in numpy array form (branches, buses)
    :param correct_values: if True, clip the factors to the physical range [-1, 1]
    :return: LODF matrix (branches, branches); entry [m, c] is the fraction of
             branch c's pre-outage flow that shifts onto branch m when c fails
    """
    nl = circuit.nbr
    # compute the connectivity matrix
    Cft = circuit.C_branch_bus_f - circuit.C_branch_bus_t
    # H[m, c]: sensitivity of branch m's flow to branch c's injection transfer
    H = PTDF * Cft.T
    # old code
    # h = sp.diags(H.diagonal())
    # LODF = H / (np.ones((nl, nl)) - h * np.ones(nl))
    # divide each row of H by the vector 1 - H.diagonal
    # LODF = H / (1 - H.diagonal())
    # replace possible nan and inf
    # LODF[LODF == -np.inf] = 0
    # LODF[LODF == np.inf] = 0
    # LODF = np.nan_to_num(LODF)
    # this loop avoids the divisions by zero
    # in those cases the LODF column should be zero
    LODF = np.zeros((nl, nl))
    div = 1 - H.diagonal()
    for j in range(H.shape[1]):
        if div[j] != 0:
            LODF[:, j] = H[:, j] / div[j]
    # replace the diagonal elements by -1
    # old code
    # LODF = LODF - sp.diags(LODF.diagonal()) - sp.eye(nl, nl), replaced by:
    for i in range(nl):
        LODF[i, i] = - 1.0
    if correct_values:
        # clip numerically out-of-range factors to +/- 1
        i1, j1 = np.where(LODF > 1)
        for i, j in zip(i1, j1):
            LODF[i, j] = 1
        i2, j2 = np.where(LODF < -1)
        for i, j in zip(i2, j2):
            LODF[i, j] = -1
    return LODF
def get_branch_time_series(circuit: TimeCircuit, PTDF):
    """
    Estimate the branch active-power flows for every time step via the PTDF.
    :param circuit: time-series circuit (provides Sbus and Sbase)
    :param PTDF: PTDF matrix (branches, buses)
    :return: branch flows scaled by the system base power, one row per time step
    """
    # project the nodal active injections through the PTDF, then rescale
    nodal_p = circuit.Sbus.real
    return np.dot(PTDF, nodal_p).T * circuit.Sbase
def multiple_failure_old(flows, LODF, beta, delta, alpha):
    """
    Post-contingency flow in branch `alpha` after the simultaneous outage of
    branches `beta` and `delta`.
    :param flows: array of all the pre-contingency flows
    :param LODF: Line Outage Distribution Factors Matrix
    :param beta: index of the first failed line
    :param delta: index of the second failed line
    :param alpha: index of the line where you want to see the effects
    :return: post contingency flow in the line alpha
    """
    # multiple contingency matrix
    M = np.ones((2, 2))
    M[0, 1] = -LODF[beta, delta]
    M[1, 0] = -LODF[delta, beta]
    # normal flows of the lines beta and delta
    F = flows[[beta, delta]]
    # contingency flows after failing the lines beta and delta
    Ff = np.linalg.solve(M, F)
    # flow delta in the line alpha after the multiple contingency of the lines beta and delta
    L = LODF[alpha, :][[beta, delta]]
    dFf_alpha = np.dot(L, Ff)
    # fix: the base must be the monitored branch's own pre-contingency flow.
    # The original returned F[alpha], indexing the 2-element [beta, delta]
    # vector with a branch index (wrong value, and IndexError for alpha > 1).
    return flows[alpha] + dFf_alpha
def multiple_failure(flows, LODF, failed_idx):
    """
    Post-contingency flows for a simultaneous multi-branch outage.
    From the paper:
    Multiple Element Contingency Screening
    IEEE TRANSACTIONS ON POWER SYSTEMS, VOL. 26, NO. 3, AUGUST 2011
    :param flows: array of all the pre-contingency flows (the base flows)
    :param LODF: Line Outage Distribution Factors Matrix
    :param failed_idx: indices of the failed lines
    :return: all post contingency flows
    """
    # multiple-contingency matrix: -LODF restricted to the failed lines,
    # with ones forced onto the diagonal
    M = -LODF[np.ix_(failed_idx, failed_idx)]
    np.fill_diagonal(M, 1.0)
    # pre-contingency flows of the failed lines
    base_flows = flows[failed_idx]
    # effective flows transferred by the simultaneous outages
    Ff = np.linalg.solve(M, base_flows)
    # superpose the redistributed flow deltas onto every branch
    return flows + LODF[:, failed_idx] @ Ff
def get_n_minus_1_flows(circuit: MultiCircuit):
    """
    Brute-force N-1 branch flows: run one full power flow per branch outage.
    :param circuit: multi-circuit model (mutated temporarily: each branch is
                    switched off, solved, then restored)
    :return: matrix (monitored branch, contingency branch) of real power flows
    """
    opt = PowerFlowOptions()
    branches = circuit.get_branches()
    m = circuit.get_branch_number()
    Pmat = np.zeros((m, m))  # monitored, contingency
    for c, branch in enumerate(branches):
        if branch.active:
            # disable the branch, solve, store the flows, then restore it
            branch.active = False
            pf = PowerFlowDriver(circuit, opt)
            pf.run()
            Pmat[:, c] = pf.results.Sbranch.real
            branch.active = True
    return Pmat
def check_lodf(grid: MultiCircuit):
    """
    Compare brute-force N-1 flows against the LODF-based linear estimate.
    :param grid: multi-circuit model
    :return: (base flows, N-1 flows by Newton-Raphson, N-1 flows by LODF)
    """
    flows_n1_nr = get_n_minus_1_flows(grid)
    # assume 1 island
    nc = compile_snapshot_circuit(grid)
    islands = split_into_islands(nc)
    circuit = islands[0]
    pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
    pf_driver.run()
    PTDF = compute_acptdf(Ybus=circuit.Ybus,
                          Yseries=circuit.Yseries,
                          Yf=circuit.Yf,
                          Yt=circuit.Yt,
                          Cf=circuit.C_branch_bus_f,
                          V=pf_driver.results.voltage,
                          pq=circuit.pq,
                          pv=circuit.pv,
                          distribute_slack=True)
    LODF = make_lodf(circuit, PTDF)
    Pbus = circuit.get_injections(False).real
    flows_n = np.dot(PTDF, Pbus)
    nl = circuit.nbr
    flows_n1 = np.zeros((nl, nl))
    for c in range(nl):  # branch that fails (contingency)
        # vectorised form of:
        # for m in range(nl):  # branch to monitor
        #     flows_n1[m, c] = flows_n[m] + LODF[m, c] * flows_n[c]
        flows_n1[:, c] = flows_n[:] + LODF[:, c] * flows_n[c]
    return flows_n, flows_n1_nr, flows_n1
def test_ptdf(grid):
    """
    Compute and print the AC-PTDF of the first island of `grid`.
    :param grid: MultiCircuit
    :return: None (prints the PTDF matrix)
    """
    nc = compile_snapshot_circuit(grid)
    islands = split_into_islands(nc)
    circuit = islands[0]  # pick the first island
    pf_driver = PowerFlowDriver(grid, PowerFlowOptions())
    pf_driver.run()
    PTDF = compute_acptdf(Ybus=circuit.Ybus,
                          Yseries=circuit.Yseries,
                          Yf=circuit.Yf,
                          Yt=circuit.Yt,
                          Cf=circuit.C_branch_bus_f,
                          V=pf_driver.results.voltage,
                          pq=circuit.pq,
                          pv=circuit.pv,
                          distribute_slack=False)
    print('PTDF:')
    print(PTDF)
if __name__ == '__main__':
    from GridCal.Engine import FileOpen
    import pandas as pd
    # NOTE(review): `sys` and `os` are not imported at the top of this file;
    # presumably they come from `from GridCal.Engine import *` — verify.
    np.set_printoptions(threshold=sys.maxsize, linewidth=200000000)
    # np.set_printoptions(linewidth=2000, suppress=True)
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    pd.set_option('display.width', 1000)
    # fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39_1W.gridcal'
    # fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14.xlsx'
    # fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/lynn5buspv.xlsx'
    # fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 118.xlsx'
    fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
    # fname = 'helm_data1.gridcal'
    # fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE 14 PQ only.gridcal'
    # fname = 'IEEE 14 PQ only full.gridcal'
    # fname = '/home/santi/Descargas/matpower-fubm-master/data/case5.m'
    # fname = '/home/santi/Descargas/matpower-fubm-master/data/case30.m'
    # fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/PGOC_6bus.gridcal'
    grid_ = FileOpen(fname).open()
    test_ptdf(grid_)
    name = os.path.splitext(fname.split(os.sep)[-1])[0]
    method = 'ACPTDF (No Jacobian, V=Vpf)'
    # compile the first island and solve a power flow to linearise around
    nc_ = compile_snapshot_circuit(grid_)
    islands_ = split_into_islands(nc_)
    circuit_ = islands_[0]
    pf_driver_ = PowerFlowDriver(grid_, PowerFlowOptions())
    pf_driver_.run()
    H_ = compute_acptdf(Ybus=circuit_.Ybus,
                        Yseries=circuit_.Yseries,
                        Yf=circuit_.Yf,
                        Yt=circuit_.Yt,
                        Cf=circuit_.C_branch_bus_f,
                        V=pf_driver_.results.voltage,
                        pq=circuit_.pq,
                        pv=circuit_.pv,
                        distribute_slack=False)
    LODF_ = make_lodf(circuit_, H_)
    if H_.shape[0] < 50:
        print('PTDF:\n', H_)
        print('LODF:\n', LODF_)
    flows_n_, flows_n1_nr_, flows_n1_ = check_lodf(grid_)
    # in the case of the grid PGOC_6bus
    flows_multiple = multiple_failure(flows=flows_n_,
                                      LODF=LODF_,
                                      failed_idx=[1, 5])  # failed lines 2 and 6
    Pn1_nr_df = pd.DataFrame(data=flows_n1_nr_, index=nc_.branch_names, columns=nc_.branch_names)
    flows_n1_df = pd.DataFrame(data=flows_n1_, index=nc_.branch_names, columns=nc_.branch_names)
    # plot N-1: Newton-Raphson vs PTDF estimates and their difference
    fig = plt.figure(figsize=(12, 8))
    title = 'N-1 with ' + method + ' (' + name + ')'
    fig.suptitle(title)
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223)
    Pn1_nr_df.plot(ax=ax1, legend=False)
    flows_n1_df.plot(ax=ax2, legend=False)
    diff = Pn1_nr_df - flows_n1_df
    diff.plot(ax=ax3, legend=False)
    ax1.set_title('Newton-Raphson N-1 flows')
    ax2.set_title('PTDF N-1 flows')
    ax3.set_title('Difference')
    fig.savefig(title + '.png')
    # ------------------------------------------------------------------------------------------------------------------
    # Perform real time series
    # ------------------------------------------------------------------------------------------------------------------
    if grid_.time_profile is not None:
        grid_.ensure_profiles_exist()
        nc_ts = compile_time_circuit(grid_)
        islands_ts = split_time_circuit_into_islands(nc_ts)
        circuit_ts = islands_ts[0]
        # reference: full time-series power flow
        pf_options = PowerFlowOptions()
        ts_driver = TimeSeries(grid=grid_, options=pf_options)
        ts_driver.run()
        Pbr_nr = ts_driver.results.Sbranch.real
        df_Pbr_nr = pd.DataFrame(data=Pbr_nr, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
        # Compute the PTDF based flows
        Pbr_ptdf = get_branch_time_series(circuit=circuit_ts, PTDF=H_)
        df_Pbr_ptdf = pd.DataFrame(data=Pbr_ptdf, columns=circuit_ts.branch_names, index=circuit_ts.time_array)
        # plot: Newton-Raphson vs PTDF time-series flows and their difference
        fig = plt.figure(figsize=(12, 8))
        title = 'Flows with ' + method + ' (' + name + ')'
        fig.suptitle(title)
        ax1 = fig.add_subplot(221)
        ax2 = fig.add_subplot(222)
        ax3 = fig.add_subplot(223)
        df_Pbr_nr.plot(ax=ax1, legend=False)
        df_Pbr_ptdf.plot(ax=ax2, legend=False)
        diff = df_Pbr_nr - df_Pbr_ptdf
        diff.plot(ax=ax3, legend=False)
        ax1.set_title('Newton-Raphson flows')
        ax2.set_title('PTDF flows')
        ax3.set_title('Difference')
        fig.savefig(title + '.png')
    plt.show()
| [
"scipy.sparse.linalg.spsolve",
"numpy.eye",
"numpy.linalg.solve",
"numpy.abs",
"numpy.ones",
"matplotlib.pyplot.show",
"numpy.conj",
"numpy.where",
"GridCal.Engine.FileOpen",
"numpy.ix_",
"pandas.set_option",
"numpy.zeros",
"numpy.dot",
"matplotlib.pyplot.figure",
"scipy.sparse.hstack",
... | [((2526, 2544), 'numpy.zeros', 'np.zeros', (['(npq, n)'], {}), '((npq, n))\n', (2534, 2544), True, 'import numpy as np\n'), ((2653, 2667), 'scipy.sparse.linalg.spsolve', 'spsolve', (['J', 'dS'], {}), '(J, dS)\n', (2660, 2667), False, 'from scipy.sparse.linalg import factorized, spsolve, inv\n'), ((2752, 2763), 'scipy.sparse.diags', 'sp.diags', (['V'], {}), '(V)\n', (2760, 2763), True, 'import scipy.sparse as sp\n'), ((2814, 2825), 'scipy.sparse.diags', 'sp.diags', (['E'], {}), '(E)\n', (2822, 2825), True, 'import scipy.sparse as sp\n'), ((2949, 2970), 'numpy.conj', 'np.conj', (['Yf_conj.data'], {}), '(Yf_conj.data)\n', (2956, 2970), True, 'import numpy as np\n'), ((3014, 3035), 'numpy.conj', 'np.conj', (['Yt_conj.data'], {}), '(Yt_conj.data)\n', (3021, 3035), True, 'import numpy as np\n'), ((4120, 4138), 'numpy.zeros', 'np.zeros', (['(nl, nl)'], {}), '((nl, nl))\n', (4128, 4138), True, 'import numpy as np\n'), ((5373, 5388), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (5380, 5388), True, 'import numpy as np\n'), ((5604, 5625), 'numpy.linalg.solve', 'np.linalg.solve', (['M', 'F'], {}), '(M, F)\n', (5619, 5625), True, 'import numpy as np\n'), ((5775, 5788), 'numpy.dot', 'np.dot', (['L', 'Ff'], {}), '(L, Ff)\n', (5781, 5788), True, 'import numpy as np\n'), ((6578, 6599), 'numpy.linalg.solve', 'np.linalg.solve', (['M', 'F'], {}), '(M, F)\n', (6593, 6599), True, 'import numpy as np\n'), ((6748, 6761), 'numpy.dot', 'np.dot', (['L', 'Ff'], {}), '(L, Ff)\n', (6754, 6761), True, 'import numpy as np\n'), ((7046, 7062), 'numpy.zeros', 'np.zeros', (['(m, m)'], {}), '((m, m))\n', (7054, 7062), True, 'import numpy as np\n'), ((8164, 8182), 'numpy.dot', 'np.dot', (['PTDF', 'Pbus'], {}), '(PTDF, Pbus)\n', (8170, 8182), True, 'import numpy as np\n'), ((8220, 8238), 'numpy.zeros', 'np.zeros', (['(nl, nl)'], {}), '((nl, nl))\n', (8228, 8238), True, 'import numpy as np\n'), ((9375, 9438), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 
'sys.maxsize', 'linewidth': '(200000000)'}), '(threshold=sys.maxsize, linewidth=200000000)\n', (9394, 9438), True, 'import numpy as np\n'), ((9500, 9538), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(500)'], {}), "('display.max_rows', 500)\n", (9513, 9538), True, 'import pandas as pd\n'), ((9543, 9584), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(500)'], {}), "('display.max_columns', 500)\n", (9556, 9584), True, 'import pandas as pd\n'), ((9589, 9625), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(1000)'], {}), "('display.width', 1000)\n", (9602, 9625), True, 'import pandas as pd\n'), ((11704, 11790), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'flows_n1_nr_', 'index': 'nc_.branch_names', 'columns': 'nc_.branch_names'}), '(data=flows_n1_nr_, index=nc_.branch_names, columns=nc_.\n branch_names)\n', (11716, 11790), True, 'import pandas as pd\n'), ((11804, 11882), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'flows_n1_', 'index': 'nc_.branch_names', 'columns': 'nc_.branch_names'}), '(data=flows_n1_, index=nc_.branch_names, columns=nc_.branch_names)\n', (11816, 11882), True, 'import pandas as pd\n'), ((11909, 11936), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (11919, 11936), True, 'from matplotlib import pyplot as plt\n'), ((13989, 13999), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13997, 13999), True, 'from matplotlib import pyplot as plt\n'), ((1448, 1464), 'numpy.ix_', 'np.ix_', (['pvpq', 'pq'], {}), '(pvpq, pq)\n', (1454, 1464), True, 'import numpy as np\n'), ((2430, 2442), 'numpy.eye', 'np.eye', (['n', 'n'], {}), '(n, n)\n', (2436, 2442), True, 'import numpy as np\n'), ((2730, 2739), 'numpy.abs', 'np.abs', (['V'], {}), '(V)\n', (2736, 2739), True, 'import numpy as np\n'), ((2790, 2800), 'numpy.conj', 'np.conj', (['V'], {}), '(V)\n', (2797, 2800), True, 'import numpy as np\n'), ((2852, 2862), 'numpy.conj', 'np.conj', 
(['E'], {}), '(E)\n', (2859, 2862), True, 'import numpy as np\n'), ((2892, 2903), 'numpy.conj', 'np.conj', (['If'], {}), '(If)\n', (2899, 2903), True, 'import numpy as np\n'), ((3322, 3351), 'scipy.sparse.hstack', 'sp.hstack', (['(dPf_dVa, dPf_dVm)'], {}), '((dPf_dVa, dPf_dVm))\n', (3331, 3351), True, 'import scipy.sparse as sp\n'), ((4491, 4509), 'numpy.where', 'np.where', (['(LODF > 1)'], {}), '(LODF > 1)\n', (4499, 4509), True, 'import numpy as np\n'), ((4588, 4607), 'numpy.where', 'np.where', (['(LODF < -1)'], {}), '(LODF < -1)\n', (4596, 4607), True, 'import numpy as np\n'), ((13096, 13188), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'Pbr_nr', 'columns': 'circuit_ts.branch_names', 'index': 'circuit_ts.time_array'}), '(data=Pbr_nr, columns=circuit_ts.branch_names, index=circuit_ts\n .time_array)\n', (13108, 13188), True, 'import pandas as pd\n'), ((13317, 13411), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'Pbr_ptdf', 'columns': 'circuit_ts.branch_names', 'index': 'circuit_ts.time_array'}), '(data=Pbr_ptdf, columns=circuit_ts.branch_names, index=\n circuit_ts.time_array)\n', (13329, 13411), True, 'import pandas as pd\n'), ((13437, 13464), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (13447, 13464), True, 'from matplotlib import pyplot as plt\n'), ((1411, 1429), 'numpy.ix_', 'np.ix_', (['pvpq', 'pvpq'], {}), '(pvpq, pvpq)\n', (1417, 1429), True, 'import numpy as np\n'), ((1485, 1501), 'numpy.ix_', 'np.ix_', (['pq', 'pvpq'], {}), '(pq, pvpq)\n', (1491, 1501), True, 'import numpy as np\n'), ((1521, 1535), 'numpy.ix_', 'np.ix_', (['pq', 'pq'], {}), '(pq, pq)\n', (1527, 1535), True, 'import numpy as np\n'), ((1560, 1581), 'scipy.sparse.hstack', 'sp.hstack', (['[A11, A12]'], {}), '([A11, A12])\n', (1569, 1581), True, 'import scipy.sparse as sp\n'), ((1605, 1626), 'scipy.sparse.hstack', 'sp.hstack', (['[A21, A22]'], {}), '([A21, A22])\n', (1614, 1626), True, 'import scipy.sparse as sp\n'), ((2320, 2335), 
'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (2327, 2335), True, 'import numpy as np\n'), ((4868, 4883), 'numpy.dot', 'np.dot', (['PTDF', 'P'], {}), '(PTDF, P)\n', (4874, 4883), True, 'import numpy as np\n'), ((6318, 6348), 'numpy.ix_', 'np.ix_', (['failed_idx', 'failed_idx'], {}), '(failed_idx, failed_idx)\n', (6324, 6348), True, 'import numpy as np\n'), ((10535, 10550), 'GridCal.Engine.FileOpen', 'FileOpen', (['fname'], {}), '(fname)\n', (10543, 10550), False, 'from GridCal.Engine import FileOpen\n'), ((3168, 3184), 'scipy.sparse.diags', 'sp.diags', (['(Cf * V)'], {}), '(Cf * V)\n', (3176, 3184), True, 'import scipy.sparse as sp\n'), ((3085, 3101), 'scipy.sparse.diags', 'sp.diags', (['(Cf * V)'], {}), '(Cf * V)\n', (3093, 3101), True, 'import scipy.sparse as sp\n')] |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_auc_score, roc_curve, classification_report
from xgboost import XGBClassifier
from time import time
# Shortcut for label-based multi-level slicing: df.loc[idx[rows, cols]]
idx = pd.IndexSlice
# COMMAND ----------
# MAGIC %md General
# COMMAND ----------
def count_unique_index(df, index_level=0):
    """Number of distinct values at the given level of *df*'s (Multi)Index."""
    level_values = df.index.get_level_values(index_level)
    return level_values.nunique()
def describe_datetimeindex(df, index_level=1):
    """Summary statistics for a datetime level of *df*'s (Multi)Index.

    :param df: DataFrame whose index level `index_level` holds datetimes
    :param index_level: which index level to describe (default: second level)
    :return: pandas Series with count/mean/min/max etc. of the datetimes
    """
    s = pd.Series(df.index.get_level_values(index_level))
    try:
        # pandas < 2.0 needs the flag to summarise datetimes numerically
        return s.describe(datetime_is_numeric=True)
    except TypeError:
        # fix: pandas >= 2.0 removed `datetime_is_numeric`; it is now the default
        return s.describe()
def prop_table(x, dropna=False):
    """Frequency table of *x* with counts and rounded percentages plus a Total row.

    :param x: pandas Series (its .name becomes the first column header)
    :param dropna: if True, exclude NaN from the counts and the total
    :return: DataFrame with columns [x.name, 'count', 'percent'], sorted by
             count descending, with a final ['Total', n, 100] row
    """
    counts = x.value_counts(dropna=dropna)
    # percentages share the counts' denominator (includes NaN when dropna=False)
    percents = (100 * counts / counts.sum()).round(2)
    tmp = pd.DataFrame({x.name: counts.index.to_numpy(),
                        'count': counts.to_numpy(),
                        'percent': percents.to_numpy()})
    tmp = tmp.sort_values('count', ascending=False)
    tot = x.notnull().sum() if dropna else len(x)
    total_row = pd.DataFrame([['Total', tot, 100]], columns=tmp.columns)
    # fix: DataFrame.append was removed in pandas 2.0; also the old
    # value_counts().reset_index() + merge-on-'index' broke when pandas 2.0
    # renamed the reset columns. pd.concat + direct construction avoid both.
    return pd.concat([tmp, total_row], ignore_index=True)
# COMMAND ----------
# MAGIC %md Data generation
# COMMAND ----------
def generate_normalized_hr_sample(random_state=1729, split='train', ili_type=3):
    """Generate one synthetic participant's normalised heart-rate feature frame.

    Simulates 43 days (-28..+14 around a random onset date): healthy baseline
    noise, an illness bump shaped by `ili_type`, missing-data masking, and
    five day-lagged copies of each feature column.

    :param random_state: seed for the NumPy RandomState driving all draws
    :param split: value written into the ('labels', 'split') column
    :param ili_type: 1=ILI, 2=Flu, 3=COVID; selects effect sizes and missingness
    :return: DataFrame indexed by (participant id, date) with MultiIndex
             feature columns ('<k>days_ago', feature) plus 'labels' columns
    """
    rnd = np.random.RandomState(random_state)
    # random participant id and onset date (Feb-Jun, day 1-27 of the month)
    participant_id = 'P'+''.join([str(rnd.choice(np.arange(0,10), 1)[0]) for i in range(12)])
    onset_date = f'2020-{rnd.choice(np.arange(2,7), 1)[0]:01d}-{rnd.choice(np.arange(1,28), 1)[0]:01d}'
    # NOTE(review): these two module-style constants are unused below
    healthy_miss_fraction = 0.1
    illness_miss_fraction = 0.2
    # [mean, std] of the normalised feature noise
    healthy_dist_param = [0, 0.3]
    illness_dist_param = [0, 0.4]
    # per-illness effect sizes, missing-data rates per label, and peak day
    dict_ili = {1:{'hr_max': -0.1, ## ILI
                   'rhr':0.05,
                   'hr_stdv': -0.3,
                   'hr_50pct': -0.2,
                   'miss_fraction': {0:0.1, 1:0.15, 2:0.1},
                   'pivot': 2
                   },
                2:{'hr_max': -0.2, ## FLU
                   'rhr': 0.1,
                   'hr_stdv': -0.4,
                   'hr_50pct': -0.3,
                   'miss_fraction': {0: 0.1, 1:0.2, 2:0.1},
                   'pivot': 3
                   },
                3:{'hr_max': -0.4, ## COVID
                   'rhr': 0.3,
                   'hr_stdv': -0.6,
                   'hr_50pct': -0.5,
                   'miss_fraction': {0: 0.1, 1: 0.25, 2:0.2},
                   'pivot': 4
                   }
                }
    shifts = 5
    dt = pd.date_range(pd.to_datetime(onset_date) - pd.Timedelta('28d'),
                       pd.to_datetime(onset_date) + pd.Timedelta('14d'),
                       )
    col_names = {'hr_max': 'heart_rate__not_moving__max',
                 'rhr': 'heart_rate__resting_heart_rate',
                 'hr_stdv': 'heart_rate__stddev',
                 'hr_50pct': 'heart_rate__perc_50th'
                 }
    n_cols = len(col_names)
    def _linear_trend(peak, trough, width, days):
        # triangular ramp trough->peak->trough over 2*width days, padded with trough
        step = (peak-trough)/width
        return np.pad(np.concatenate([np.arange(trough, peak, step)+step,
                                       np.arange(peak,trough, -step)-step]),
                      (0,days-2*width), constant_values=trough)
    def _sample_hr(rnd, days, label, ili_type):
        # sample `days` rows of features plus a trailing missingness indicator
        if label==1:
            return np.column_stack([rnd.normal(illness_dist_param[0], illness_dist_param[1], [days, len(col_names)]) +\
                                     np.column_stack([_linear_trend(dict_ili[ili_type][colz], illness_dist_param[0], dict_ili[ili_type]['pivot'], days)
                                                      for colz in col_names.keys()]),
                                     0+(rnd.uniform(0,1,days) < dict_ili[ili_type]['miss_fraction'][label])
                                     ])
        else:
            return np.column_stack([rnd.normal(healthy_dist_param[0], healthy_dist_param[1], [days, len(col_names)]),
                                     0+(rnd.uniform(0,1,days) < dict_ili[ili_type]['miss_fraction'][label])
                                     ])
    def _add_shifts(x, rows, cols):
        # shift rows down by `rows` days, padding the top with NaN
        if rows==0:
            return x
        y = np.empty([rows,cols])
        y[:] = np.nan
        return np.row_stack([y, x[:-rows,:]])
    # 27 healthy days, 9 ill days, 7 healthy days = 43 rows matching `dt`
    dat = np.row_stack([_sample_hr(rnd, 27, 0, ili_type),
                        _sample_hr(rnd, 9, 1, ili_type),
                        _sample_hr(rnd, 7, 0, ili_type)
                        ])
    dat[dat[:,-1]==1, :-1] = np.nan ### add missing values
    # NOTE(review): every shifted block uses lag 1, not lag i — confirm intended
    out = pd.DataFrame(np.column_stack([_add_shifts(dat[:,:-1], 1, n_cols) for i in range(shifts)]),
                       columns=pd.MultiIndex.from_product([[str(i)+'days_ago' for i in range(shifts)], col_names.values()]),
                       index=pd.MultiIndex.from_product([[participant_id], dt], names=['id_participant_external', 'dt'])
                       )
    day_col = ('labels', 'days_since_onset')
    label_col= ('labels', 'training_labels')
    out[('labels', 'split')] = split
    out[('labels', 'ILI_type')] = ili_type
    out[day_col] = np.arange(-28,15)
    # labels: -1 ignore, 0 healthy window (-21..-8), 1 ill window (+1..+7)
    out[label_col] = -1
    out.loc[(out[day_col] > -22) & (out[day_col] < -7), label_col] = 0
    out.loc[(out[day_col] > 0) & (out[day_col] < 8), label_col] = 1
    return out
# COMMAND ----------
# MAGIC %md Prepare data
# COMMAND ----------
def get_dataset(df, keep_filter, days_ago, feature_cols,
                label_col = ('labels', 'training_labels')):
    """Slice a feature matrix and label vector out of the wide frame.

    Prints the shapes, the percentage of all-NaN feature rows, and the label
    distribution, then returns (X, y, non-empty-row mask).
    """
    labels = df.loc[keep_filter, label_col]
    features = df.loc[keep_filter, idx[days_ago, feature_cols]]
    # rows where at least one feature is present
    usable_rows = ~features.isna().all(axis=1)
    print(features.shape, labels.shape)
    print(f'Missing rows percent = {100 - 100*usable_rows.sum()/features.shape[0]:.2f}%')
    print(prop_table(labels))
    return features, labels, usable_rows
# COMMAND ----------
# MAGIC %md Model training
# COMMAND ----------
def run_xgb_class2(classifier, X_train, y_train, X_val, y_val, scorer=roc_auc_score):
    """Fit *classifier* and print train/validation scores for the positive class.

    :param classifier: any object with fit/predict_proba
    :param scorer: metric taking (y_true, y_score); defaults to ROC-AUC
    :return: (fitted classifier, train class-1 probabilities, val class-1 probabilities)
    """
    classifier.fit(X_train, y_train)
    prob_train = classifier.predict_proba(X_train)[:, 1]
    prob_val = classifier.predict_proba(X_val)[:, 1]
    print(f'Train ROC: {scorer(y_train, prob_train):.4f}')
    # validation labels are binarised (>0) before scoring
    print(f'Val ROC: {scorer(0+(y_val > 0), prob_val):.4f}')
    return classifier, prob_train, prob_val
def run_xgb_hyperopt_2class(space, X_train, y_train, X_val, y_val, scorer=roc_auc_score):
    """Grid-search XGBoost hyper-parameters and return a ranked results table.

    :param space: iterable of keyword dicts passed to XGBClassifier
    :param scorer: metric taking (y_true, y_score); defaults to ROC-AUC
    :return: DataFrame with train_roc/val_roc/time_mins plus the parameter
             columns, best validation score first
    """
    records = []
    for params in space:
        model = XGBClassifier(**params, use_label_encoder=False, eval_metric='error')
        start = time()
        model.fit(X_train, y_train)
        fit_minutes = (time() - start) / 60
        prob_train = model.predict_proba(X_train)[:, 1]
        prob_val = model.predict_proba(X_val)[:, 1]
        records.append(pd.Series([scorer(y_train, prob_train),
                                  scorer(y_val, prob_val),
                                  fit_minutes] + list(params.values()),
                                 index=['train_roc', 'val_roc', 'time_mins'] + list(params.keys())
                                 ))
    results = pd.concat(records, axis=1).T
    # best validation score first; ties broken by the least over-fit train score
    return results.sort_values(by=['val_roc', 'train_roc'], ascending=[False, True])
# COMMAND ----------
# MAGIC %md Model predictions
# COMMAND ----------
def get_specificity_threshold(y, yh, list_specificity_fraction):
    """
    For each requested specificity level, find the score threshold on the ROC
    curve and print the corresponding classification report.
    :param y: true binary labels
    :param yh: predicted scores/probabilities for the positive class
    :param list_specificity_fraction: specificities as fractions, e.g. [0.95, 0.99]
    :return: (sklearn roc_curve output, {specificity: threshold rounded to 4 dp})
    """
    ROC = roc_curve(y, yh)
    dict_thresh = {}
    for spec in list_specificity_fraction:
        # last threshold whose FPR stays <= 1 - specificity
        # NOTE(review): the `[0]-1` shifts every matched index down by one before
        # taking the last element — verify this off-by-one is intentional.
        thresh = ROC[2][np.where(ROC[0] <= 1-spec)[0]-1][-1]
        print(f'{100*spec:.0f}% Specifivity cutoff = {thresh:.4f}')
        print(classification_report(y, 0+(yh >= thresh)))
        print('-' * 50)
        dict_thresh[spec] = np.round(thresh, 4)
    return ROC, dict_thresh
def run_get_predictions(classifier, X, y, filter_row, df_labels, use_spec, use_spec_thresh, col_names=['I', 'C'], group_cols=['participant_id', 'event_order'], day_col='days_since_onset_v43', day_detect=-3, type_col='ILI_type', hue_col='Type', ili_type_map = {1:'any ILI', 2:'Flu', 3:'COVID'}):
    """
    Score X with the classifier, threshold at a chosen specificity, and build
    per-day cumulative detection counts per illness type.
    :param classifier: fitted model with predict_proba
    :param X: feature matrix; :param y: labels (only its index is used for joining)
    :param filter_row: boolean mask of rows whose predictions are valid
    :param df_labels: label frame with a two-level column index (top level dropped)
    :param use_spec: specificity fraction used only to name the output columns
    :param use_spec_thresh: probability cutoff for a positive prediction
    :param day_detect: detections before this day (relative to onset) are zeroed
    :return: (per-row prediction frame, per-day cumulative recall frame)
    """
    yh = classifier.predict_proba(X)
    tmp = df_labels.loc[y.index,:]
    tmp.columns = tmp.columns.droplevel(0)
    pred = (pd.DataFrame(yh, index=y.index, columns=col_names)
            .join(tmp)
            )
    #pred = get_predictions_v4(df_labels, y, yh, col_names)
    print('N =', count_unique_index(pred))
    # derived column names carry the specificity level and class names
    thresh_tag = f'_spec{100*use_spec:.0f}_{"v".join(col_names)}'
    spec_thresh_col = 'pred'+thresh_tag
    filter_thresh_col = 'filter'+thresh_tag
    cumsum_thresh_col = 'cumsum'+thresh_tag
    count_thresh_col = 'count'+thresh_tag
    # binary prediction from the last class's probability; NaN where filtered out
    pred[spec_thresh_col] = 0+(pred[col_names[-1]] >= use_spec_thresh)
    pred.loc[~filter_row, spec_thresh_col] = np.nan
    pred[filter_thresh_col] = pred[spec_thresh_col].copy()
    pred[count_thresh_col] = pred[spec_thresh_col].copy()
    pred.loc[pred[spec_thresh_col].notna(), count_thresh_col] = 1
    ### Set detection before Day -2 as 0
    pred.loc[pred[day_col] < day_detect, filter_thresh_col] = 0
    pred.loc[pred[day_col] < day_detect, count_thresh_col] = 0
    ### Cumsum predictions
    pred = (pred
            .join(pred
                  .groupby(group_cols, as_index=False)
                  .apply(lambda x: x[filter_thresh_col].cumsum().ffill()).rename(cumsum_thresh_col).droplevel(0).to_frame()
                  )
            )
    ### Set all days after first detection as 1
    pred.loc[pred[cumsum_thresh_col] > 1, cumsum_thresh_col] = 1
    pred[count_thresh_col] = pred[count_thresh_col].ffill()
    # aggregate detected / countable participants per day and illness type
    plot_df = (pred
               .groupby([day_col, type_col])
               .agg({cumsum_thresh_col: 'sum', count_thresh_col: 'sum'})
               .reset_index()
               )
    def run_expanding_max(x, colz=day_col):
        # running maximum over days so recall is monotone non-decreasing
        return (x
                .set_index(colz)
                .expanding()
                .max()
                .reset_index()
                )
    plot_df = (plot_df
               .groupby(type_col, as_index=False)
               .apply(run_expanding_max)
               .reset_index(drop=True)
               )
    plot_df['recall_fraction'] = plot_df[cumsum_thresh_col]/plot_df[count_thresh_col]
    print('Cumulative recall shape=', plot_df.shape)
    # legend labels like "COVID, N=123" per illness type
    map_type = (plot_df
                .groupby(type_col)
                .apply(lambda x: ili_type_map[x[type_col].unique()[0]] + ', N='+ str(int(x[count_thresh_col].max())))
                .to_dict()
                )
    pred[hue_col] = pred[type_col].map(map_type)
    plot_df[hue_col] = plot_df[type_col].map(map_type)
    print('Predictions shape=', pred.shape)
    return pred, plot_df
def get_feature_importance(classifier):
    """Gain-based feature importances of a fitted XGBoost model.

    :param classifier: fitted model exposing get_booster().get_score(...)
    :return: DataFrame with columns ['feature_name', 'gain'], sorted by gain
             descending
    """
    gain_scores = classifier.get_booster().get_score(importance_type='gain')
    frame = (pd.Series(gain_scores)
             .sort_values(ascending=False)
             .to_frame()
             .reset_index())
    return frame.rename(columns={'index': 'feature_name', 0: 'gain'})
# COMMAND ----------
# MAGIC %md Plotting
# COMMAND ----------
# Fixed colour assignments so each illness type renders consistently across plots.
sns_hue = sns.color_palette()
dict_hue = {'ILI': sns_hue[0],
            'Covid': sns_hue[1],
            'Healthy': sns_hue[2],
            'Flu': sns_hue[3]}
# Numeric illness-type code -> display label.
ili_type_map = {0:'Healthy', 1: 'ILI', 2: 'Flu', 3: 'COVID-19'}
def plot_trend_lines(df, plot_cols, use_palette, use_hue_order, type_col=('labels', 'Type'),
                     ts_col = ('labels', "days_since_onset"), ci=67, ts_cut=30, ts_step=4, line_color='coral',
                     per_row=2, thick=2, plot_width=5, plot_height=4, sharex=False, sharey=True, grid=False):
    """
    Grid of seaborn line plots: one feature per panel, lines split by illness
    type, x-axis limited to +/- ts_cut days around onset. Returns the closed
    matplotlib figure.
    :param df: wide frame whose columns include plot_cols, type_col and ts_col
    :param plot_cols: feature columns (tuples or strings) to plot, one per panel
    :param use_palette / use_hue_order: forwarded to sns.lineplot
    :param ci: confidence-interval width for the line bands
    :param ts_cut / ts_step: day-range cutoff and x-tick spacing
    :param per_row: panels per row in the subplot grid
    """
    plotz = len(plot_cols)
    # ceil(plotz / per_row) rows
    rowz = plotz // per_row + 0+(plotz % per_row > 0)
    fig, axes = plt.subplots(nrows=rowz, ncols=per_row, figsize=(plot_width*per_row, plot_height*rowz), sharey=sharey, sharex=sharex)
    keep_rows = (df[ts_col] >= -ts_cut) & (df[ts_col] <= ts_cut)
    (ts_min, ts_max) = df[keep_rows].agg({ts_col: ['min', 'max']}).unstack().values
    print(ts_min, ts_max)
    # axes is a scalar when there is a single panel, an ndarray otherwise
    for ft_col, ax in zip(plot_cols, axes.flatten() if type(axes) == np.ndarray else [axes]):
        if type(ft_col) == tuple:
            colr = ft_col[-1]
        else:
            colr = ft_col
        if type(ts_col) == tuple:
            xlabel = ts_col[-1]
        else:
            xlabel = ts_col
        sns.lineplot(x=ts_col, y=ft_col, hue=type_col, data=df.loc[keep_rows,:],
                     palette = use_palette,
                     hue_order = use_hue_order,
                     ax=ax, ci=ci, color=line_color, linewidth=thick)
        # drop the automatic legend title entry
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles=handles[1:], labels=labels[1:], fontsize=14)
        # reference lines at onset day and baseline 0
        ax.axvline(x=0, c='k', ls='--')
        ax.axhline(y=0, c='k', ls='--')
        ax.set_xticks(np.arange(ts_min, ts_max+1, ts_step))
        ax.set_title(colr.replace('__',' : ').replace('_',' ').capitalize(), fontsize=16)
        ax.set_xlabel(xlabel.replace('_',' ').capitalize(), fontsize=14)
        ax.set_ylabel('')
        if grid:
            ax.grid()
    plt.tight_layout()
    plt.close()
    return fig
return fig
def single_plot_missing_performance(y_val, yh_val, X_val, title='Healthy v. ILI', min_N=10):
    """Plot classifier performance as a function of allowed missing-data fraction.

    For each threshold q in [0, 1] (step 0.05), rows whose fraction of missing
    features is <= q are kept, and the per-class mean score, AUROC and the
    retained data fraction are plotted against q.

    Args:
        y_val: binary ground-truth labels (pandas Series; its index is reused).
        yh_val: predicted scores/probabilities aligned with ``y_val``.
        X_val: feature DataFrame used only to compute each row's missing fraction.
        title: plot title.
        min_N: skip thresholds that retain fewer than this many rows
            (AUROC on tiny subsets is meaningless).
    Returns:
        The matplotlib Figure (closed, so it renders only when displayed).
    """
    # one row per sample: ground truth, prediction, fraction of NaN features
    out_val = pd.DataFrame(np.column_stack([y_val.values, yh_val, X_val.isna().sum(axis=1)/X_val.shape[1]]), index=y_val.index, columns=['gt', 'prob', 'frac'])
    fig, ax = plt.subplots(1, 1, figsize=(7,5))
    ## Add cumulative missing-fraction's AUROC score
    plot_roc = pd.DataFrame([{'frac': q,
                              'Data Fraction': (out_val['frac'] <= q).sum()/out_val.shape[0],
                              'Class-0 mean': out_val.loc[(out_val['frac'] <= q) & (out_val['gt']==0), 'prob'].mean(),
                              'Class-1 mean': out_val.loc[(out_val['frac'] <= q) & (out_val['gt']==1), 'prob'].mean(),
                              'AUROC': roc_auc_score(out_val.loc[out_val.frac <= q, 'gt'], out_val.loc[out_val.frac <= q, 'prob'])}
                             for q in np.arange(0, 1.05, 0.05) if (out_val.frac <= q).sum() >= min_N])
    plot_roc.plot(x='frac', y=['Class-0 mean', 'Class-1 mean', 'AUROC', 'Data Fraction'], ax=ax,
                  color=['C0', 'orange', 'k', 'coral'], style=['-', '-', '--', '-.'], linewidth=2)
    #sns.lineplot(x='frac', y='prob', hue='True label', data=out_val.rename(columns={'gt': 'True label'}), ax=ax[1], lw=2)
    #plot_roc.plot(x='frac', y='AUROC', ax=ax[1], color='k', style='--', linewidth=2)
    ax.set_ylabel('Score', fontsize=14)
    ax.set_xlabel('Missing data Less Than fraction', fontsize=14)
    ax.set_title(title, fontsize=15)
    ax.legend(fontsize=12)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.close()
    return fig
def plot_roc(fpr, tpr):
    """Plot an ROC curve with the chance diagonal and return the Figure.

    Args:
        fpr: false-positive rates (presumably from sklearn's roc_curve -- confirm).
        tpr: true-positive rates, same length as ``fpr``.
    Returns:
        The matplotlib Figure; it is closed so it renders only when displayed
        (e.g. via ``display(fig)`` in a notebook).
    """
    fig = plt.figure(figsize=(5,4))
    plt.plot([0, 1], [0, 1], 'k--')  # chance (random-classifier) diagonal
    plt.plot(fpr, tpr)
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.grid()
    plt.close()
    return fig
def plot_spec_recall_since_onset(pred, plot_df, use_spec, spec_thresh_col, use_palette, use_hue_order, set_tag, max_missing_frac, cumsum_col, count_col, y_tag='COVID-19', dataset_tag='LSFS', hue_col='Type', x_col='days_since_onset_v43', ci=67, xticks_range=np.arange(-28,15,4)):
    """Plot the fraction of positive predictions per day since symptom onset.

    Draws one seaborn trend line per hue category from ``pred`` (binary
    predictions at a fixed-specificity threshold), optionally overlays
    cumulative recall curves from ``plot_df``, and marks onset day (x=0) and
    the 1-specificity level as reference lines.

    Args:
        pred: per-sample predictions with ``x_col`` and ``hue_col`` columns.
        plot_df: per-day cumulative recall data ('recall_fraction' column);
            only used when ``cumsum_col`` is not None.
        use_spec: target specificity, e.g. 0.98; 1-use_spec is the reference line.
        spec_thresh_col: column of thresholded (0/1) predictions to average.
        use_palette, use_hue_order: seaborn palette and hue ordering.
        set_tag, dataset_tag, y_tag: labels used in the title/axis text.
        max_missing_frac: missing-data cap, reported in the title only.
        cumsum_col: enables the cumulative overlay when not None.
        count_col: unused here -- kept for call-site compatibility (TODO confirm).
        ci: confidence-interval width passed to seaborn.
        xticks_range: x-tick positions (note: a mutable default evaluated once;
            read-only here so this is safe).
    Returns:
        (Figure, Axes) tuple; the figure is closed so it renders only when displayed.
    """
    fig, ax = plt.subplots(1,1,figsize=(9,6))
    threshold_line = 1-use_spec
    sns.lineplot(x = x_col,
                 y = spec_thresh_col,
                 hue = hue_col,
                 palette = use_palette,
                 hue_order = use_hue_order,
                 data = pred,
                 ci = ci
                )
    if cumsum_col is not None:
        # overlay dashed cumulative-recall curves, one per hue category,
        # reusing that category's palette colour
        for i,q in enumerate(use_hue_order):
            labz = 'Cumulative '+q.split(',')[0]
            (plot_df[plot_df[hue_col]==q]
             .rename(columns={'recall_fraction':labz})
             .plot(x=x_col, y=labz, c=use_palette[i], lw=2,
                   style='--', ax=ax, legend=False)
            )
    # reference lines: onset day and the expected false-positive rate
    ax.axvline(x=0, c='k', ls=':', alpha=0.5)
    ax.axhline(y=threshold_line, c='k', ls=':', alpha=0.7)
    ax.set_xticks(xticks_range)
    plt.xticks(fontsize=12);
    ax.legend(fontsize=14)  ##loc='upper left',
    plt.yticks(fontsize=12);
    ax.set_ylabel(f'Fraction positive predictions\n for {y_tag}', fontsize=16)
    ax.set_xlabel('Days since onset', fontsize=16)
    #ax.set_title(f'{dataset_tag}: {set_tag}-set predictions\n {100*use_spec:.0f}% specificity, max-{100*max_missing_frac:.0f}% missing', fontsize=18)
    ax.set_title(f'{dataset_tag}: {set_tag} \n {100*use_spec:.0f}% specificity threshold, max-{100*max_missing_frac:.0f}% missing data', fontsize=18)
    plt.close()
    return fig, ax
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.roc_curve",
"numpy.row_stack",
"numpy.random.RandomState",
"numpy.arange",
"pandas.to_datetime",
"pandas.MultiIndex.from_product",
"seaborn.color_palet... | [((10980, 10999), 'seaborn.color_palette', 'sns.color_palette', ([], {}), '()\n', (10997, 10999), True, 'import seaborn as sns\n'), ((1231, 1266), 'numpy.random.RandomState', 'np.random.RandomState', (['random_state'], {}), '(random_state)\n', (1252, 1266), True, 'import numpy as np\n'), ((5035, 5053), 'numpy.arange', 'np.arange', (['(-28)', '(15)'], {}), '(-28, 15)\n', (5044, 5053), True, 'import numpy as np\n'), ((7232, 7248), 'sklearn.metrics.roc_curve', 'roc_curve', (['y', 'yh'], {}), '(y, yh)\n', (7241, 7248), False, 'from sklearn.metrics import roc_auc_score, roc_curve, classification_report\n'), ((11617, 11743), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'rowz', 'ncols': 'per_row', 'figsize': '(plot_width * per_row, plot_height * rowz)', 'sharey': 'sharey', 'sharex': 'sharex'}), '(nrows=rowz, ncols=per_row, figsize=(plot_width * per_row, \n plot_height * rowz), sharey=sharey, sharex=sharex)\n', (11629, 11743), True, 'import matplotlib.pyplot as plt\n'), ((12984, 13002), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13000, 13002), True, 'import matplotlib.pyplot as plt\n'), ((13007, 13018), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13016, 13018), True, 'import matplotlib.pyplot as plt\n'), ((13303, 13337), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 5)'}), '(1, 1, figsize=(7, 5))\n', (13315, 13337), True, 'import matplotlib.pyplot as plt\n'), ((14603, 14626), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (14613, 14626), True, 'import matplotlib.pyplot as plt\n'), ((14631, 14654), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (14641, 14654), True, 'import matplotlib.pyplot as plt\n'), ((14659, 14670), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14668, 14670), True, 'import matplotlib.pyplot as plt\n'), ((14726, 14752), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4)'}), '(figsize=(5, 4))\n', (14736, 14752), True, 'import matplotlib.pyplot as plt\n'), ((14756, 14787), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {}), "([0, 1], [0, 1], 'k--')\n", (14764, 14787), True, 'import matplotlib.pyplot as plt\n'), ((14792, 14810), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (14800, 14810), True, 'import matplotlib.pyplot as plt\n'), ((14815, 14848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False positive rate"""'], {}), "('False positive rate')\n", (14825, 14848), True, 'import matplotlib.pyplot as plt\n'), ((14853, 14885), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True positive rate"""'], {}), "('True positive rate')\n", (14863, 14885), True, 'import matplotlib.pyplot as plt\n'), ((14890, 14900), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (14898, 14900), True, 'import matplotlib.pyplot as plt\n'), ((14905, 14916), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14914, 14916), True, 'import matplotlib.pyplot as plt\n'), ((15191, 15212), 'numpy.arange', 'np.arange', (['(-28)', '(15)', '(4)'], {}), '(-28, 15, 4)\n', (15200, 15212), True, 'import numpy as np\n'), ((15228, 15262), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(9, 6)'}), '(1, 1, figsize=(9, 6))\n', (15240, 15262), True, 'import matplotlib.pyplot as plt\n'), ((15301, 15422), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': 'x_col', 'y': 'spec_thresh_col', 'hue': 'hue_col', 'palette': 'use_palette', 'hue_order': 'use_hue_order', 'data': 'pred', 'ci': 'ci'}), '(x=x_col, y=spec_thresh_col, hue=hue_col, palette=use_palette,\n hue_order=use_hue_order, data=pred, ci=ci)\n', (15313, 15422), True, 'import seaborn as sns\n'), ((16053, 16076), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (16063, 16076), True, 'import matplotlib.pyplot as plt\n'), 
((16130, 16153), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (16140, 16153), True, 'import matplotlib.pyplot as plt\n'), ((16590, 16601), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16599, 16601), True, 'import matplotlib.pyplot as plt\n'), ((985, 1041), 'pandas.DataFrame', 'pd.DataFrame', (["[['Total', tot, 100]]"], {'columns': 'tmp.columns'}), "([['Total', tot, 100]], columns=tmp.columns)\n", (997, 1041), True, 'import pandas as pd\n'), ((4128, 4150), 'numpy.empty', 'np.empty', (['[rows, cols]'], {}), '([rows, cols])\n', (4136, 4150), True, 'import numpy as np\n'), ((4187, 4218), 'numpy.row_stack', 'np.row_stack', (['[y, x[:-rows, :]]'], {}), '([y, x[:-rows, :]])\n', (4199, 4218), True, 'import numpy as np\n'), ((6360, 6429), 'xgboost.XGBClassifier', 'XGBClassifier', ([], {'use_label_encoder': '(False)', 'eval_metric': '"""error"""'}), "(**params, use_label_encoder=False, eval_metric='error')\n", (6373, 6429), False, 'from xgboost import XGBClassifier\n'), ((6447, 6453), 'time.time', 'time', ([], {}), '()\n', (6451, 6453), False, 'from time import time\n'), ((6511, 6517), 'time.time', 'time', ([], {}), '()\n', (6515, 6517), False, 'from time import time\n'), ((6969, 6994), 'pandas.concat', 'pd.concat', (['hypopt'], {'axis': '(1)'}), '(hypopt, axis=1)\n', (6978, 6994), True, 'import pandas as pd\n'), ((7552, 7571), 'numpy.round', 'np.round', (['thresh', '(4)'], {}), '(thresh, 4)\n', (7560, 7571), True, 'import numpy as np\n'), ((12238, 12415), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': 'ts_col', 'y': 'ft_col', 'hue': 'type_col', 'data': 'df.loc[keep_rows, :]', 'palette': 'use_palette', 'hue_order': 'use_hue_order', 'ax': 'ax', 'ci': 'ci', 'color': 'line_color', 'linewidth': 'thick'}), '(x=ts_col, y=ft_col, hue=type_col, data=df.loc[keep_rows, :],\n palette=use_palette, hue_order=use_hue_order, ax=ax, ci=ci, color=\n line_color, linewidth=thick)\n', (12250, 12415), True, 'import seaborn as sns\n'), 
((2449, 2475), 'pandas.to_datetime', 'pd.to_datetime', (['onset_date'], {}), '(onset_date)\n', (2463, 2475), True, 'import pandas as pd\n'), ((2478, 2497), 'pandas.Timedelta', 'pd.Timedelta', (['"""28d"""'], {}), "('28d')\n", (2490, 2497), True, 'import pandas as pd\n'), ((2513, 2539), 'pandas.to_datetime', 'pd.to_datetime', (['onset_date'], {}), '(onset_date)\n', (2527, 2539), True, 'import pandas as pd\n'), ((2542, 2561), 'pandas.Timedelta', 'pd.Timedelta', (['"""14d"""'], {}), "('14d')\n", (2554, 2561), True, 'import pandas as pd\n'), ((4735, 4831), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[[participant_id], dt]'], {'names': "['id_participant_external', 'dt']"}), "([[participant_id], dt], names=[\n 'id_participant_external', 'dt'])\n", (4761, 4831), True, 'import pandas as pd\n'), ((7456, 7500), 'sklearn.metrics.classification_report', 'classification_report', (['y', '(0 + (yh >= thresh))'], {}), '(y, 0 + (yh >= thresh))\n', (7477, 7500), False, 'from sklearn.metrics import roc_auc_score, roc_curve, classification_report\n'), ((8036, 8086), 'pandas.DataFrame', 'pd.DataFrame', (['yh'], {'index': 'y.index', 'columns': 'col_names'}), '(yh, index=y.index, columns=col_names)\n', (8048, 8086), True, 'import pandas as pd\n'), ((12704, 12742), 'numpy.arange', 'np.arange', (['ts_min', '(ts_max + 1)', 'ts_step'], {}), '(ts_min, ts_max + 1, ts_step)\n', (12713, 12742), True, 'import numpy as np\n'), ((13811, 13907), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (["out_val.loc[out_val.frac <= q, 'gt']", "out_val.loc[out_val.frac <= q, 'prob']"], {}), "(out_val.loc[out_val.frac <= q, 'gt'], out_val.loc[out_val.\n frac <= q, 'prob'])\n", (13824, 13907), False, 'from sklearn.metrics import roc_auc_score, roc_curve, classification_report\n'), ((13943, 13967), 'numpy.arange', 'np.arange', (['(0)', '(1.05)', '(0.05)'], {}), '(0, 1.05, 0.05)\n', (13952, 13967), True, 'import numpy as np\n'), ((1402, 1417), 'numpy.arange', 'np.arange', (['(2)', '(7)'], 
{}), '(2, 7)\n', (1411, 1417), True, 'import numpy as np\n'), ((1441, 1457), 'numpy.arange', 'np.arange', (['(1)', '(28)'], {}), '(1, 28)\n', (1450, 1457), True, 'import numpy as np\n'), ((2977, 3006), 'numpy.arange', 'np.arange', (['trough', 'peak', 'step'], {}), '(trough, peak, step)\n', (2986, 3006), True, 'import numpy as np\n'), ((3051, 3081), 'numpy.arange', 'np.arange', (['peak', 'trough', '(-step)'], {}), '(peak, trough, -step)\n', (3060, 3081), True, 'import numpy as np\n'), ((7337, 7365), 'numpy.where', 'np.where', (['(ROC[0] <= 1 - spec)'], {}), '(ROC[0] <= 1 - spec)\n', (7345, 7365), True, 'import numpy as np\n'), ((1321, 1337), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (1330, 1337), True, 'import numpy as np\n')] |
import numpy as np
import unittest
from monte_carlo_tree_search import Node, MCTS, ucb_score
from game import Connect2Game
class MCTSTests(unittest.TestCase):
    """End-to-end checks that MCTS visit counts favour the best Connect2 moves."""

    def test_mcts_from_root_with_equal_priors(self):
        class MockModel:
            def predict(self, board):
                # near-uniform priors over the empty board [0, 0, 0, 0]
                return np.array([0.26, 0.24, 0.24, 0.26]), 0.0001

        game = Connect2Game()
        args = {'num_simulations': 50}

        model = MockModel()
        mcts = MCTS(game, model, args)
        canonical_board = [0, 0, 0, 0]
        print("starting")
        root = mcts.run(model, canonical_board, to_play=1)

        # the best move is to play at index 1 or 2
        # BUG FIX: the outer-move count previously compared children[0] with
        # itself; the outer squares are indices 0 and 3.
        best_outer_move = max(root.children[0].visit_count, root.children[3].visit_count)
        best_center_move = max(root.children[1].visit_count, root.children[2].visit_count)
        self.assertGreater(best_center_move, best_outer_move)

    def test_mcts_finds_best_move_with_really_bad_priors(self):
        class MockModel:
            def predict(self, board):
                # starting board is:
                # [0, 0, 1, -1]
                return np.array([0.3, 0.7, 0, 0]), 0.0001

        game = Connect2Game()
        args = {'num_simulations': 25}

        model = MockModel()
        mcts = MCTS(game, model, args)
        canonical_board = [0, 0, 1, -1]
        print("starting")
        root = mcts.run(model, canonical_board, to_play=1)

        # the best move is to play at index 1
        self.assertGreater(root.children[1].visit_count, root.children[0].visit_count)

    def test_mcts_finds_best_move_with_equal_priors(self):
        class MockModel:
            def predict(self, board):
                return np.array([0.51, 0.49, 0, 0]), 0.0001

        game = Connect2Game()
        args = {'num_simulations': 25}

        model = MockModel()
        mcts = MCTS(game, model, args)
        canonical_board = [0, 0, -1, 1]
        root = mcts.run(model, canonical_board, to_play=1)

        # the better move is to play at index 1
        self.assertLess(root.children[0].visit_count, root.children[1].visit_count)

    def test_mcts_finds_best_move_with_really_really_bad_priors(self):
        class MockModel:
            def predict(self, board):
                # starting board is:
                # [-1, 0, 0, 0]
                return np.array([0, 0.3, 0.3, 0.3]), 0.0001

        game = Connect2Game()
        args = {'num_simulations': 100}

        model = MockModel()
        mcts = MCTS(game, model, args)
        canonical_board = [-1, 0, 0, 0]
        root = mcts.run(model, canonical_board, to_play=1)

        # the best move is to play at index 1
        self.assertGreater(root.children[1].visit_count, root.children[2].visit_count)
        self.assertGreater(root.children[1].visit_count, root.children[3].visit_count)
class NodeTests(unittest.TestCase):
    """Unit tests for Node: initialisation, child selection, expansion and UCB scoring."""

    def test_initialization(self):
        node = Node(0.5, to_play=1)

        self.assertEqual(node.visit_count, 0)
        self.assertEqual(node.prior, 0.5)
        self.assertEqual(len(node.children), 0)
        self.assertFalse(node.expanded())
        self.assertEqual(node.value(), 0)

    def test_selection(self):
        node = Node(0.5, to_play=1)
        c0 = Node(0.5, to_play=-1)
        c1 = Node(0.5, to_play=-1)
        c2 = Node(0.5, to_play=-1)

        node.visit_count = 1
        c0.visit_count = 0
        # BUG FIX: c2 was assigned twice while c1 was never set; give each
        # child its intended count so only c2 has been visited.
        c1.visit_count = 0
        c2.visit_count = 1

        node.children = {
            0: c0,
            1: c1,
            2: c2,
        }

        # temperature=0 deterministically picks the most-visited child
        action = node.select_action(temperature=0)
        self.assertEqual(action, 2)

    def test_expansion(self):
        node = Node(0.5, to_play=1)
        state = [0, 0, 0, 0]
        action_probs = [0.25, 0.15, 0.5, 0.1]
        to_play = 1
        node.expand(state, to_play, action_probs)

        self.assertEqual(len(node.children), 4)
        self.assertTrue(node.expanded())
        self.assertEqual(node.to_play, to_play)
        # each child's prior matches the supplied action probability
        self.assertEqual(node.children[0].prior, 0.25)
        self.assertEqual(node.children[1].prior, 0.15)
        self.assertEqual(node.children[2].prior, 0.50)
        self.assertEqual(node.children[3].prior, 0.10)

    def test_ucb_score_no_children_visited(self):
        node = Node(0.5, to_play=1)
        node.visit_count = 1
        state = [0, 0, 0, 0]
        action_probs = [0.25, 0.15, 0.5, 0.1]
        to_play = 1
        node.expand(state, to_play, action_probs)
        node.children[0].visit_count = 0
        node.children[1].visit_count = 0
        node.children[2].visit_count = 0
        node.children[3].visit_count = 0

        score_0 = ucb_score(node, node.children[0])
        score_1 = ucb_score(node, node.children[1])
        score_2 = ucb_score(node, node.children[2])
        score_3 = ucb_score(node, node.children[3])

        # With no visits, UCB score is just the priors
        self.assertEqual(score_0, node.children[0].prior)
        self.assertEqual(score_1, node.children[1].prior)
        self.assertEqual(score_2, node.children[2].prior)
        self.assertEqual(score_3, node.children[3].prior)

    def test_ucb_score_one_child_visited(self):
        node = Node(0.5, to_play=1)
        node.visit_count = 1
        state = [0, 0, 0, 0]
        action_probs = [0.25, 0.15, 0.5, 0.1]
        to_play = 1
        node.expand(state, to_play, action_probs)
        node.children[0].visit_count = 0
        node.children[1].visit_count = 0
        node.children[2].visit_count = 1
        node.children[3].visit_count = 0

        score_0 = ucb_score(node, node.children[0])
        score_1 = ucb_score(node, node.children[1])
        score_2 = ucb_score(node, node.children[2])
        score_3 = ucb_score(node, node.children[3])

        # With no visits, UCB score is just the priors
        self.assertEqual(score_0, node.children[0].prior)
        self.assertEqual(score_1, node.children[1].prior)
        # If we visit one child once, its score is halved
        self.assertEqual(score_2, node.children[2].prior / 2)
        self.assertEqual(score_3, node.children[3].prior)

        action, child = node.select_child()
        self.assertEqual(action, 0)

    def test_ucb_score_one_child_visited_twice(self):
        node = Node(0.5, to_play=1)
        node.visit_count = 2
        state = [0, 0, 0, 0]
        action_probs = [0.25, 0.15, 0.5, 0.1]
        to_play = 1
        node.expand(state, to_play, action_probs)
        node.children[0].visit_count = 0
        node.children[1].visit_count = 0
        node.children[2].visit_count = 2
        node.children[3].visit_count = 0

        score_0 = ucb_score(node, node.children[0])
        score_1 = ucb_score(node, node.children[1])
        score_2 = ucb_score(node, node.children[2])
        score_3 = ucb_score(node, node.children[3])

        action, child = node.select_child()
        # Now that we've visited the second action twice, we should
        # end up trying the first action
        self.assertEqual(action, 0)

    # BUG FIX: a second, shadowing definition of
    # test_ucb_score_no_children_visited was removed here.  It duplicated
    # test_ucb_score_one_child_visited and, by reusing the method name,
    # silently prevented the real no-children-visited test from ever running.
# Allow running this test module directly (e.g. ``python this_file.py``).
if __name__ == '__main__':
    unittest.main()
| [
"game.Connect2Game",
"numpy.array",
"unittest.main",
"monte_carlo_tree_search.MCTS",
"monte_carlo_tree_search.Node",
"monte_carlo_tree_search.ucb_score"
] | [((8109, 8124), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8122, 8124), False, 'import unittest\n'), ((429, 443), 'game.Connect2Game', 'Connect2Game', ([], {}), '()\n', (441, 443), False, 'from game import Connect2Game\n'), ((527, 550), 'monte_carlo_tree_search.MCTS', 'MCTS', (['game', 'model', 'args'], {}), '(game, model, args)\n', (531, 550), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((1241, 1255), 'game.Connect2Game', 'Connect2Game', ([], {}), '()\n', (1253, 1255), False, 'from game import Connect2Game\n'), ((1339, 1362), 'monte_carlo_tree_search.MCTS', 'MCTS', (['game', 'model', 'args'], {}), '(game, model, args)\n', (1343, 1362), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((1822, 1836), 'game.Connect2Game', 'Connect2Game', ([], {}), '()\n', (1834, 1836), False, 'from game import Connect2Game\n'), ((1922, 1945), 'monte_carlo_tree_search.MCTS', 'MCTS', (['game', 'model', 'args'], {}), '(game, model, args)\n', (1926, 1945), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((2458, 2472), 'game.Connect2Game', 'Connect2Game', ([], {}), '()\n', (2470, 2472), False, 'from game import Connect2Game\n'), ((2557, 2580), 'monte_carlo_tree_search.MCTS', 'MCTS', (['game', 'model', 'args'], {}), '(game, model, args)\n', (2561, 2580), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((2989, 3009), 'monte_carlo_tree_search.Node', 'Node', (['(0.5)'], {'to_play': '(1)'}), '(0.5, to_play=1)\n', (2993, 3009), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((3277, 3297), 'monte_carlo_tree_search.Node', 'Node', (['(0.5)'], {'to_play': '(1)'}), '(0.5, to_play=1)\n', (3281, 3297), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((3311, 3332), 'monte_carlo_tree_search.Node', 'Node', (['(0.5)'], {'to_play': '(-1)'}), '(0.5, to_play=-1)\n', (3315, 3332), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), 
((3346, 3367), 'monte_carlo_tree_search.Node', 'Node', (['(0.5)'], {'to_play': '(-1)'}), '(0.5, to_play=-1)\n', (3350, 3367), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((3381, 3402), 'monte_carlo_tree_search.Node', 'Node', (['(0.5)'], {'to_play': '(-1)'}), '(0.5, to_play=-1)\n', (3385, 3402), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((3741, 3761), 'monte_carlo_tree_search.Node', 'Node', (['(0.5)'], {'to_play': '(1)'}), '(0.5, to_play=1)\n', (3745, 3761), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((4333, 4353), 'monte_carlo_tree_search.Node', 'Node', (['(0.5)'], {'to_play': '(1)'}), '(0.5, to_play=1)\n', (4337, 4353), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((4713, 4746), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[0]'], {}), '(node, node.children[0])\n', (4722, 4746), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((4765, 4798), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[1]'], {}), '(node, node.children[1])\n', (4774, 4798), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((4817, 4850), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[2]'], {}), '(node, node.children[2])\n', (4826, 4850), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((4869, 4902), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[3]'], {}), '(node, node.children[3])\n', (4878, 4902), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((5255, 5275), 'monte_carlo_tree_search.Node', 'Node', (['(0.5)'], {'to_play': '(1)'}), '(0.5, to_play=1)\n', (5259, 5275), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((5635, 5668), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[0]'], {}), '(node, node.children[0])\n', (5644, 5668), False, 
'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((5687, 5720), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[1]'], {}), '(node, node.children[1])\n', (5696, 5720), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((5739, 5772), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[2]'], {}), '(node, node.children[2])\n', (5748, 5772), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((5791, 5824), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[3]'], {}), '(node, node.children[3])\n', (5800, 5824), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((6327, 6347), 'monte_carlo_tree_search.Node', 'Node', (['(0.5)'], {'to_play': '(1)'}), '(0.5, to_play=1)\n', (6331, 6347), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((6708, 6741), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[0]'], {}), '(node, node.children[0])\n', (6717, 6741), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((6760, 6793), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[1]'], {}), '(node, node.children[1])\n', (6769, 6793), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((6812, 6845), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[2]'], {}), '(node, node.children[2])\n', (6821, 6845), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((6864, 6897), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[3]'], {}), '(node, node.children[3])\n', (6873, 6897), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((7155, 7175), 'monte_carlo_tree_search.Node', 'Node', (['(0.5)'], {'to_play': '(1)'}), '(0.5, to_play=1)\n', (7159, 7175), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((7535, 7568), 
'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[0]'], {}), '(node, node.children[0])\n', (7544, 7568), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((7587, 7620), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[1]'], {}), '(node, node.children[1])\n', (7596, 7620), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((7639, 7672), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[2]'], {}), '(node, node.children[2])\n', (7648, 7672), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((7691, 7724), 'monte_carlo_tree_search.ucb_score', 'ucb_score', (['node', 'node.children[3]'], {}), '(node, node.children[3])\n', (7700, 7724), False, 'from monte_carlo_tree_search import Node, MCTS, ucb_score\n'), ((370, 404), 'numpy.array', 'np.array', (['[0.26, 0.24, 0.24, 0.26]'], {}), '([0.26, 0.24, 0.24, 0.26])\n', (378, 404), True, 'import numpy as np\n'), ((1190, 1216), 'numpy.array', 'np.array', (['[0.3, 0.7, 0, 0]'], {}), '([0.3, 0.7, 0, 0])\n', (1198, 1216), True, 'import numpy as np\n'), ((1769, 1797), 'numpy.array', 'np.array', (['[0.51, 0.49, 0, 0]'], {}), '([0.51, 0.49, 0, 0])\n', (1777, 1797), True, 'import numpy as np\n'), ((2405, 2433), 'numpy.array', 'np.array', (['[0, 0.3, 0.3, 0.3]'], {}), '([0, 0.3, 0.3, 0.3])\n', (2413, 2433), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""First simple sklearn classifier"""
from __future__ import division # 1/2 == 0.5, as in Py3
from __future__ import absolute_import # avoid hiding global modules with locals
from __future__ import print_function # force use of print("hello")
from __future__ import unicode_literals # force unadorned strings "" to be unicode without prepending u""
import argparse
import os
import copy
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import linear_model
from sklearn import naive_bayes
from sklearn import cross_validation
from matplotlib import pyplot as plt
from nltk.corpus import stopwords
import unicodecsv
import sql_convenience
############
# NOTE
# this is a basic LogisticRegression classifier, using 5-fold cross validation
# and a cross entropy error measure (which should nicely fit this binary
# decision classification problem).
# do not trust this code to do anything useful in the real world!
############
def reader(class_name):
    """Read non-empty tweet texts from a CSV file.

    The file must start with the header row ["tweet_id", "tweet_text"];
    rows whose tweet text is empty or whitespace-only are skipped.

    :param class_name: path to the CSV file to read
    :returns: list of stripped tweet-text strings
    """
    # BUG FIX: use a context manager so the file handle is closed
    # deterministically (previously the handle from open() was leaked)
    with open(class_name) as class_file:
        class_reader = unicodecsv.reader(class_file, encoding='utf-8')
        row0 = next(class_reader)
        assert row0 == ["tweet_id", "tweet_text"]
        lines = []
        for tweet_id, tweet_text in class_reader:
            txt = tweet_text.strip()
            if txt:  # skip blank tweets
                lines.append(txt)
        return lines
def label_learned_set(vectorizer, clfl, threshold, validation_table):
    """Predict a class for every tweet in ``validation_table`` and write it back.

    An in-class (1) prediction is only kept when the classifier's confidence
    reaches ``threshold``; otherwise it is forced out-of-class (0).
    """
    rows = sql_convenience.extract_classifications_and_tweets(validation_table)
    for _existing_cls, tweet_id, tweet_text in rows:
        features = vectorizer.transform([tweet_text]).todense()
        label = clfl.predict(features)[0]  # unwrap the 1-element prediction array
        confidence = clfl.predict_proba(features)[0][label]
        # distrust low-confidence in-class predictions
        if label == 1 and confidence < threshold:
            label = 0
        sql_convenience.update_class(tweet_id, validation_table, label)
def check_classification(vectorizer, clfl):
    """Spot-check the trained classifier on two hand-written example tweets."""
    # expected in-class (about the Apple brand)
    brand_example = vectorizer.transform([u'really enjoying how the apple\'s iphone makes my ipad look small']).todense()
    print("1?", clfl.predict(brand_example), clfl.predict_proba(brand_example))  # -> 1 which is set 1 (is brand)
    # expected out-of-class (about the fruit)
    fruit_example = vectorizer.transform([u'i like my apple, eating it makes me happy']).todense()
    print("0?", clfl.predict(fruit_example), clfl.predict_proba(fruit_example))  # -> 0 which is set 0 (not brand)
def annotate_tokens(indices_for_large_coefficients, clf, vectorizer, plt):
    """Label the selected coefficient positions on the plot with their tokens.

    For each index in ``indices_for_large_coefficients``, places the
    corresponding vocabulary token at (index, coefficient value) via plt.text.
    """
    heights = clf.coef_[0][indices_for_large_coefficients]
    vocabulary = np.array(vectorizer.get_feature_names())
    chosen_tokens = vocabulary[indices_for_large_coefficients]
    for xpos, ypos, token in zip(indices_for_large_coefficients, heights, chosen_tokens):
        plt.text(xpos, ypos, token)
# Script entry point: load the in/out-of-class tweet CSVs, vectorize them,
# then fit three classifiers with 5-fold cross validation and plot their
# learned coefficients (annotated with the largest-coefficient tokens).
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Simple sklearn implementation, example usage "learn1.py scikit_testtrain_apple --validation_table=learn1_validation_apple"')
    parser.add_argument('table', help='Name of in and out of class data to read (e.g. scikit_validation_app)')
    parser.add_argument('--validation_table', help='Table of validation data - get tweets and write predicted class labels back (e.g. learn1_validation_apple)')
    parser.add_argument('--roc', default=False, action="store_true", help='Plot a Receiver Operating Characterics graph for the learning results')
    parser.add_argument('--pr', default=False, action="store_true", help='Plot a Precision/Recall graph for the learning results')
    parser.add_argument('--termmatrix', default=False, action="store_true", help='Draw a 2D matrix of tokens vs binary presence (or absence) using all training documents')
    args = parser.parse_args()

    # CSV file names are derived from the table name, e.g. data/<table>_in_class.csv
    data_dir = "data"
    in_class_name = os.path.join(data_dir, args.table + '_in_class.csv')
    out_class_name = os.path.join(data_dir, args.table + '_out_class.csv')

    in_class_lines = reader(in_class_name)
    out_class_lines = reader(out_class_name)

    # put all items into the training set
    # labels: 0 = out-of-class, 1 = in-class (order matches train_set below)
    train_set = out_class_lines + in_class_lines
    target = np.array([0] * len(out_class_lines) + [1] * len(in_class_lines))

    # choose a vectorizer to turn the tokens in tweets into a matrix of
    # examples (we can plot this further below using --termmatrix)
    stopWords = stopwords.words('english')
    MIN_DF = 2  # ignore tokens appearing in fewer than 2 documents
    vectorizer_binary = CountVectorizer(stop_words=stopWords, min_df=MIN_DF, binary=True)
    vectorizer_tfidf = TfidfVectorizer(stop_words=stopWords, min_df=MIN_DF)
    #vectorizer = vectorizer_tfidf
    vectorizer = vectorizer_binary

    trainVectorizerArray = vectorizer.fit_transform(train_set).toarray()
    print("Feature names (first 20):", vectorizer.get_feature_names()[:20], "...")
    print("Vectorized %d features" % (len(vectorizer.get_feature_names())))

    # one subplot per classifier variant
    MAX_PLOTS = 3
    f = plt.figure(1)
    plt.clf()
    # NOTE(review): subplot index 0 is rejected by modern matplotlib (indices
    # are 1-based); this call is dated and likely only worked on old versions.
    f = plt.subplot(MAX_PLOTS, 1, 0)
    for n in range(MAX_PLOTS):
        if n == 0:
            clf = naive_bayes.BernoulliNB()
            title = "Bernoulli Naive Bayes"
        if n == 1:
            clf = linear_model.LogisticRegression()
            title = "Logistic Regression l2 error"
        if n == 2:
            clf = linear_model.LogisticRegression(penalty='l1')
            title = "Logistic Regression l1 error"

        kf = cross_validation.KFold(n=len(target), n_folds=5, shuffle=True)
        # using a score isn't so helpful here (I think) as I want to know the
        # distance from the desired categories and a >0.5 threshold isn't
        # necessaryily the right thing to measure (I care about precision when
        # classifying, not recall, so the threshold matters)
        #cross_val_scores = cross_validation.cross_val_score(clf, trainVectorizerArray, target, cv=kf, n_jobs=-1)
        #print("Cross validation in/out of class test scores:" + str(cross_val_scores))
        #print("Accuracy: %0.3f (+/- %0.3f)" % (cross_val_scores.mean(), cross_val_scores.std() / 2))

        f = plt.subplot(MAX_PLOTS, 1, n + 1)
        plt.title(title)
        # refit on each fold; only the last fold's fit survives the loop and
        # is the model whose coefficients are plotted below
        for i, (train_rows, test_rows) in enumerate(kf):
            Y_train = target[train_rows]
            X_train = trainVectorizerArray[train_rows]
            X_test = trainVectorizerArray[test_rows]
            probas_test_ = clf.fit(X_train, Y_train).predict_proba(X_test)
            probas_train_ = clf.fit(X_train, Y_train).predict_proba(X_train)

        # plot the Logistic Regression coefficients
        if n == 1 or n == 2:
            coef = clf.coef_[0]
        if n == 0:
            # NOTE(review): for BernoulliNB this is the full 2-D coef_ array,
            # unlike the 1-D row used for the regressions -- confirm intended.
            coef = clf.coef_
        plt.plot(coef, 'b', alpha=0.3)
        plt.ylabel("Coefficient value")
        xmax = coef.shape[0]
        plt.xlim(xmax=xmax)
        plt.xlabel("Coefficient per term")
        # plot the tokens with the largest coefficients
        # (sorted copy: coef[-10]/coef[10] are the 10th-largest/-smallest values)
        coef = copy.copy(clf.coef_[0])
        coef.sort()
        annotate_tokens(np.where(clf.coef_ >= coef[-10])[1], clf, vectorizer, plt)
        annotate_tokens(np.where(clf.coef_ < coef[10])[1], clf, vectorizer, plt)
    #f = plt.subplot(MAX_PLOTS, 1, 1)
    #plt.title("{}: l2 penalty (top) vs l1 penalty (bottom) for {} cross fold models on {}".format(str(clf.__class__).split('.')[-1][:-2], len(kf), args.table))
    plt.show()
| [
"matplotlib.pyplot.ylabel",
"sklearn.naive_bayes.BernoulliNB",
"copy.copy",
"sql_convenience.update_class",
"nltk.corpus.stopwords.words",
"argparse.ArgumentParser",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotli... | [((1507, 1575), 'sql_convenience.extract_classifications_and_tweets', 'sql_convenience.extract_classifications_and_tweets', (['validation_table'], {}), '(validation_table)\n', (1557, 1575), False, 'import sql_convenience\n'), ((2904, 3075), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Simple sklearn implementation, example usage "learn1.py scikit_testtrain_apple --validation_table=learn1_validation_apple\\""""'}), '(description=\n \'Simple sklearn implementation, example usage "learn1.py scikit_testtrain_apple --validation_table=learn1_validation_apple"\'\n )\n', (2927, 3075), False, 'import argparse\n'), ((3862, 3914), 'os.path.join', 'os.path.join', (['data_dir', "(args.table + '_in_class.csv')"], {}), "(data_dir, args.table + '_in_class.csv')\n", (3874, 3914), False, 'import os\n'), ((3936, 3989), 'os.path.join', 'os.path.join', (['data_dir', "(args.table + '_out_class.csv')"], {}), "(data_dir, args.table + '_out_class.csv')\n", (3948, 3989), False, 'import os\n'), ((4405, 4431), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (4420, 4431), False, 'from nltk.corpus import stopwords\n'), ((4471, 4536), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'stop_words': 'stopWords', 'min_df': 'MIN_DF', 'binary': '(True)'}), '(stop_words=stopWords, min_df=MIN_DF, binary=True)\n', (4486, 4536), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((4560, 4612), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'stop_words': 'stopWords', 'min_df': 'MIN_DF'}), '(stop_words=stopWords, min_df=MIN_DF)\n', (4575, 4612), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((4943, 4956), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4953, 4956), True, 'from matplotlib import pyplot as plt\n'), ((4961, 4970), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4968, 
4970), True, 'from matplotlib import pyplot as plt\n'), ((4979, 5007), 'matplotlib.pyplot.subplot', 'plt.subplot', (['MAX_PLOTS', '(1)', '(0)'], {}), '(MAX_PLOTS, 1, 0)\n', (4990, 5007), True, 'from matplotlib import pyplot as plt\n'), ((6838, 6872), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Coefficient per term"""'], {}), "('Coefficient per term')\n", (6848, 6872), True, 'from matplotlib import pyplot as plt\n'), ((6936, 6959), 'copy.copy', 'copy.copy', (['clf.coef_[0]'], {}), '(clf.coef_[0])\n', (6945, 6959), False, 'import copy\n'), ((7336, 7346), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7344, 7346), True, 'from matplotlib import pyplot as plt\n'), ((2029, 2102), 'sql_convenience.update_class', 'sql_convenience.update_class', (['tweet_id', 'validation_table', 'predicted_class'], {}), '(tweet_id, validation_table, predicted_class)\n', (2057, 2102), False, 'import sql_convenience\n'), ((2840, 2861), 'matplotlib.pyplot.text', 'plt.text', (['x', 'y', 'token'], {}), '(x, y, token)\n', (2848, 2861), True, 'from matplotlib import pyplot as plt\n'), ((6089, 6121), 'matplotlib.pyplot.subplot', 'plt.subplot', (['MAX_PLOTS', '(1)', '(n + 1)'], {}), '(MAX_PLOTS, 1, n + 1)\n', (6100, 6121), True, 'from matplotlib import pyplot as plt\n'), ((6130, 6146), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6139, 6146), True, 'from matplotlib import pyplot as plt\n'), ((6813, 6832), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'xmax': 'xmax'}), '(xmax=xmax)\n', (6821, 6832), True, 'from matplotlib import pyplot as plt\n'), ((5077, 5102), 'sklearn.naive_bayes.BernoulliNB', 'naive_bayes.BernoulliNB', ([], {}), '()\n', (5100, 5102), False, 'from sklearn import naive_bayes\n'), ((5184, 5217), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {}), '()\n', (5215, 5217), False, 'from sklearn import linear_model\n'), ((5306, 5351), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], 
{'penalty': '"""l1"""'}), "(penalty='l1')\n", (5337, 5351), False, 'from sklearn import linear_model\n'), ((6701, 6731), 'matplotlib.pyplot.plot', 'plt.plot', (['coef', '"""b"""'], {'alpha': '(0.3)'}), "(coef, 'b', alpha=0.3)\n", (6709, 6731), True, 'from matplotlib import pyplot as plt\n'), ((6744, 6775), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Coefficient value"""'], {}), "('Coefficient value')\n", (6754, 6775), True, 'from matplotlib import pyplot as plt\n'), ((6996, 7028), 'numpy.where', 'np.where', (['(clf.coef_ >= coef[-10])'], {}), '(clf.coef_ >= coef[-10])\n', (7004, 7028), True, 'import numpy as np\n'), ((7075, 7105), 'numpy.where', 'np.where', (['(clf.coef_ < coef[10])'], {}), '(clf.coef_ < coef[10])\n', (7083, 7105), True, 'import numpy as np\n')] |
import os
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
import png
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from colormap.colors import Color, hex2rgb
from sklearn.metrics import average_precision_score as ap_score
from torch.utils.data import DataLoader
from torchvision import datasets, models, transforms
from tqdm import tqdm
from dataset import FacadeDataset
N_CLASS = 5


class Net(nn.Module):
    """Minimal per-pixel classifier for facade segmentation.

    A single 1x1 convolution acts as an independent linear classifier over
    the 3 input channels at every pixel; ReLU clamps scores non-negative.
    Input (N, 3, H, W) -> output (N, N_CLASS, H, W).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.n_class = N_CLASS
        self.layers = nn.Sequential(
            nn.Conv2d(3, self.n_class, kernel_size=1, padding=0),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        """Apply the layer stack and return per-class score maps."""
        return self.layers(x)
def save_label(label, path):
    """Write a 2-D integer label map to *path* as a 4-bit paletted PNG.

    Labels index a fixed 5-colour palette (background, blue, green,
    orange, red); values outside the palette trip the assertion.
    """
    colormap = [
        '#000000',
        '#0080FF',
        '#80FF80',
        '#FF8000',
        '#FF0000',
    ]
    assert(np.max(label)<len(colormap))
    palette = [hex2rgb(entry, normalise=False) for entry in colormap]
    writer = png.Writer(label.shape[1], label.shape[0],
                        palette=palette, bitdepth=4)
    with open(path, 'wb') as out_file:
        writer.write(out_file, label)
def train(trainloader, net, criterion, optimizer, device, epoch):
    '''
    Run one optimisation epoch of ``net`` over ``trainloader``.

    Fix: the original assigned ``running_loss = loss.item()`` each batch,
    so the epoch summary printed only the LAST batch's loss despite the
    variable's name. The loss is now accumulated and the mean over all
    batches is reported instead.

    :param trainloader: iterable of (images, labels) batches
    :param epoch: 1-based epoch index, used only for the printed summary
    '''
    start = time.time()
    running_loss = 0.0
    n_batches = 0
    net = net.train()
    for images, labels in tqdm(trainloader):
        images = images.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        output = net(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        n_batches += 1
    if n_batches:
        running_loss /= n_batches  # mean loss over the epoch
    end = time.time()
    print('[epoch %d] loss: %.3f elapsed time %.3f' %
          (epoch, running_loss, end-start))
def test(testloader, net, criterion, device):
    '''
    Evaluate ``net`` on ``testloader``; print and return the mean batch loss.
    '''
    total_loss = 0.
    n_batches = 0
    with torch.no_grad():
        net = net.eval()
        for images, labels in tqdm(testloader):
            images = images.to(device)
            labels = labels.to(device)
            batch_loss = criterion(net(images), labels)
            total_loss += batch_loss.item()
            n_batches += 1
    print(total_loss / n_batches)
    return (total_loss / n_batches)
def cal_AP(testloader, net, criterion, device):
    '''
    Print the average precision (AP) of each of the 5 classes over the
    one-hot test loader and return None.

    ``criterion`` is accepted for signature symmetry with ``test`` but is
    never used; ``losses``/``cnt``/``aps`` are likewise vestigial.
    '''
    losses = 0.
    cnt = 0
    with torch.no_grad():
        net = net.eval()
        # Flattened per-class scores and ground-truth maps, accumulated
        # over the whole loader before scoring.
        preds = [[] for _ in range(5)]
        heatmaps = [[] for _ in range(5)]
        for images, labels in tqdm(testloader):
            images = images.to(device)
            labels = labels.to(device)
            output = net(images).cpu().numpy()
            for c in range(5):
                preds[c].append(output[:, c].reshape(-1))
                heatmaps[c].append(labels[:, c].cpu().numpy().reshape(-1))
        aps = []
        for c in range(5):
            preds[c] = np.concatenate(preds[c])
            heatmaps[c] = np.concatenate(heatmaps[c])
            if heatmaps[c].max() == 0:
                # No positive pixels for this class: AP is undefined.
                ap = float('nan')
            else:
                ap = ap_score(heatmaps[c], preds[c])
            aps.append(ap)
            print("AP = {}".format(ap))
    # print(losses / cnt)
    return None
def get_result(testloader, net, device, folder='output_train'):
    """Dump per-image predictions as PNGs into ``./<folder>/``.

    For every test image this writes three files: ``y<i>.png`` (argmax-free
    hard labels obtained by thresholding each class map at 0.5, later
    classes overwriting earlier ones), ``gt<i>.png`` (ground truth) and
    ``x<i>.png`` (the de-normalised input). Returns nothing useful; the
    ``result`` list is never filled.
    """
    result = []
    cnt = 1
    with torch.no_grad():
        net = net.eval()
        cnt = 0  # note: shadows the initial cnt = 1 above
        for images, labels in tqdm(testloader):
            images = images.to(device)
            labels = labels.to(device)
            # batch_size is 1, so take the single prediction in the batch
            output = net(images)[0].cpu().numpy()
            c, h, w = output.shape
            assert(c == N_CLASS)
            y = np.zeros((h,w)).astype('uint8')
            for i in range(N_CLASS):
                mask = output[i]>0.5
                y[mask] = i
            gt = labels.cpu().data.numpy().squeeze(0).astype('uint8')
            save_label(y, './{}/y{}.png'.format(folder, cnt))
            save_label(gt, './{}/gt{}.png'.format(folder, cnt))
            # undo the [-1, 1] normalisation before saving the input image
            plt.imsave(
                './{}/x{}.png'.format(folder, cnt),
                ((images[0].cpu().data.numpy()+1)*128).astype(np.uint8).transpose(1,2,0))
            cnt += 1
def main():
    """Train the starter segmentation net, evaluate it, and dump outputs.

    Pipeline: load the facade datasets, train for 10 epochs with Adam +
    cross-entropy, report loss on a (currently train-set) evaluation
    loader, write predicted label images, save the model weights, and
    print per-class average precision.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # TODO change data_range to include all train/evaluation/test data.
    # TODO adjust batch_size.
    train_data = FacadeDataset(flag='train', data_range=(0,20), onehot=False)
    train_loader = DataLoader(train_data, batch_size=1)
    test_data = FacadeDataset(flag='test_dev', data_range=(0,114), onehot=False)
    test_loader = DataLoader(test_data, batch_size=1)
    # same test split again, but one-hot encoded for AP computation
    ap_data = FacadeDataset(flag='test_dev', data_range=(0,114), onehot=True)
    ap_loader = DataLoader(ap_data, batch_size=1)
    name = 'starter_net'
    net = Net().to(device)
    criterion = nn.CrossEntropyLoss() #TODO decide loss
    optimizer = torch.optim.Adam(net.parameters(), 1e-3, weight_decay=1e-5)
    print('\nStart training')
    for epoch in range(10): #TODO decide epochs
        print('-----------------Epoch = %d-----------------' % (epoch+1))
        train(train_loader, net, criterion, optimizer, device, epoch+1)
        # TODO create your evaluation set, load the evaluation set and test on evaluation set
        evaluation_loader = train_loader
        test(evaluation_loader, net, criterion, device)
    print('\nFinished Training, Testing on test set')
    test(test_loader, net, criterion, device)
    print('\nGenerating Unlabeled Result')
    result = get_result(test_loader, net, device, folder='output_test')
    torch.save(net.state_dict(), './models/model_{}.pth'.format(name))
    cal_AP(ap_loader, net, criterion, device)
if __name__ == "__main__":
    main()
| [
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"sklearn.metrics.average_precision_score",
"tqdm.tqdm",
"dataset.FacadeDataset",
"numpy.max",
"torch.nn.Conv2d",
"numpy.zeros",
"torch.cuda.is_available",
"colormap.colors.hex2rgb",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"torch.no_gr... | [((1243, 1313), 'png.Writer', 'png.Writer', (['label.shape[1]', 'label.shape[0]'], {'palette': 'colors', 'bitdepth': '(4)'}), '(label.shape[1], label.shape[0], palette=colors, bitdepth=4)\n', (1253, 1313), False, 'import png\n'), ((1494, 1505), 'time.time', 'time.time', ([], {}), '()\n', (1503, 1505), False, 'import time\n'), ((1577, 1594), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (1581, 1594), False, 'from tqdm import tqdm\n'), ((1860, 1871), 'time.time', 'time.time', ([], {}), '()\n', (1869, 1871), False, 'import time\n'), ((4592, 4653), 'dataset.FacadeDataset', 'FacadeDataset', ([], {'flag': '"""train"""', 'data_range': '(0, 20)', 'onehot': '(False)'}), "(flag='train', data_range=(0, 20), onehot=False)\n", (4605, 4653), False, 'from dataset import FacadeDataset\n'), ((4672, 4708), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': '(1)'}), '(train_data, batch_size=1)\n', (4682, 4708), False, 'from torch.utils.data import DataLoader\n'), ((4725, 4790), 'dataset.FacadeDataset', 'FacadeDataset', ([], {'flag': '"""test_dev"""', 'data_range': '(0, 114)', 'onehot': '(False)'}), "(flag='test_dev', data_range=(0, 114), onehot=False)\n", (4738, 4790), False, 'from dataset import FacadeDataset\n'), ((4808, 4843), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': '(1)'}), '(test_data, batch_size=1)\n', (4818, 4843), False, 'from torch.utils.data import DataLoader\n'), ((4858, 4922), 'dataset.FacadeDataset', 'FacadeDataset', ([], {'flag': '"""test_dev"""', 'data_range': '(0, 114)', 'onehot': '(True)'}), "(flag='test_dev', data_range=(0, 114), onehot=True)\n", (4871, 4922), False, 'from dataset import FacadeDataset\n'), ((4938, 4971), 'torch.utils.data.DataLoader', 'DataLoader', (['ap_data'], {'batch_size': '(1)'}), '(ap_data, batch_size=1)\n', (4948, 4971), False, 'from torch.utils.data import DataLoader\n'), ((5041, 5062), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], 
{}), '()\n', (5060, 5062), True, 'import torch.nn as nn\n'), ((1137, 1150), 'numpy.max', 'np.max', (['label'], {}), '(label)\n', (1143, 1150), True, 'import numpy as np\n'), ((1180, 1211), 'colormap.colors.hex2rgb', 'hex2rgb', (['color'], {'normalise': '(False)'}), '(color, normalise=False)\n', (1187, 1211), False, 'from colormap.colors import Color, hex2rgb\n'), ((2096, 2111), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2109, 2111), False, 'import torch\n'), ((2168, 2184), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (2172, 2184), False, 'from tqdm import tqdm\n'), ((2580, 2595), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2593, 2595), False, 'import torch\n'), ((2733, 2749), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (2737, 2749), False, 'from tqdm import tqdm\n'), ((3548, 3563), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3561, 3563), False, 'import torch\n'), ((3636, 3652), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (3640, 3652), False, 'from tqdm import tqdm\n'), ((771, 811), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'self.n_class', '(1)'], {'padding': '(0)'}), '(3, self.n_class, 1, padding=0)\n', (780, 811), True, 'import torch.nn as nn\n'), ((825, 846), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (832, 846), True, 'import torch.nn as nn\n'), ((3108, 3132), 'numpy.concatenate', 'np.concatenate', (['preds[c]'], {}), '(preds[c])\n', (3122, 3132), True, 'import numpy as np\n'), ((3159, 3186), 'numpy.concatenate', 'np.concatenate', (['heatmaps[c]'], {}), '(heatmaps[c])\n', (3173, 3186), True, 'import numpy as np\n'), ((4435, 4460), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4458, 4460), False, 'import torch\n'), ((3299, 3330), 'sklearn.metrics.average_precision_score', 'ap_score', (['heatmaps[c]', 'preds[c]'], {}), '(heatmaps[c], preds[c])\n', (3307, 3330), True, 'from sklearn.metrics import average_precision_score as 
ap_score\n'), ((3866, 3882), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (3874, 3882), True, 'import numpy as np\n')] |
import importlib.util
import logging
import os
import re
import signal
import sys
class FrameworkError(Exception):
    """Base exception for framework-integration failures."""
def load_module(name, path):
    """Import the Python source file at *path*, register it in
    ``sys.modules`` under *name*, execute it, and return the module."""
    spec = importlib.util.spec_from_file_location(name, path)
    mod = importlib.util.module_from_spec(spec)
    # Register before execution so intra-module imports resolve.
    sys.modules[name] = mod
    spec.loader.exec_module(mod)
    return mod
# Resolve the amlb.utils implementation: when AMLB_PATH points at a
# checked-out benchmark tree, load utils straight from that tree so this
# script also works without an installed amlb package; otherwise fall
# back to the regular package import.
amlb_path = os.environ.get("AMLB_PATH")
if amlb_path:
    utils = load_module("amlb.utils", os.path.join(amlb_path, "utils", "__init__.py"))
else:
    import amlb.utils as utils
def setup_logger():
    """Route INFO-and-above log records to stdout for the root logger."""
    console = logging.StreamHandler(sys.stdout)
    console.setLevel(logging.INFO)
    logging.basicConfig(handlers=[console])
    logging.getLogger().setLevel(logging.INFO)


setup_logger()

# Module-level logger used throughout this helper.
log = logging.getLogger(__name__)
def result(output_file=None,
           predictions=None, truth=None,
           probabilities=None, probabilities_labels=None,
           target_is_encoded=False,
           error_message=None,
           models_count=None,
           training_duration=None,
           predict_duration=None,
           **others):
    """Bundle every argument into a result dictionary.

    Equivalent to ``return locals()``: each named parameter becomes a key
    and the catch-all keyword arguments are stored under ``'others'``.
    """
    return dict(
        output_file=output_file,
        predictions=predictions,
        truth=truth,
        probabilities=probabilities,
        probabilities_labels=probabilities_labels,
        target_is_encoded=target_is_encoded,
        error_message=error_message,
        models_count=models_count,
        training_duration=training_duration,
        predict_duration=predict_duration,
        others=others,
    )
def output_subdir(name, config):
    """Ensure ``<output_dir>/<name>/<run-name>/<fold>`` exists and return it."""
    path = os.path.join(config.output_dir, name, config.name, str(config.fold))
    utils.touch(path, as_dir=True)
    return path
def save_metadata(config, **kwargs):
    """Dump the run config, merged with **kwargs, to the metadata file as pretty JSON."""
    obj = dict(config.__dict__)
    obj.update(kwargs)
    utils.json_dump(obj, config.output_metadata_file, style='pretty')
# Matches dataset attribute names that carry array payloads (X, y, data,
# and suffixed variants like X_train, y_test, ...).
data_keys = re.compile("^(X|y|data)(_.+)?$")
def call_run(run_fn):
    """Deserialize (dataset, config) from stdin, run *run_fn* under a
    timeout, and always persist the outcome to ``config.result_file``.

    Array-valued dataset attributes (matched by ``data_keys``) arrive as
    .npy file paths and are loaded here; prediction arrays in the result
    are written back out as .npy files so only paths cross the process
    boundary. Any exception — including timeouts delivered as signals —
    is captured into an error result rather than propagated.
    """
    import numpy as np
    params = utils.Namespace.from_dict(utils.json_loads(sys.stdin.read()))
    def load_data(name, path, **ignored):
        # Only array payload keys are materialized; other values pass through.
        if isinstance(path, str) and data_keys.match(name):
            return name, np.load(path, allow_pickle=True)
        return name, path
    log.info("Params passed to subprocess:\n%s", params)
    ds = utils.Namespace.walk(params.dataset, load_data)
    config = params.config
    config.framework_params = utils.Namespace.dict(config.framework_params)
    try:
        # Escalating interruptions: start with a Python TimeoutError, fall
        # through to ever harsher signals if the run keeps not dying.
        with utils.InterruptTimeout(config.job_timeout_seconds,
                               interruptions=[
                                   dict(sig=TimeoutError),
                                   dict(sig=signal.SIGTERM),
                                   dict(sig=signal.SIGQUIT),
                                   dict(sig=signal.SIGKILL),
                                   dict(interrupt='process', sig=signal.SIGKILL)
                               ],
                               wait_retry_secs=10):
            result = run_fn(ds, config)
            res = dict(result)
            for name in ['predictions', 'truth', 'probabilities']:
                arr = result[name]
                if arr is not None:
                    # replace in-memory arrays with on-disk .npy paths
                    res[name] = os.path.join(config.result_dir, '.'.join([name, 'npy']))
                    np.save(res[name], arr, allow_pickle=True)
    except BaseException as e:
        log.exception(e)
        res = dict(
            error_message=str(e),
            models_count=0
        )
    finally:
        # ensure there's no subprocess left
        utils.kill_proc_tree(include_parent=False, timeout=5)
        utils.json_dump(res, config.result_file, style='compact')
| [
"logging.getLogger",
"logging.basicConfig",
"logging.StreamHandler",
"amlb.utils.Namespace.walk",
"re.compile",
"amlb.utils.touch",
"os.environ.get",
"amlb.utils.Namespace.dict",
"os.path.join",
"numpy.save",
"amlb.utils.json_dump",
"sys.stdin.read",
"numpy.load",
"amlb.utils.kill_proc_tre... | [((369, 396), 'os.environ.get', 'os.environ.get', (['"""AMLB_PATH"""'], {}), "('AMLB_PATH')\n", (383, 396), False, 'import os\n'), ((795, 822), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (812, 822), False, 'import logging\n'), ((1511, 1543), 're.compile', 're.compile', (['"""^(X|y|data)(_.+)?$"""'], {}), "('^(X|y|data)(_.+)?$')\n", (1521, 1543), False, 'import re\n'), ((571, 604), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (592, 604), False, 'import logging\n'), ((669, 707), 'logging.basicConfig', 'logging.basicConfig', ([], {'handlers': 'handlers'}), '(handlers=handlers)\n', (688, 707), False, 'import logging\n'), ((719, 738), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (736, 738), False, 'import logging\n'), ((1282, 1314), 'amlb.utils.touch', 'utils.touch', (['subdir'], {'as_dir': '(True)'}), '(subdir, as_dir=True)\n', (1293, 1314), True, 'import amlb.utils as utils\n'), ((1431, 1496), 'amlb.utils.json_dump', 'utils.json_dump', (['obj', 'config.output_metadata_file'], {'style': '"""pretty"""'}), "(obj, config.output_metadata_file, style='pretty')\n", (1446, 1496), True, 'import amlb.utils as utils\n'), ((1921, 1968), 'amlb.utils.Namespace.walk', 'utils.Namespace.walk', (['params.dataset', 'load_data'], {}), '(params.dataset, load_data)\n', (1941, 1968), True, 'import amlb.utils as utils\n'), ((2027, 2072), 'amlb.utils.Namespace.dict', 'utils.Namespace.dict', (['config.framework_params'], {}), '(config.framework_params)\n', (2047, 2072), True, 'import amlb.utils as utils\n'), ((3275, 3332), 'amlb.utils.json_dump', 'utils.json_dump', (['res', 'config.result_file'], {'style': '"""compact"""'}), "(res, config.result_file, style='compact')\n", (3290, 3332), True, 'import amlb.utils as utils\n'), ((449, 496), 'os.path.join', 'os.path.join', (['amlb_path', '"""utils"""', '"""__init__.py"""'], {}), "(amlb_path, 'utils', '__init__.py')\n", (461, 
496), False, 'import os\n'), ((3216, 3269), 'amlb.utils.kill_proc_tree', 'utils.kill_proc_tree', ([], {'include_parent': '(False)', 'timeout': '(5)'}), '(include_parent=False, timeout=5)\n', (3236, 3269), True, 'import amlb.utils as utils\n'), ((1648, 1664), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (1662, 1664), False, 'import sys\n'), ((1795, 1827), 'numpy.load', 'np.load', (['path'], {'allow_pickle': '(True)'}), '(path, allow_pickle=True)\n', (1802, 1827), True, 'import numpy as np\n'), ((2961, 3003), 'numpy.save', 'np.save', (['res[name]', 'arr'], {'allow_pickle': '(True)'}), '(res[name], arr, allow_pickle=True)\n', (2968, 3003), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import torch
import numpy as np
import pandas as pd
def to_one_hot(y, n_class=2):
    """Return a float32 ``(len(y), n_class)`` one-hot matrix for the
    integer label vector *y*."""
    n_rows = y.shape[0]
    encoded = np.zeros((n_rows, n_class), dtype=np.float32)
    encoded[np.arange(n_rows), y] = 1
    return encoded
def to_torch(arr, cuda=True):
    """Convert *arr* to a torch tensor (pass-through if it already is one)
    and move it to the GPU when *cuda* is true."""
    if torch.is_tensor(arr):
        tensor = arr
    else:
        tensor = torch.from_numpy(arr)
    return tensor.cuda() if cuda else tensor
def to_numpy(X):
    """Return the underlying ndarray of a pandas Series/DataFrame;
    any other input is returned unchanged."""
    return X.values if isinstance(X, pd.core.generic.NDFrame) else X
def classwise_balance_weight(sample_weight, y):
    """Balance the weights between positive (1) and negative (0) class.

    Each class's weights are normalised to sum to one, then the whole
    vector is rescaled by ``n_samples / n_classes`` so the total mass is
    preserved on average. The input array is left untouched.
    """
    balanced = sample_weight.copy()
    classes = np.unique(y)
    for c in classes:
        members = (y == c)
        balanced[members] = balanced[members] / np.sum(balanced[members])
    return balanced * y.shape[0] / len(classes)
| [
"numpy.unique",
"torch.from_numpy",
"numpy.sum",
"torch.is_tensor",
"numpy.zeros",
"numpy.arange"
] | [((241, 284), 'numpy.zeros', 'np.zeros', (['(y.shape[0], n_class)', 'np.float32'], {}), '((y.shape[0], n_class), np.float32)\n', (249, 284), True, 'import numpy as np\n'), ((848, 860), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (857, 860), True, 'import numpy as np\n'), ((465, 485), 'torch.is_tensor', 'torch.is_tensor', (['arr'], {}), '(arr)\n', (480, 485), False, 'import torch\n'), ((491, 512), 'torch.from_numpy', 'torch.from_numpy', (['arr'], {}), '(arr)\n', (507, 512), False, 'import torch\n'), ((991, 1006), 'numpy.sum', 'np.sum', (['w[mask]'], {}), '(w[mask])\n', (997, 1006), True, 'import numpy as np\n'), ((292, 313), 'numpy.arange', 'np.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (301, 313), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
####################################################
# 作者: 刘朝阳
# 时间: 2020.05.01
# 更新时间: 2021.11.25
# 功能: 在计算PERCLOS时, 需要知道驾驶在正常情况下的眼睛开度, 来作为基准计算
# 使用说明: 自动调用, 无需操作
####################################################
import os
import numpy as np
import cv2
import dlib
from imutils import face_utils
from head_posture_estimation import head_posture_estimation
from aspect_ratio_estimation import aspect_ratio_estimation
HPE = head_posture_estimation()
ARE = aspect_ratio_estimation()
# dlib frontal-face detector: returns face bounding boxes in an image.
detector = dlib.get_frontal_face_detector()
# 68-point facial landmark predictor (model file expected next to the script).
predictor = dlib.shape_predictor('shape_predictor_68_face_landMARks.dat')
# Landmark index ranges for the left eye, right eye and mouth.
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]
# NOTE(review): this chained assignment makes EAR and everybody_EAR_mean
# aliases of the SAME list object, so appends to one show up in the other —
# almost certainly unintended; confirm before relying on either name.
EAR = everybody_EAR_mean =[]
EAR_all_per_person = []
EAR_all_per_person_open = []
pitch_all_per_person = []
pitch_mean_per_person = []
everybody_pitch_mean = []
everybody_EAR_min = []
def get_everybody_EARandMAR_standard(face_path):
    """Scan one image folder per person under *face_path* and record each
    person's baseline eye statistics for PERCLOS computation.

    Fixes over the original:
    - ``EAR_all_per_person_open = EAR_all_per_person = []`` chained the two
      names onto ONE list, so the open-eye filter had no effect and open
      samples were appended twice; they are now independent lists.
    - ``pitch_all_per_person`` was a module-level list that was never reset,
      so each person's "mean pitch" mixed in all previous people's samples;
      pitch samples are now collected per person.
    - Unreadable files and face-less folders are skipped instead of
      crashing / appending NaN.

    Returns the module-level accumulators
    ``(everybody_EAR_mean, everybody_EAR_min, everybody_pitch_mean)``,
    one entry per person processed.
    """
    for subdir in os.listdir(face_path):
        subpath = os.path.join(face_path, subdir)
        if not os.path.isdir(subpath):
            continue
        # Independent per-person accumulators (previously aliased/global).
        ear_all = []    # every EAR sample for this person
        ear_open = []   # only plausibly-open-eye samples
        pitch_all = []  # head pitch per detected face
        for filename in os.listdir(subpath):
            imgpath = os.path.join(subpath, filename)
            img = cv2.imread(imgpath, cv2.IMREAD_COLOR)
            if img is None:
                # Not an image (or unreadable) — skip it.
                continue
            grayimg = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
            for k, d in enumerate(detector(grayimg, 0)):
                shape = predictor(grayimg, d)
                shape_array = face_utils.shape_to_np(shape)
                leftEye = shape_array[lStart:lEnd]
                rightEye = shape_array[rStart:rEnd]
                reprojectdst, euler_angle, pitch, roll, yaw = HPE.get_head_pose(shape_array)
                pitch_all.append(pitch)
                ear = (ARE.eye_aspect_ratio(leftEye) +
                       ARE.eye_aspect_ratio(rightEye)) / 2.0
                ear_all.append(ear)
                # Empirical thresholds reject closed/blinking eyes so the
                # open-eye baseline is not dragged down.
                if 0.13 < ear < 0.23:
                    ear_open.append(ear)
        if not ear_all:
            # No detectable face for this person — record nothing.
            continue
        everybody_pitch_mean.append(np.mean(pitch_all))
        # Fall back to all samples if no frame passed the open-eye filter.
        everybody_EAR_mean.append(np.mean(ear_open) if ear_open else np.mean(ear_all))
        everybody_EAR_min.append(np.min(ear_all))
    return everybody_EAR_mean, everybody_EAR_min, everybody_pitch_mean
| [
"numpy.mean",
"os.listdir",
"head_posture_estimation.head_posture_estimation",
"dlib.shape_predictor",
"os.path.join",
"dlib.get_frontal_face_detector",
"os.path.isdir",
"cv2.cvtColor",
"numpy.min",
"imutils.face_utils.shape_to_np",
"aspect_ratio_estimation.aspect_ratio_estimation",
"cv2.imrea... | [((479, 504), 'head_posture_estimation.head_posture_estimation', 'head_posture_estimation', ([], {}), '()\n', (502, 504), False, 'from head_posture_estimation import head_posture_estimation\n'), ((512, 537), 'aspect_ratio_estimation.aspect_ratio_estimation', 'aspect_ratio_estimation', ([], {}), '()\n', (535, 537), False, 'from aspect_ratio_estimation import aspect_ratio_estimation\n'), ((600, 632), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (630, 632), False, 'import dlib\n'), ((683, 744), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""shape_predictor_68_face_landMARks.dat"""'], {}), "('shape_predictor_68_face_landMARks.dat')\n", (703, 744), False, 'import dlib\n'), ((1245, 1266), 'os.listdir', 'os.listdir', (['face_path'], {}), '(face_path)\n', (1255, 1266), False, 'import os\n'), ((1418, 1449), 'os.path.join', 'os.path.join', (['face_path', 'subdir'], {}), '(face_path, subdir)\n', (1430, 1449), False, 'import os\n'), ((1514, 1536), 'os.path.isdir', 'os.path.isdir', (['subpath'], {}), '(subpath)\n', (1527, 1536), False, 'import os\n'), ((1609, 1628), 'os.listdir', 'os.listdir', (['subpath'], {}), '(subpath)\n', (1619, 1628), False, 'import os\n'), ((3049, 3078), 'numpy.mean', 'np.mean', (['pitch_all_per_person'], {}), '(pitch_all_per_person)\n', (3056, 3078), True, 'import numpy as np\n'), ((3114, 3146), 'numpy.mean', 'np.mean', (['EAR_all_per_person_open'], {}), '(EAR_all_per_person_open)\n', (3121, 3146), True, 'import numpy as np\n'), ((3197, 3223), 'numpy.min', 'np.min', (['EAR_all_per_person'], {}), '(EAR_all_per_person)\n', (3203, 3223), True, 'import numpy as np\n'), ((1800, 1831), 'os.path.join', 'os.path.join', (['subpath', 'filename'], {}), '(subpath, filename)\n', (1812, 1831), False, 'import os\n'), ((1878, 1915), 'cv2.imread', 'cv2.imread', (['imgpath', 'cv2.IMREAD_COLOR'], {}), '(imgpath, cv2.IMREAD_COLOR)\n', (1888, 1915), False, 'import cv2\n'), ((1956, 1993), 'cv2.cvtColor', 
'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (1968, 1993), False, 'import cv2\n'), ((2227, 2256), 'imutils.face_utils.shape_to_np', 'face_utils.shape_to_np', (['shape'], {}), '(shape)\n', (2249, 2256), False, 'from imutils import face_utils\n')] |
"""
Creates a theano based gradient descent optimiser for finding good choices of
weights to combine model predictions.
"""
import theano as th
import theano.tensor as tt
import numpy as np
def compile_model_combination_weight_optimiser(lr_adjuster = lambda h, t: h):
    """Compile a theano gradient-descent optimiser over a single weight
    vector used to mix K models' class-probability predictions.

    The weights enter through a softmax, guaranteeing the effective mixing
    coefficients are non-negative and sum to one; the cost is the mean
    negative log-likelihood of the true one-hot labels.
    ``lr_adjuster(h, t)`` maps the current learning rate and step index to
    the next learning rate (identity by default).

    Returns ``(weights_optimiser, cost_func)``: the first runs ``n_steps``
    of gradient descent and returns the final weights; the second evaluates
    the cost for given (weights, predictions, labels).
    """
    model_weights = tt.vector('w') # indexed over K models
    model_preds = tt.tensor3('P') # indexed over N examples, M classes, K models
    true_labels = tt.matrix('Y') # indexed over N examples, M classes
    learning_rate = tt.scalar('h')
    n_steps = tt.iscalar('n_steps')
    # use softmax form to ensure weights all >=0 and sum to one
    comb_preds = (tt.sum(tt.exp(model_weights) * model_preds, axis=2) /
                  tt.sum(tt.exp(model_weights), axis=0))
    # mean negative log loss cost function
    cost = - tt.mean(tt.sum(tt.log(comb_preds) * true_labels, axis=1))
    # gradient of log loss cost with respect to weights
    dC_dW = lambda W: th.clone(th.gradient.jacobian(cost, model_weights),
                               {model_weights: W})
    # scan through gradient descent updates of weights, applying learning rate
    # adjuster at each step
    [Ws, hs], updates = th.scan(
        fn = lambda t, W, h: [W - h * dC_dW(W), lr_adjuster(h, t)],
        outputs_info = [model_weights, learning_rate],
        sequences = [th.tensor.arange(n_steps)],
        n_steps = n_steps,
        name = 'weight cost gradient descent')
    # create a function to get last updated weight from scan sequence
    weights_optimiser = th.function(
        inputs = [model_weights, model_preds, true_labels, learning_rate,
                  n_steps],
        outputs = Ws[-1],
        updates = updates,
        allow_input_downcast = True,
    )
    # also compile a function for evaluating cost function to check optimiser
    # performance / convergence
    cost_func = th.function([model_weights, model_preds, true_labels], cost)
    return weights_optimiser, cost_func
def compile_per_class_model_combination_weight_optimiser(lr_adjuster = lambda h, t: h):
    """Per-class variant of ``compile_model_combination_weight_optimiser``.

    Identical construction except the weights form an (M classes, K models)
    matrix, so each class gets its own softmax-normalised mixture over the
    K models (note the denominator sums over axis=1 here, not axis=0).
    Returns ``(weights_optimiser, cost_func)`` with the same semantics.
    """
    model_weights = tt.matrix('w') # indexed over M classes, K models
    model_preds = tt.tensor3('P') # indexed over N examples, M classes, K models
    true_labels = tt.matrix('Y') # indexed over N examples, M classes
    learning_rate = tt.scalar('h')
    n_steps = tt.iscalar('n_steps')
    # use softmax form to ensure weights all >=0 and sum to one
    comb_preds = (tt.sum(tt.exp(model_weights) * model_preds, axis=2) /
                  tt.sum(tt.exp(model_weights), axis=1))
    # mean negative log loss cost function
    cost = - tt.mean(tt.sum(tt.log(comb_preds) * true_labels, axis=1))
    # gradient of log loss cost with respect to weights
    dC_dW = lambda W: th.clone(th.gradient.jacobian(cost, model_weights),
                               {model_weights: W})
    # scan through gradient descent updates of weights, applying learning rate
    # adjuster at each step
    [Ws, hs], updates = th.scan(
        fn = lambda t, W, h: [W - h * dC_dW(W), lr_adjuster(h, t)],
        outputs_info = [model_weights, learning_rate],
        sequences = [th.tensor.arange(n_steps)],
        n_steps = n_steps,
        name = 'weight cost gradient descent')
    # create a function to get last updated weight from scan sequence
    weights_optimiser = th.function(
        inputs = [model_weights, model_preds, true_labels, learning_rate,
                  n_steps],
        outputs = Ws[-1],
        updates = updates,
        allow_input_downcast = True,
    )
    # also compile a function for evaluating cost function to check optimiser
    # performance / convergence
    cost_func = th.function([model_weights, model_preds, true_labels], cost)
    return weights_optimiser, cost_func
if __name__ == '__main__':
    """
    Smoke test: optimise per-class combination weights on randomly
    generated model predictions and one-hot labels, printing the cost
    before and after to eyeball convergence.
    """
    N_MODELS = 3
    N_CLASSES = 10
    N_DATA = 100
    SEED = 1234
    INIT_LEARNING_RATE = 0.1
    LR_ADJUSTER = lambda h, t: h  # constant learning rate
    N_STEP = 1000
    prng = np.random.RandomState(SEED)
    weights = np.zeros((N_CLASSES, N_MODELS))
    # random per-model predictions, normalised to proper distributions
    model_pred_vals = prng.rand(N_DATA, N_CLASSES, N_MODELS)
    model_pred_vals = model_pred_vals / model_pred_vals.sum(1)[:,None,:]
    # random one-hot labels: argmax of uniform noise per example
    true_label_vals = prng.rand(N_DATA, N_CLASSES)
    true_label_vals = true_label_vals == true_label_vals.max(axis=1)[:,None]
    optimiser, cost = compile_per_class_model_combination_weight_optimiser(LR_ADJUSTER)
    print('Initial weights {0}'.format(weights))
    print('Initial cost value {0}'.format(
        cost(weights, model_pred_vals, true_label_vals)))
    updated_weights = optimiser(weights, model_pred_vals, true_label_vals,
                                INIT_LEARNING_RATE, N_STEP)
    print('Final weights {0}'.format(updated_weights))
    print('Final cost value {0}'.format(
        cost(updated_weights, model_pred_vals, true_label_vals)))
| [
"theano.tensor.exp",
"theano.tensor.iscalar",
"theano.function",
"theano.gradient.jacobian",
"theano.tensor.matrix",
"theano.tensor.tensor3",
"theano.tensor.vector",
"theano.tensor.arange",
"numpy.zeros",
"theano.tensor.scalar",
"theano.tensor.log",
"numpy.random.RandomState"
] | [((290, 304), 'theano.tensor.vector', 'tt.vector', (['"""w"""'], {}), "('w')\n", (299, 304), True, 'import theano.tensor as tt\n'), ((347, 362), 'theano.tensor.tensor3', 'tt.tensor3', (['"""P"""'], {}), "('P')\n", (357, 362), True, 'import theano.tensor as tt\n'), ((428, 442), 'theano.tensor.matrix', 'tt.matrix', (['"""Y"""'], {}), "('Y')\n", (437, 442), True, 'import theano.tensor as tt\n'), ((500, 514), 'theano.tensor.scalar', 'tt.scalar', (['"""h"""'], {}), "('h')\n", (509, 514), True, 'import theano.tensor as tt\n'), ((529, 550), 'theano.tensor.iscalar', 'tt.iscalar', (['"""n_steps"""'], {}), "('n_steps')\n", (539, 550), True, 'import theano.tensor as tt\n'), ((1526, 1675), 'theano.function', 'th.function', ([], {'inputs': '[model_weights, model_preds, true_labels, learning_rate, n_steps]', 'outputs': 'Ws[-1]', 'updates': 'updates', 'allow_input_downcast': '(True)'}), '(inputs=[model_weights, model_preds, true_labels, learning_rate,\n n_steps], outputs=Ws[-1], updates=updates, allow_input_downcast=True)\n', (1537, 1675), True, 'import theano as th\n'), ((1864, 1924), 'theano.function', 'th.function', (['[model_weights, model_preds, true_labels]', 'cost'], {}), '([model_weights, model_preds, true_labels], cost)\n', (1875, 1924), True, 'import theano as th\n'), ((2074, 2088), 'theano.tensor.matrix', 'tt.matrix', (['"""w"""'], {}), "('w')\n", (2083, 2088), True, 'import theano.tensor as tt\n'), ((2142, 2157), 'theano.tensor.tensor3', 'tt.tensor3', (['"""P"""'], {}), "('P')\n", (2152, 2157), True, 'import theano.tensor as tt\n'), ((2223, 2237), 'theano.tensor.matrix', 'tt.matrix', (['"""Y"""'], {}), "('Y')\n", (2232, 2237), True, 'import theano.tensor as tt\n'), ((2295, 2309), 'theano.tensor.scalar', 'tt.scalar', (['"""h"""'], {}), "('h')\n", (2304, 2309), True, 'import theano.tensor as tt\n'), ((2324, 2345), 'theano.tensor.iscalar', 'tt.iscalar', (['"""n_steps"""'], {}), "('n_steps')\n", (2334, 2345), True, 'import theano.tensor as tt\n'), ((3321, 3470), 
'theano.function', 'th.function', ([], {'inputs': '[model_weights, model_preds, true_labels, learning_rate, n_steps]', 'outputs': 'Ws[-1]', 'updates': 'updates', 'allow_input_downcast': '(True)'}), '(inputs=[model_weights, model_preds, true_labels, learning_rate,\n n_steps], outputs=Ws[-1], updates=updates, allow_input_downcast=True)\n', (3332, 3470), True, 'import theano as th\n'), ((3659, 3719), 'theano.function', 'th.function', (['[model_weights, model_preds, true_labels]', 'cost'], {}), '([model_weights, model_preds, true_labels], cost)\n', (3670, 3719), True, 'import theano as th\n'), ((4035, 4062), 'numpy.random.RandomState', 'np.random.RandomState', (['SEED'], {}), '(SEED)\n', (4056, 4062), True, 'import numpy as np\n'), ((4077, 4108), 'numpy.zeros', 'np.zeros', (['(N_CLASSES, N_MODELS)'], {}), '((N_CLASSES, N_MODELS))\n', (4085, 4108), True, 'import numpy as np\n'), ((713, 734), 'theano.tensor.exp', 'tt.exp', (['model_weights'], {}), '(model_weights)\n', (719, 734), True, 'import theano.tensor as tt\n'), ((946, 987), 'theano.gradient.jacobian', 'th.gradient.jacobian', (['cost', 'model_weights'], {}), '(cost, model_weights)\n', (966, 987), True, 'import theano as th\n'), ((2508, 2529), 'theano.tensor.exp', 'tt.exp', (['model_weights'], {}), '(model_weights)\n', (2514, 2529), True, 'import theano.tensor as tt\n'), ((2741, 2782), 'theano.gradient.jacobian', 'th.gradient.jacobian', (['cost', 'model_weights'], {}), '(cost, model_weights)\n', (2761, 2782), True, 'import theano as th\n'), ((640, 661), 'theano.tensor.exp', 'tt.exp', (['model_weights'], {}), '(model_weights)\n', (646, 661), True, 'import theano.tensor as tt\n'), ((1328, 1353), 'theano.tensor.arange', 'th.tensor.arange', (['n_steps'], {}), '(n_steps)\n', (1344, 1353), True, 'import theano as th\n'), ((2435, 2456), 'theano.tensor.exp', 'tt.exp', (['model_weights'], {}), '(model_weights)\n', (2441, 2456), True, 'import theano.tensor as tt\n'), ((3123, 3148), 'theano.tensor.arange', 'th.tensor.arange', 
(['n_steps'], {}), '(n_steps)\n', (3139, 3148), True, 'import theano as th\n'), ((816, 834), 'theano.tensor.log', 'tt.log', (['comb_preds'], {}), '(comb_preds)\n', (822, 834), True, 'import theano.tensor as tt\n'), ((2611, 2629), 'theano.tensor.log', 'tt.log', (['comb_preds'], {}), '(comb_preds)\n', (2617, 2629), True, 'import theano.tensor as tt\n')] |
"""
Some basic inference functions adapted from my inferno module which should be available
here soon:
https://github.com/nealegibson/inferno
Really they are just rewritten versions of https://github.com/nealegibson/Infer
But there are many other options for optimisers/MCMCs/etc, and they should (in principle)
all do much the same job!
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fmin
from tqdm.auto import tqdm
import time
def fopt(f,x0,var=None,args=(),min=False,**kwargs):
  """
  Optimisation function using scipy's fmin.
  This uses a simple wrapper to allow maximising as well as minimising functions, as well
  as allowing for a fixed set of parameters. inferno.opt or inferno.optimise has more
  options.

  f - function to be optimised, which can be called as f(x0,*args)
  x0 - array (or array-like) of starting points
  var - array with the same length as x0 indicating variable parameters. Parameters are
    variable if >0, so boolean True/False, integers, or even error arrays will work
  args - additional arguments passed to f (any sequence; converted to a tuple for fmin)
  min - if True minimises rather than maximises the function f wrt x0
    (parameter name kept for backward compatibility even though it shadows the
    builtin `min` inside this function)
  kwargs - additional keyword args are passed to fmin (e.g. disp, xtol, maxiter)

  Returns the full parameter array with the variable entries replaced by the
  optimised values. The result is always a float array, so integer starting
  points are no longer silently truncated.
  """
  # Work on a float copy so fmin's float results are not truncated if the
  # caller passes an integer array, and so plain lists are accepted too.
  # (Also fixes the mutable default argument args=[] -> args=().)
  x = np.array(x0, dtype=float)
  if var is None: var = np.ones(x.size)
  var_ind = np.asarray(var)>0
  #define wrapper function to re-construct the full parameter array from subset
  def wrapper(p,*wargs):
    x[var_ind] = p # splice the variable subset back into the full vector
    # fmin minimises, so negate f to maximise unless min=True
    return f(x,*wargs) if min else -f(x,*wargs)
  #call scipy's fmin on the wrapper function
  x[var_ind] = fmin(wrapper,x[var_ind],args=tuple(args),**kwargs)
  return x
def apply_uniform_logPrior(logL,bounds):
  """
  Wrap a log-likelihood function with a uniform (box) prior.

  logL - input function of form logL(p,*args,**kwargs)
  bounds - list of (lower,upper) bounds for each parameter; use None for an
    unbounded parameter, e.g. for a parameter vector of length 5:
      bounds = [(0,1),(-np.inf,np.inf),None,(-10,10),(2,9)]

  Returns a callable logP with the same signature as logL:
    logP = apply_uniform_logPrior(logL,bounds)
  logP evaluates to -np.inf outside the prior box, and to logL(p,...) inside.
  """
  #unpack per-parameter limits, treating None entries as unbounded
  lower = np.array([-np.inf if b is None else b[0] for b in bounds])
  upper = np.array([np.inf if b is None else b[1] for b in bounds])
  #the decorated posterior distribution
  def logP(p,*args,**kwargs):
    #outside the box -> zero prior probability
    if np.any(p<lower) or np.any(p>upper):
      return -np.inf
    #inside -> defer to the likelihood
    return logL(p,*args,**kwargs)
  return logP
def uniform_logPrior(bounds):
  """
  Construct a standalone uniform log-prior from a list of bounds.

  Entries of `bounds` that are tuples are read as (lower,upper) limits; any
  non-tuple entry (e.g. None) leaves that parameter unbounded. Returns a
  function log_prior(x,...) evaluating to 0. inside the box, -np.inf outside.
  """
  lower = np.array([b[0] if type(b) is tuple else -np.inf for b in bounds])
  upper = np.array([b[1] if type(b) is tuple else np.inf for b in bounds])
  def log_prior(x,*args,**kwargs):
    """Uniform (box) log-prior: 0. inside the bounds, -np.inf outside."""
    outside = np.any(x<lower) or np.any(x>upper)
    return -np.inf if outside else 0.
  return log_prior
def miniMCMC(f,x,x_err,burnin,chain,N=32,args=[],a=2,dlogP=50):
  """
  Simple MCMC function. This is missing a lot of functionality and checks of more
  extensive codes, see inferno.mcmc for a fully-featured mcmc implementation with multiple
  flavours of MCMC.

  This version runs a simple implementation of an Affine Invariant MCMC (this is a
  misnomer as most non-trivial Metropolis Hastings steps can be affine invariant!).
  However, this MCMC flavour does require minimal tuning, and therefore is good for
  testing. See Goodman & Weare (2010) for a description of the algorithm, and
  Foreman-Mackey et al. (2012) for another clear description of the algorithm (and of the
  widely used 'emcee' implementation).

  The basic idea is that at each step, we loop through each chain in turn, and pick
  another random chain, and create a proposal based on the positions of the current and
  random other chain. This implementation splits the set of chains in two, and picks a
  random chain from the other set. This allows the proposals to be pre-computed, and also
  for the logPosteriors to be computed in parallel. See Foreman-Mackey et al. (2012) for
  an explanation.

  The initial set of chains are simply drawn from a diagonalised Gaussian distribution.
  Chains are replaced at random if more than dlogP from maximum likelihood to ensure the
  starting points are ok. If more than half the points are outside this range code will
  raise an exception. This is usually because the starting distribution is too large for
  one of the parameters, and/or the starting point is far from the max. This can usually
  be fixed by calling again with smaller x_err. Note that full mcmc implementation
  inferno.mcmc has many more features for refining starting points and running different
  flavours of MCMC.

  The total number of samples will be N * (burnin * chain)

  inputs
  ------
  f - logPosterior function, which can be called as f(x,*args), and returns the value
    of the logPosterior for the parameter vector x
  x - array of starting points (means) to populate initial chains
  x_err - corresponding uncertainties (stdevs)
  burnin - length of burnin period, where current state is not recorded
  chain - length of chain, where each state of the chain is recorded
  N - number of chains/walkers, must be even greater than 16
  args - additional arguments to the logPosterior f - i.e. f(x,*args)
  a - parameter used to control the acceptance ratio. This can be varied based on the
    acceptance, but is fixed here. See inferno.mcmc(mode="AffInv")
  dlogP - starting points for chains are rejected if more than dlogP from the maximum
    likelihood computed from the initial draw. This will include points in restricted
    prior space (ie with f(x)=-np.inf). If more than a quarter are outside this
    range, will raise an exception

  returns a dictionary with the following parameters
  --------------------------------------------------
  'p' - means of the parameters
  'e' - standard deviation of the parameters
  'chains' - array of chains of shape: (chains x N x len(x))
  'logP' - corresponding values of logP at each point in the chain, shape: (chains x N)
  'Xinit' - initial points in the chain, useful for debugging, shape: (N x len(x))
  'Xinit_logP' - corresponding values of logP, shape: (N)
  """

  #check a few inputs are ok
  assert N%2==0 and N>16, "N must be even and greater than 16"

  #define simple mapping function, written in this way to allow easy parallelisation with multiprocessing
  #note that `f` and `args` are captured by closure, so map only needs the parameter vectors
  def f_args(x): return f(x,*args) #create simple wrapper function that doesn't require args
  def map_func(X): return np.array(list(map(f_args,X))) #create mapping function using default map

  #get starting points for the chains and compute logP for each
  X = np.random.multivariate_normal(x,np.diag(x_err**2),N) #use gaussian distribution
  XlogP = map_func(X) #compute logP
  Xinit,Xinit_logP=np.copy(X),np.copy(XlogP)

  #define arrays for chains
  chains = np.empty((chain,N,x.size)) # accepted chains
  logP = np.empty((chain,N)) # accepted posterior values
  n_steps = burnin+chain #and total number of steps
  # NOTE(review): acc is sized (n_steps,N) but only rows [0,chain) are written below,
  # so the printed "Final acceptance" is diluted by the burnin rows - verify intent
  acc = np.full((n_steps,N),False) # acceptance array, start with all False

  #re-draw starting points for outliers
  cull_index = XlogP.max() - XlogP > dlogP
  if np.sum(~cull_index) < np.sum(x_err>0.)*2: #raise exception if number of good points is too low
    raise ValueError("too many points ({}/{}) with ndim {} are outside acceptable range, use smaller x_err".format(np.sum(cull_index),len(cull_index),np.sum(x_err>0.)))
  if np.any(cull_index):
    print("redrawing {}/{} points".format(np.sum(cull_index),N))
    ind_good = np.where(~cull_index)[0]
    good_points_ind = np.random.choice(ind_good,cull_index.sum())
    X[cull_index],XlogP[cull_index] = X[good_points_ind],XlogP[good_points_ind]

  #predefine random arrays, for acceptance, step sizes, and random other chain
  RandNoArr = np.random.rand(n_steps,N) #for acceptance step
  #then z and z^D-1 used in proposal and acceptance
  #here D is the number of free parameters, i.e. those with x_err > 0
  z = (np.random.rand(n_steps,N) * (np.sqrt(4.*a)-np.sqrt(4./a)) + np.sqrt(4./a))**2 / 4.
  z_Dm1 = z**(np.sum(x_err>0.)-1)
  #pick random other chain to use for each step
  rand_chain = np.random.randint(0,N//2,(n_steps,N)) #first pick a random value from 0 to N//2 for each chain
  rand_chain[:,:N//2]+=N//2 #then add on N//2 for the 1st set
  slices = [slice(0,N//2),slice(N//2,None)]

  start_time = time.time() #get start time

  #compute MCMC chain
  for i in tqdm(range(n_steps),position=0,desc='running mcmc chain'):
    for sl in slices: #loop over each half of chains in turn
      #get proposal steps and compute logP
      X_prop = X[rand_chain[i,sl]] + z[i][sl,np.newaxis] * (X[sl] - X[rand_chain[i,sl]])
      XlogP_prop = map_func(X_prop) #compute logP for proposal steps
      #accept or reject proposal steps
      accepted = RandNoArr[i,sl] < z_Dm1[i,sl] * np.exp(XlogP_prop - XlogP[sl])
      #X[sl] is a basic-slice view, so these fancy-indexed assignments write back into X
      X[sl][accepted] = X_prop[accepted]
      XlogP[sl][accepted] = XlogP_prop[accepted]
      #store results in chain/acceptance arrays
      if i >= burnin:
        acc[i-burnin,sl] = accepted
        chains[i-burnin,sl] = X[sl]
        logP[i-burnin,sl] = XlogP[sl]

  ts = time.time() - start_time
  print('Total time: {:.0f}m {:.2f}s'.format(ts // 60., ts % 60.))
  print("Final acceptance = {}%".format(acc.sum()/acc.size*100))

  #summarise the posterior over all recorded steps and all walkers
  return dict(p=chains.mean(axis=(0,1)),e=chains.std(axis=(0,1)),chains=chains,logP=logP,Xinit=Xinit,Xinit_logP=Xinit_logP)
def miniSamplePlot(X,N=None,labels=None,samples=300,x=None,left=0.07,bottom=0.07,right=0.93,top=0.93,wspace=0.03,hspace=0.03):
  """
  Create a simple corner-style plot of MCMC chains: parameter histograms on
  the diagonal and pairwise scatter plots below it. Parameters that do not
  vary across the chains are filtered out before plotting.

  X - chains (chain_length x N_chains x N_pars)
  N - reshape into N pseudo chains (if None, use X.shape[1] as-is)
  labels - labels for each parameter (defaults to theta_i)
  samples - no of samples to plot from each chain
  x - optional 'true' parameter vector, marked with dashed cross-hairs
  left/bottom/right/top/wspace/hspace - figure margins passed to
    plt.subplots_adjust
  """
  assert X.ndim==3
  if N is not None and N>1:
    X = X.reshape(-1,N,X.shape[-1])
  else: N = X.shape[1]

  #define labels if None
  if labels is None: labels = [r'$\theta_{{{}}}$'.format(i) for i in range(X.shape[-1])]

  #create filter for any fixed parameters (zero stdev over steps and chains)
  filt = ~np.isclose(np.std(X,axis=(0,1)),0)
  S = X[...,filt]
  labels = np.array(labels)[filt]
  if x is not None: x = x[filt]

  #first get the axes - stored in a dict keyed by '{row}{col}' strings
  plt.figure()
  ax = {}
  D = filt.sum() #number of variable parameters
  for i in range(D): #loop over the parameter indexes supplied
    for q in range(i+1):
      ax['{}{}'.format(i,q)] = plt.subplot(D,D,i*D+q+1,xticks=[],yticks=[])
      if i == (D-1): ax['{}{}'.format(i,q)].set_xlabel(labels[q])
    ax['{}{}'.format(i,0)].set_ylabel(labels[i])

  #do histograms (diagonal panels)
  for n in range(N): #loop over chains
    for i in range(D): #loop over parameters
      ax['{}{}'.format(i,i)].hist(S[:,n,i],20,histtype='step',density=1)
      if n==0 and x is not None: ax['{}{}'.format(i,i)].axvline(x[i],color='0.5',lw=1,ls='--')

  #do scatter plots (off-diagonal panels, random subsample of steps)
  for n in range(N): # loop over chains
    ind = np.random.randint(0,X.shape[0],samples)
    #loop over the axes (except-diagonals) and make scatter plot
    for i in range(D): #loop over the parameter indexes supplied
      for q in range(i):
        ax['{}{}'.format(i,q)].plot(S[:,n,q][ind],S[:,n,i][ind],'o',ms=3,alpha=0.3)
        if n==0 and x is not None:
          ax['{}{}'.format(i,q)].axvline(x[q],color='0.5',lw=1,ls='--')
          ax['{}{}'.format(i,q)].axhline(x[i],color='0.5',lw=1,ls='--')
        if n==N-1:
          #sync x-range with the diagonal histogram panel
          #(set_xlim() with no args returns the current limits - presumably relied on here; verify)
          ax['{}{}'.format(i,q)].set_xlim(ax['{}{}'.format(q,q)].set_xlim())

  plt.subplots_adjust(left=left,bottom=bottom,right=right,top=top,wspace=wspace,hspace=hspace)
| [
"numpy.sqrt",
"numpy.random.rand",
"numpy.array",
"scipy.optimize.fmin",
"numpy.where",
"numpy.exp",
"numpy.empty",
"numpy.ones",
"numpy.any",
"numpy.std",
"time.time",
"matplotlib.pyplot.subplots_adjust",
"numpy.copy",
"numpy.diag",
"numpy.sum",
"numpy.random.randint",
"matplotlib.p... | [((1305, 1316), 'numpy.copy', 'np.copy', (['x0'], {}), '(x0)\n', (1312, 1316), True, 'import numpy as np\n'), ((1684, 1731), 'scipy.optimize.fmin', 'fmin', (['wrapper', 'x0[var_ind]'], {'args': 'args'}), '(wrapper, x0[var_ind], args=args, **kwargs)\n', (1688, 1731), False, 'from scipy.optimize import fmin\n'), ((2403, 2467), 'numpy.array', 'np.array', (['[(i[0] if i is not None else -np.inf) for i in bounds]'], {}), '([(i[0] if i is not None else -np.inf) for i in bounds])\n', (2411, 2467), True, 'import numpy as np\n'), ((2481, 2544), 'numpy.array', 'np.array', (['[(i[1] if i is not None else np.inf) for i in bounds]'], {}), '([(i[1] if i is not None else np.inf) for i in bounds])\n', (2489, 2544), True, 'import numpy as np\n'), ((7344, 7372), 'numpy.empty', 'np.empty', (['(chain, N, x.size)'], {}), '((chain, N, x.size))\n', (7352, 7372), True, 'import numpy as np\n'), ((7398, 7418), 'numpy.empty', 'np.empty', (['(chain, N)'], {}), '((chain, N))\n', (7406, 7418), True, 'import numpy as np\n'), ((7506, 7534), 'numpy.full', 'np.full', (['(n_steps, N)', '(False)'], {}), '((n_steps, N), False)\n', (7513, 7534), True, 'import numpy as np\n'), ((7934, 7952), 'numpy.any', 'np.any', (['cull_index'], {}), '(cull_index)\n', (7940, 7952), True, 'import numpy as np\n'), ((8301, 8327), 'numpy.random.rand', 'np.random.rand', (['n_steps', 'N'], {}), '(n_steps, N)\n', (8315, 8327), True, 'import numpy as np\n'), ((8587, 8629), 'numpy.random.randint', 'np.random.randint', (['(0)', '(N // 2)', '(n_steps, N)'], {}), '(0, N // 2, (n_steps, N))\n', (8604, 8629), True, 'import numpy as np\n'), ((8806, 8817), 'time.time', 'time.time', ([], {}), '()\n', (8815, 8817), False, 'import time\n'), ((10669, 10681), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10679, 10681), True, 'import matplotlib.pyplot as plt\n'), ((11936, 12038), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': 'left', 'bottom': 'bottom', 'right': 'right', 'top': 
'top', 'wspace': 'wspace', 'hspace': 'hspace'}), '(left=left, bottom=bottom, right=right, top=top, wspace=\n wspace, hspace=hspace)\n', (11955, 12038), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1280), 'numpy.ones', 'np.ones', (['x0.size'], {}), '(x0.size)\n', (1271, 1280), True, 'import numpy as np\n'), ((2678, 2700), 'numpy.any', 'np.any', (['(p < lower_lims)'], {}), '(p < lower_lims)\n', (2684, 2700), True, 'import numpy as np\n'), ((2722, 2744), 'numpy.any', 'np.any', (['(p > upper_lims)'], {}), '(p > upper_lims)\n', (2728, 2744), True, 'import numpy as np\n'), ((3203, 3220), 'numpy.any', 'np.any', (['(x < lower)'], {}), '(x < lower)\n', (3209, 3220), True, 'import numpy as np\n'), ((7173, 7192), 'numpy.diag', 'np.diag', (['(x_err ** 2)'], {}), '(x_err ** 2)\n', (7180, 7192), True, 'import numpy as np\n'), ((7276, 7286), 'numpy.copy', 'np.copy', (['X'], {}), '(X)\n', (7283, 7286), True, 'import numpy as np\n'), ((7287, 7301), 'numpy.copy', 'np.copy', (['XlogP'], {}), '(XlogP)\n', (7294, 7301), True, 'import numpy as np\n'), ((7665, 7684), 'numpy.sum', 'np.sum', (['(~cull_index)'], {}), '(~cull_index)\n', (7671, 7684), True, 'import numpy as np\n'), ((9588, 9599), 'time.time', 'time.time', ([], {}), '()\n', (9597, 9599), False, 'import time\n'), ((10587, 10603), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (10595, 10603), True, 'import numpy as np\n'), ((11366, 11407), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X.shape[0]', 'samples'], {}), '(0, X.shape[0], samples)\n', (11383, 11407), True, 'import numpy as np\n'), ((3244, 3261), 'numpy.any', 'np.any', (['(x > upper)'], {}), '(x > upper)\n', (3250, 3261), True, 'import numpy as np\n'), ((7687, 7706), 'numpy.sum', 'np.sum', (['(x_err > 0.0)'], {}), '(x_err > 0.0)\n', (7693, 7706), True, 'import numpy as np\n'), ((8034, 8055), 'numpy.where', 'np.where', (['(~cull_index)'], {}), '(~cull_index)\n', (8042, 8055), True, 'import numpy as np\n'), ((8504, 8523), 'numpy.sum', 'np.sum', 
(['(x_err > 0.0)'], {}), '(x_err > 0.0)\n', (8510, 8523), True, 'import numpy as np\n'), ((10534, 10556), 'numpy.std', 'np.std', (['X'], {'axis': '(0, 1)'}), '(X, axis=(0, 1))\n', (10540, 10556), True, 'import numpy as np\n'), ((10862, 10916), 'matplotlib.pyplot.subplot', 'plt.subplot', (['D', 'D', '(i * D + q + 1)'], {'xticks': '[]', 'yticks': '[]'}), '(D, D, i * D + q + 1, xticks=[], yticks=[])\n', (10873, 10916), True, 'import matplotlib.pyplot as plt\n'), ((7875, 7893), 'numpy.sum', 'np.sum', (['cull_index'], {}), '(cull_index)\n', (7881, 7893), True, 'import numpy as np\n'), ((7910, 7929), 'numpy.sum', 'np.sum', (['(x_err > 0.0)'], {}), '(x_err > 0.0)\n', (7916, 7929), True, 'import numpy as np\n'), ((7996, 8014), 'numpy.sum', 'np.sum', (['cull_index'], {}), '(cull_index)\n', (8002, 8014), True, 'import numpy as np\n'), ((8467, 8483), 'numpy.sqrt', 'np.sqrt', (['(4.0 / a)'], {}), '(4.0 / a)\n', (8474, 8483), True, 'import numpy as np\n'), ((8407, 8433), 'numpy.random.rand', 'np.random.rand', (['n_steps', 'N'], {}), '(n_steps, N)\n', (8421, 8433), True, 'import numpy as np\n'), ((9279, 9309), 'numpy.exp', 'np.exp', (['(XlogP_prop - XlogP[sl])'], {}), '(XlogP_prop - XlogP[sl])\n', (9285, 9309), True, 'import numpy as np\n'), ((8436, 8452), 'numpy.sqrt', 'np.sqrt', (['(4.0 * a)'], {}), '(4.0 * a)\n', (8443, 8452), True, 'import numpy as np\n'), ((8450, 8466), 'numpy.sqrt', 'np.sqrt', (['(4.0 / a)'], {}), '(4.0 / a)\n', (8457, 8466), True, 'import numpy as np\n')] |
from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition
import pyomo.environ as en
import os
import numpy as np
import logging
logger = logging.getLogger(__name__)
####################################################################
# Define some useful container objects to define the optimisation objectives
class OptimiserObjective(object):
    """Integer identifiers for the individual optimisation objectives that can
    be combined (see OptimiserObjectiveSet) into a single objective function."""
    ConnectionPointCost = 1
    ConnectionPointEnergy = 2
    ThroughputCost = 3
    Throughput = 4
    GreedyGenerationCharging = 5
    GreedyDemandDischarging = 6
    EqualStorageActions = 7
    ConnectionPointPeakPower = 8
    ConnectionPointQuantisedPeak = 9
    PiecewiseLinear = 10
    LocalModelsCost = 11
    LocalGridMinimiser = 12
    LocalThirdParty = 13
    LocalGridPeakPower = 14
class OptimiserObjectiveSet(object):
    """Pre-defined bundles of OptimiserObjective constants, grouping individual
    objectives into commonly used optimisation strategies."""
    FinancialOptimisation = [OptimiserObjective.ConnectionPointCost,
                             #OptimiserObjective.GreedyGenerationCharging,
                             OptimiserObjective.ThroughputCost,
                             OptimiserObjective.EqualStorageActions]

    EnergyOptimisation = [OptimiserObjective.ConnectionPointEnergy,
                          OptimiserObjective.GreedyGenerationCharging,
                          OptimiserObjective.GreedyDemandDischarging,
                          OptimiserObjective.Throughput,
                          OptimiserObjective.EqualStorageActions]

    PeakOptimisation = [OptimiserObjective.ConnectionPointPeakPower]

    QuantisedPeakOptimisation = [OptimiserObjective.ConnectionPointQuantisedPeak]

    DispatchOptimisation = [OptimiserObjective.PiecewiseLinear] + FinancialOptimisation

    LocalModels = [OptimiserObjective.LocalModelsCost,
                   OptimiserObjective.ThroughputCost,
                   OptimiserObjective.EqualStorageActions]

    LocalModelsThirdParty = [OptimiserObjective.LocalThirdParty,
                             OptimiserObjective.ThroughputCost,
                             OptimiserObjective.EqualStorageActions]

    LocalPeakOptimisation = [OptimiserObjective.LocalGridPeakPower]
# Define some useful constants
minutes_per_hour = 60.0  # converts interval durations in minutes to hours (power <-> energy)
####################################################################
class EnergyOptimiser(object):
def __init__(self, interval_duration, number_of_intervals, energy_system, objective, optimiser_engine=None,
optimiser_engine_executable=None, use_bool_variables=True):
"""
Sets up the energy optimiser using the supplied solver. The configuration for the optimiser can be read from
environmental variables (OPTIMISER_ENGINE and OPTIMISER_ENGINE_EXECUTABLE) if not supplied as arguments
Args:
objective(list): List of Optimiser Objectives
optimiser_engine(str): name of engine to be used
optimiser_engine_executable(str): path to engine if required (e.G. cplex)
use_bool_variables(bool): converts bools to int if false
"""
# Configure the optimiser through setting appropriate environmental variables.
if optimiser_engine is None:
self.optimiser_engine = os.environ.get('OPTIMISER_ENGINE') # ipopt doesn't work with int/bool variables
else:
self.optimiser_engine = optimiser_engine
if optimiser_engine_executable is None:
if self.optimiser_engine == "cplex":
self.optimiser_engine_executable = os.environ.get('OPTIMISER_ENGINE_EXECUTABLE')
else:
self.optimiser_engine_executable = None
else:
self.optimiser_engine_executable = optimiser_engine_executable
self.use_bool_vars = use_bool_variables
self.interval_duration = interval_duration # The duration (in minutes) of each of the intervals being optimised over
self.number_of_intervals = number_of_intervals
self.energy_system = energy_system
# These values have been arbitrarily chosen
# A better understanding of the sensitivity of these values may be advantageous
self.bigM = 5000000
self.smallM = 0.0001
self.objectives = objective
self.build_model()
self.apply_constraints()
self.build_objective()
    def build_model(self):
        """
        Construct the Pyomo ConcreteModel: the time index, the storage
        charge/discharge decision variables, the optional boolean state
        variables, and the fixed parameters (battery limits/efficiencies,
        bias values, and the demand/generation profiles).
        """
        # Set up the Pyomo model
        self.model = en.ConcreteModel()

        # We use RangeSet to create an index for each of the time
        # periods that we will optimise within.
        self.model.Time = en.RangeSet(0, self.number_of_intervals - 1)

        # Configure the initial demand and generation
        system_demand = self.energy_system.demand.demand
        system_generation = self.energy_system.generation.generation

        # and convert the data into the right format for the optimiser objects
        # (Pyomo Params are initialised from {index: value} dicts)
        self.system_demand_dct = dict(enumerate(system_demand))
        self.system_generation_dct = dict(enumerate(system_generation))

        #### Initialise the optimisation variables (all indexed by self.model.Time) ####

        # The state of charge of the battery
        self.model.storage_state_of_charge = en.Var(self.model.Time,
                                                    bounds=(0, self.energy_system.energy_storage.capacity),
                                                    initialize=0)

        # The increase in energy storage state of charge at each time step
        self.model.storage_charge_total = en.Var(self.model.Time, initialize=0)

        # The decrease in energy storage state of charge at each time step
        self.model.storage_discharge_total = en.Var(self.model.Time, initialize=0)

        # Increase in battery SoC from the Grid
        self.model.storage_charge_grid = en.Var(self.model.Time,
                                               bounds=(0, self.energy_system.energy_storage.charging_power_limit *
                                                       (self.interval_duration / minutes_per_hour)),
                                               initialize=0)

        # Increase in battery SoC from PV Generation
        self.model.storage_charge_generation = en.Var(self.model.Time,
                                                      bounds=(0, self.energy_system.energy_storage.charging_power_limit *
                                                              (self.interval_duration / minutes_per_hour)),
                                                      initialize=0)

        # Satisfying local demand from the battery
        # (discharge energies are non-positive by convention, hence the (limit, 0) bounds)
        self.model.storage_discharge_demand = en.Var(self.model.Time,
                                                     bounds=(self.energy_system.energy_storage.discharging_power_limit *
                                                             (self.interval_duration / minutes_per_hour), 0),
                                                     initialize=0)

        # Exporting to the grid from the battery
        self.model.storage_discharge_grid = en.Var(self.model.Time,
                                                   bounds=(self.energy_system.energy_storage.discharging_power_limit *
                                                           (self.interval_duration / minutes_per_hour), 0),
                                                   initialize=0)

        #### Boolean variables (again indexed by Time) ####
        # These may not be necessary so provide a binary flag for turning them off
        if self.use_bool_vars:
            # Is the battery charging in a given time interval
            self.model.is_charging = en.Var(self.model.Time, within=en.Boolean)
            # Is the battery discharging in a given time interval
            self.model.is_discharging = en.Var(self.model.Time, within=en.Boolean, initialize=0)
            self.model.local_demand_satisfied = en.Var(self.model.Time, within=en.Boolean, initialize=0)
            self.model.local_generation_satisfied = en.Var(self.model.Time, within=en.Boolean, initialize=0)
            self.model.is_importing = en.Var(self.model.Time, within=en.Boolean)
            # Is the battery discharging in a given time interval
            self.model.is_local_exporting = en.Var(self.model.Time, within=en.Boolean, initialize=0)

        #### Battery Parameters ####
        # The battery charging efficiency
        self.model.eta_chg = en.Param(initialize=self.energy_system.energy_storage.charging_efficiency)
        # The battery discharging efficiency
        self.model.eta_dischg = en.Param(initialize=self.energy_system.energy_storage.discharging_efficiency)
        # The battery charge power limit (converted to energy per interval)
        self.model.charging_limit = en.Param(
            initialize=self.energy_system.energy_storage.charging_power_limit * (self.interval_duration / minutes_per_hour))
        # The battery discharge power limit (converted to energy per interval)
        self.model.discharging_limit = en.Param(
            initialize=self.energy_system.energy_storage.discharging_power_limit * (self.interval_duration / minutes_per_hour))
        # The throughput cost for the energy storage
        self.model.throughput_cost = en.Param(initialize=self.energy_system.energy_storage.throughput_cost)

        #### Bias Values ####
        # A small fudge factor for reducing the size of the solution set and
        # achieving a unique optimisation solution
        self.model.scale_func = en.Param(initialize=self.smallM)
        # A bigM value for integer optimisation
        self.model.bigM = en.Param(initialize=self.bigM)

        #### Initial Demand / Generation Profile Parameters ####
        # The local energy consumption
        self.model.system_demand = en.Param(self.model.Time, initialize=self.system_demand_dct)
        # The local energy generation
        self.model.system_generation = en.Param(self.model.Time, initialize=self.system_generation_dct)
    def apply_constraints(self):
        """
        Attach the storage behaviour constraints to the model: charge/discharge
        energy balances (with efficiencies), charge/discharge rate limits, the
        state-of-charge recursion, and (when use_bool_vars is set) bigM boolean
        constraints ensuring the battery never charges and discharges in the
        same interval.
        """
        # Calculate the increased state of charge of the energy storage from the
        # imported energy and locally generated energy. We ensure that the
        # storage charging efficiency is taken into account.
        def storage_charge_behaviour(model, time_interval):
            return model.storage_charge_grid[time_interval] + model.storage_charge_generation[time_interval] \
                   == model.storage_charge_total[time_interval] / model.eta_chg

        # Calculate the decreased state of charge of the energy storage from the
        # exported energy and locally consumed energy. We ensure that the
        # storage discharging efficiency is taken into account.
        def storage_discharge_behaviour(model, time_interval):
            return model.storage_discharge_demand[time_interval] + model.storage_discharge_grid[time_interval] \
                   == model.storage_discharge_total[time_interval] * model.eta_dischg

        # Enforce the charging rate limit
        def storage_charge_rate_limit(model, time_interval):
            return (model.storage_charge_grid[time_interval] + model.storage_charge_generation[
                time_interval]) <= model.charging_limit

        # Enforce the discharge rate limit
        # (discharge energies are non-positive, so the limit is a lower bound)
        def storage_discharge_rate_limit(model, time_interval):
            return (model.storage_discharge_demand[time_interval] + model.storage_discharge_grid[
                time_interval]) >= model.discharging_limit

        # Add the constraints to the optimisation model
        self.model.storage_charge_behaviour_constraint = en.Constraint(self.model.Time, rule=storage_charge_behaviour)
        self.model.storage_discharge_behaviour_constraint = en.Constraint(self.model.Time, rule=storage_discharge_behaviour)
        self.model.storage_charge_rate_limit_constraint = en.Constraint(self.model.Time, rule=storage_charge_rate_limit)
        self.model.storage_discharge_rate_limit_constraint = en.Constraint(self.model.Time, rule=storage_discharge_rate_limit)

        # Calculate the state of charge of the battery in each time interval
        # SoC[t] = SoC[t-1] + charge[t] + discharge[t] (discharge is negative)
        initial_state_of_charge = self.energy_system.energy_storage.initial_state_of_charge

        def SOC_rule(model, time_interval):
            if time_interval == 0:
                return model.storage_state_of_charge[time_interval] \
                       == initial_state_of_charge + model.storage_charge_total[time_interval] + \
                          model.storage_discharge_total[
                              time_interval]
            else:
                return model.storage_state_of_charge[time_interval] \
                       == model.storage_state_of_charge[time_interval - 1] + model.storage_charge_total[time_interval] + \
                          model.storage_discharge_total[time_interval]

        self.model.Batt_SOC = en.Constraint(self.model.Time, rule=SOC_rule)

        # Use bigM formulation to ensure that the battery is only charging or discharging in each time interval
        if self.use_bool_vars:
            # If the battery is charging then the charge energy is bounded from below by -bigM
            # If the battery is discharging the charge energy is bounded from below by zero
            def bool_cd_rule_one(model, time_interval):
                return model.storage_charge_total[time_interval] >= -self.model.bigM * model.is_charging[time_interval]

            # If the battery is charging then the charge energy is bounded from above by bigM
            # If the battery is discharging the charge energy is bounded from above by zero
            def bool_cd_rule_two(model, time_interval):
                return model.storage_charge_total[time_interval] <= self.model.bigM * (1 - model.is_discharging[time_interval])

            # If the battery is charging then the discharge energy is bounded from above by zero
            # If the battery is discharging the discharge energy is bounded from above by bigM
            def bool_cd_rule_three(model, time_interval):
                return model.storage_discharge_total[time_interval] <= self.model.bigM * model.is_discharging[time_interval]

            # If the battery is charging then the discharge energy is bounded from below by zero
            # If the battery is discharging the discharge energy is bounded from below by -bigM
            def bool_cd_rule_four(model, time_interval):
                return model.storage_discharge_total[time_interval] >= -self.model.bigM * (1 - model.is_charging[time_interval])

            # The battery can only be charging or discharging
            def bool_cd_rule_five(model, time_interval):
                return model.is_charging[time_interval] + model.is_discharging[time_interval] == 1

            # Add the constraints to the optimisation model
            self.model.bcdr_one = en.Constraint(self.model.Time, rule=bool_cd_rule_one)
            self.model.bcdr_two = en.Constraint(self.model.Time, rule=bool_cd_rule_two)
            self.model.bcdr_three = en.Constraint(self.model.Time, rule=bool_cd_rule_three)
            self.model.bcdr_four = en.Constraint(self.model.Time, rule=bool_cd_rule_four)
            self.model.bcdr_five = en.Constraint(self.model.Time, rule=bool_cd_rule_five)
    def build_objective(self):
        """Assemble the scalar objective expression from the configured objectives.

        Each OptimiserObjective present in ``self.objectives`` contributes an
        additive term (summed over ``self.model.Time``) to ``self.objective``.
        """
        # Build the objective function ready for optimisation
        self.objective = 0

        if OptimiserObjective.ThroughputCost in self.objectives:
            # Throughput cost of using energy storage - we attribute half the cost
            # to charging and half to discharging. Discharge totals are
            # non-positive, so subtracting them accumulates absolute throughput.
            self.objective += sum(self.model.storage_charge_total[i] - self.model.storage_discharge_total[i]
                                  for i in self.model.Time) * self.model.throughput_cost / 2.0

        if OptimiserObjective.Throughput in self.objectives:
            # Throughput of using energy storage - it mirrors the throughput cost above
            self.objective += sum(self.model.storage_charge_total[i] - self.model.storage_discharge_total[i]
                                  for i in self.model.Time) * self.model.scale_func

        if OptimiserObjective.EqualStorageActions in self.objectives:
            # Quadratic penalty on each individual storage flow, which favours
            # spreading (dis)charging evenly over the horizon.
            # ToDo - Which is the better implementation?
            self.objective += sum((self.model.storage_charge_grid[i] * self.model.storage_charge_grid[i]) +
                                  (self.model.storage_charge_generation[i] * self.model.storage_charge_generation[i]) +
                                  (self.model.storage_discharge_grid[i] * self.model.storage_discharge_grid[i]) +
                                  (self.model.storage_discharge_demand[i] * self.model.storage_discharge_demand[i])
                                  for i in self.model.Time) * self.model.scale_func

        '''objective += sum(self.model.storage_charge_total[i] * self.model.storage_charge_total[i] +
                         self.model.storage_discharge_total[i] * self.model.storage_discharge_total[i]
                         for i in self.model.Time) * self.model.scale_func'''

        '''if OptimiserObjective.PiecewiseLinear in self.objectives: # ToDo - Fix this implementation to make it complete
            for i in self.energy_system.dispatch.linear_ramp[0]:
                objective += -1 * (self.model.storage_charge_total[i] + self.model.storage_discharge_total[i]) * (
                        1 - self.model.turning_point_two_ramp[i])'''
def optimise(self):
"""
Calls the associated solver and computes the optimal solution
based on the given objective.
"""
def objective_function(model):
return self.objective
self.model.total_cost = en.Objective(rule=objective_function, sense=en.minimize)
# set the path to the solver
if self.optimiser_engine is not None:
if self.optimiser_engine_executable is not None:
opt = SolverFactory(self.optimiser_engine, executable=self.optimiser_engine_executable)
else:
opt = SolverFactory(self.optimiser_engine)
else:
raise AttributeError("Solver not specified, use either Environment Variables or Parameter to specify")
result = opt.solve(self.model, tee=False)
if result.solver.status != SolverStatus.ok:
if (result.solver.status == SolverStatus.aborted) and (len(result.solution) > 0):
logging.warning(
"WARNING - Loading a SolverResults object with an 'aborted' status, but containing a solution")
else:
raise ValueError("Cannot load a SolverResults object with bad status:", result.solver.status)
elif result.solver.status == SolverStatus.ok or result.solver.status == SolverStatus.warning: # solver seams ok, lets check the termination conditions.
if (result.solver.termination_condition != TerminationCondition.globallyOptimal) \
and (result.solver.termination_condition != TerminationCondition.locallyOptimal) \
and (result.solver.termination_condition != TerminationCondition.optimal) \
and (result.solver.termination_condition != TerminationCondition.other):
raise ValueError(result.solver.termination_message)
def values(self, variable_name, decimal_places=3):
output = np.zeros(self.number_of_intervals)
var_obj = getattr(self.model, variable_name)
for index in var_obj:
try:
output[index] = round(var_obj[index].value, decimal_places)
except AttributeError:
output[index] = round(var_obj[index], decimal_places)
return output
def result_dct(self, include_indexed_params=True):
"""Extract the resulting `Var`s (and input `Param`s) as a dictionary
Args:
include_indexed_params (bool, optional): Whether to include indexed `Param`s in output. Defaults to True.
Returns:
dict: Results dict
"""
if include_indexed_params:
component_objects = (en.Var, en.Param)
else:
component_objects = en.Var
dct = {}
for var_obj in self.model.component_objects(component_objects):
if var_obj.is_indexed():
dct[var_obj.name] = var_obj.extract_values()
return dct
def result_df(self, include_indexed_params=True):
"""Return result (and optionally indexed `Param`s) as a dataframe
Args:
include_indexed_params (bool, optional): Whether to include indexed `Param`s in output. Defaults to True.
Returns:
pd.DataFrame: Results dataframe
"""
import pandas as pd # TODO Check if pandas is otherwise required and import at head of file
return pd.DataFrame(self.result_dct(include_indexed_params))
class BTMEnergyOptimiser(EnergyOptimiser):
    """Behind-the-meter (BTM) energy optimiser.

    Specialises EnergyOptimiser with net grid import/export variables,
    import/export tariff parameters, connection-point peak-power tracking
    and the corresponding constraints and objective terms. The optimisation
    is executed immediately on construction.
    """

    def __init__(self, interval_duration, number_of_intervals, energy_system, objective, optimiser_engine=None,
                 optimiser_engine_executable=None, use_bool_variables=True):
        # NOTE(review): the use_bool_variables argument is accepted but not
        # forwarded - the base class is always initialised with
        # use_bool_variables=True here, while the sibling LocalEnergyOptimiser
        # passes the argument through. Confirm this hard-coding is intentional.
        super().__init__(interval_duration, number_of_intervals, energy_system, objective, optimiser_engine,
                         optimiser_engine_executable, use_bool_variables=True)

        self.use_piecewise_segments = True  # Defined for a future implementation of linear piecewise segments

        # Extend the base model, add constraints and objective terms, then solve.
        self.update_build_model()
        self.update_apply_constraints()
        self.update_build_objective()
        super().optimise()

    def update_build_model(self):
        """Add the BTM variables, tariff parameters and peak-power machinery."""
        #### Behind - the - Meter (BTM) Models ####

        # Net import from the grid
        self.model.btm_net_import = en.Var(self.model.Time, initialize=self.system_demand_dct)
        # Net export to the grid
        self.model.btm_net_export = en.Var(self.model.Time, initialize=self.system_generation_dct)

        # The import tariff per kWh
        self.model.btm_import_tariff = en.Param(self.model.Time, initialize=self.energy_system.tariff.import_tariff)
        # The export tariff per kWh
        self.model.btm_export_tariff = en.Param(self.model.Time, initialize=self.energy_system.tariff.export_tariff)

        #### BTM Connection Point Peak Power ####
        self.model.peak_connection_point_import_power = en.Var(within=en.NonNegativeReals)
        self.model.peak_connection_point_export_power = en.Var(within=en.NonNegativeReals)

        # Peak import power is an upper bound on net import in every interval.
        def peak_connection_point_import(model, interval):
            return model.peak_connection_point_import_power >= model.btm_net_import[interval]

        # Net export is negated here, so the peak tracks its magnitude
        # (export values appear to be stored as non-positive - TODO confirm).
        def peak_connection_point_export(model, interval):
            return model.peak_connection_point_export_power >= -model.btm_net_export[interval]

        self.model.peak_connection_point_import_constraint = en.Constraint(self.model.Time,
                                                                           rule=peak_connection_point_import)
        self.model.peak_connection_point_export_constraint = en.Constraint(self.model.Time,
                                                                           rule=peak_connection_point_export)

        #### Piecewise Linear Segments (To be fully implemented later) ####
        '''if self.use_piecewise_segments:
            # The turning points for the piecewise linear segments
            self.model.turning_point_one_ramp = en.Var(self.model.Time, within=en.Boolean, initialize=0)
            self.model.turning_point_two_ramp = en.Var(self.model.Time, within=en.Boolean, initialize=0)

            lims_one = [None] * (len(net) - 1)  # ToDo - Fix this indexing
            lims_two = [None] * (len(net) - 1)  # ToDo - Fix this indexing
            ind = self.energy_system.dispatch.linear_ramp[0]
            lim = self.energy_system.dispatch.linear_ramp[1]
            for i, l in zip(ind, lim):
                lims_one[i] = l[0]
                lims_two[i] = l[1]

            lim_dct_one = dict(enumerate(lims_one))
            self.model.limits_one = en.Param(self.model.Time, initialize=lim_dct_one)
            lim_dct_two = dict(enumerate(lims_two))
            self.model.limits_two = en.Param(self.model.Time, initialize=lim_dct_two)

            self.model.my_set = en.Set(initialize=ind)

            def B1(m, s):
                return m.limits_one[s] <= m.storage_charge_total[s] + m.storage_discharge_total[s] + self.bigM * (1 - m.turning_point_one_ramp[s])

            def B2(m, s):
                return m.limits_one[s] >= m.storage_charge_total[s] + m.storage_discharge_total[s] - self.bigM * m.turning_point_one_ramp[s]

            self.model.B1 = en.Constraint(self.model.my_set, rule=B1)
            self.model.B2 = en.Constraint(self.model.my_set, rule=B2)

            def B3(m, s):
                return m.limits_two[s] <= m.storage_charge_total[s] + m.storage_discharge_total[s] + self.bigM * (1 - m.turning_point_two_ramp[s])

            def B4(m, s):
                return m.limits_two[s] >= m.storage_charge_total[s] + m.storage_discharge_total[s] - self.bigM * m.turning_point_two_ramp[s]

            self.model.B3 = en.Constraint(self.model.my_set, rule=B3)
            self.model.B4 = en.Constraint(self.model.my_set, rule=B4)'''

    def update_apply_constraints(self):
        """Wire the BTM energy-balance and storage-feasibility constraints."""
        # Enforce the limits of charging the energy storage from locally
        # generated energy (generation is negated here - it appears to be
        # stored as non-positive; TODO confirm).
        def storage_generation_charging_behaviour(model, time_interval):
            return model.storage_charge_generation[time_interval] <= -model.system_generation[time_interval]

        # Enforce the limits of discharging the energy storage to satisfy local demand
        def storage_demand_discharging_behaviour(model, time_interval):
            return model.storage_discharge_demand[time_interval] >= -model.system_demand[time_interval]

        # Add the constraints to the optimisation model
        self.model.generation_charging_behaviour_constraint = en.Constraint(self.model.Time,
                                                                            rule=storage_generation_charging_behaviour)
        self.model.local_discharge_behaviour_constraint = en.Constraint(self.model.Time,
                                                                        rule=storage_demand_discharging_behaviour)

        # Calculate the net energy import: demand plus grid charging plus the
        # (non-positive) storage discharge that serves demand.
        def btm_net_connection_point_import(model, time_interval):
            return model.btm_net_import[time_interval] == model.system_demand[time_interval] + \
                   model.storage_charge_grid[time_interval] + model.storage_discharge_demand[time_interval]

        # calculate the net energy export
        def btm_net_connection_point_export(model, time_interval):
            return model.btm_net_export[time_interval] == model.system_generation[time_interval] + \
                   model.storage_charge_generation[time_interval] + model.storage_discharge_grid[time_interval]

        # Add the constraints to the optimisation model
        self.model.btm_net_import_constraint = en.Constraint(self.model.Time, rule=btm_net_connection_point_import)
        self.model.btm_net_export_constraint = en.Constraint(self.model.Time, rule=btm_net_connection_point_export)

    def update_build_objective(self):
        """Append the BTM-specific objective terms selected in self.objectives."""
        # Build the objective function ready for optimisation
        if OptimiserObjective.ConnectionPointCost in self.objectives:
            # Connection point cost
            self.objective += sum(self.model.btm_import_tariff[i] * self.model.btm_net_import[i] +  # The cost of purchasing energy
                                  self.model.btm_export_tariff[i] * self.model.btm_net_export[i]  # The value of selling energy
                                  for i in self.model.Time)

        if OptimiserObjective.ConnectionPointEnergy in self.objectives:
            # The amount of energy crossing the meter boundary
            self.objective += sum((-self.model.btm_net_export[i] + self.model.btm_net_import[i])
                                  for i in self.model.Time)

        if OptimiserObjective.GreedyGenerationCharging in self.objectives:
            # Greedy Generation - Favour charging fully from generation in earlier
            # intervals (the weight decays linearly with the interval index).
            self.objective += sum(-self.model.btm_net_export[i]
                                  * 1 / self.number_of_intervals
                                  * (1 - i / self.number_of_intervals)
                                  for i in self.model.Time)

        if OptimiserObjective.GreedyDemandDischarging in self.objectives:
            # Greedy Demand Discharging - Favour satisfying all demand from the
            # storage in earlier intervals (same decaying weight as above).
            self.objective += sum(self.model.btm_net_import[i]
                                  * 1 / self.number_of_intervals
                                  * (1 - i / self.number_of_intervals)
                                  for i in self.model.Time)

        if OptimiserObjective.ConnectionPointPeakPower in self.objectives:
            # ToDo - More work is needed to convert this into a demand tariff objective (i.e. a cost etc.)
            self.objective += self.model.peak_connection_point_import_power + self.model.peak_connection_point_export_power

        if OptimiserObjective.ConnectionPointQuantisedPeak in self.objectives:
            # ToDo - What is this objective function? Quantises the Connection point?
            self.objective += sum(self.model.btm_net_export[i] * self.model.btm_net_export[i] +
                                  self.model.btm_net_import[i] * self.model.btm_net_import[i]
                                  for i in self.model.Time)

        '''if OptimiserObjective.PiecewiseLinear in self.objectives: # ToDo - Fix this implementation to make it complete
            for i in self.energy_system.dispatch.linear_ramp[0]:
                objective += -1 * (self.model.storage_charge_total[i] + self.model.storage_discharge_total[i]) * (
                        1 - self.model.turning_point_two_ramp[i])'''
class LocalEnergyOptimiser(EnergyOptimiser):
    """Local-energy-market optimiser.

    Extends EnergyOptimiser with local import/export flows, the eight
    local/remote energy-and-transport tariffs, local peak-power tracking and
    optional big-M electrical-feasibility enforcement. The optimisation is
    executed immediately on construction.
    """

    def __init__(self, interval_duration, number_of_intervals, energy_system, objective, optimiser_engine=None,
                 optimiser_engine_executable=None, use_bool_variables=True,
                 enforce_local_feasability=False, enforce_battery_feasability=False):
        super().__init__(interval_duration, number_of_intervals, energy_system, objective, optimiser_engine,
                         optimiser_engine_executable, use_bool_variables)

        # Optional feasibility-enforcement flags; see update_apply_constraints.
        self.enforce_local_feasability = enforce_local_feasability
        self.enforce_battery_feasability = enforce_battery_feasability

        self.update_build_model()
        self.update_apply_constraints()
        self.update_build_objective()
        super().optimise()

    def update_build_model(self):
        """Add local-energy variables, tariff parameters and peak-power variables."""
        #### Local Energy Models ####

        # Net import from the grid (without BTM Storage)
        self.model.local_net_import = en.Var(self.model.Time, initialize=self.system_demand_dct)
        # Net export to the grid (without BTM Storage)
        self.model.local_net_export = en.Var(self.model.Time, initialize=self.system_generation_dct)
        # Local consumption (Satisfy local demand from local generation)
        self.model.local_demand_transfer = en.Var(self.model.Time, within=en.NonNegativeReals, initialize=0.0)

        # Local Energy Tariffs. Prefixes: le = local energy, lt = local
        # transport, re = remote energy, rt = remote transport - presumably;
        # verify against the local_tariff definition.
        self.model.le_import_tariff = en.Param(self.model.Time,
                                               initialize=self.energy_system.local_tariff.le_import_tariff)
        self.model.le_export_tariff = en.Param(self.model.Time,
                                               initialize=self.energy_system.local_tariff.le_export_tariff)
        self.model.lt_import_tariff = en.Param(self.model.Time,
                                               initialize=self.energy_system.local_tariff.lt_import_tariff)
        self.model.lt_export_tariff = en.Param(self.model.Time,
                                               initialize=self.energy_system.local_tariff.lt_export_tariff)
        self.model.re_import_tariff = en.Param(self.model.Time,
                                               initialize=self.energy_system.local_tariff.re_import_tariff)
        self.model.re_export_tariff = en.Param(self.model.Time,
                                               initialize=self.energy_system.local_tariff.re_export_tariff)
        self.model.rt_import_tariff = en.Param(self.model.Time,
                                               initialize=self.energy_system.local_tariff.rt_import_tariff)
        self.model.rt_export_tariff = en.Param(self.model.Time,
                                               initialize=self.energy_system.local_tariff.rt_export_tariff)

        #### Local Grid Flows Peak Power ####
        self.model.local_peak_connection_point_import_power = en.Var(within=en.NonNegativeReals)
        self.model.local_peak_connection_point_export_power = en.Var(within=en.NonNegativeReals)

        # Peak import power bounds the signed sum of all grid-side flows from above.
        def local_peak_connection_point_import(model, interval):
            return model.local_peak_connection_point_import_power >= self.model.storage_charge_grid[interval] + \
                   self.model.storage_discharge_grid[interval] + self.model.local_net_import[interval] + \
                   self.model.local_net_export[interval]

        # Peak export power bounds the same sum from below (via negation).
        def local_peak_connection_point_export(model, interval):
            return model.local_peak_connection_point_export_power >= -(self.model.storage_charge_grid[interval] + \
                   self.model.storage_discharge_grid[interval] + self.model.local_net_import[interval] + \
                   self.model.local_net_export[interval])

        self.model.local_peak_connection_point_import_constraint = en.Constraint(self.model.Time,
                                                                                 rule=local_peak_connection_point_import)
        self.model.local_peak_connection_point_export_constraint = en.Constraint(self.model.Time,
                                                                                 rule=local_peak_connection_point_export)

    def update_apply_constraints(self):
        """Wire the local energy-balance constraints and (optionally) the big-M
        electrical-feasibility constraints controlled by the two enforce_* flags."""
        # Calculate the customer net energy import
        def local_net_import(model, time_interval):
            return model.local_net_import[time_interval] == model.system_demand[time_interval] + \
                   model.storage_discharge_demand[time_interval] - model.local_demand_transfer[time_interval]

        # calculate the customer net energy export
        def local_net_export(model, time_interval):
            return model.local_net_export[time_interval] == model.system_generation[time_interval] + \
                   model.storage_charge_generation[time_interval] + model.local_demand_transfer[time_interval]

        # constrain the use of local energy exports
        def local_demand_transfer_export(model, time_interval):
            return model.local_demand_transfer[time_interval] + model.storage_charge_generation[time_interval] <= -model.system_generation[time_interval]

        # constrain the use of local energy imports
        def local_demand_transfer_import(model, time_interval):
            return model.storage_discharge_demand[time_interval] - model.local_demand_transfer[time_interval] >= -model.system_demand[time_interval]

        # Add the constraints to the optimisation model
        self.model.local_net_import_constraint = en.Constraint(self.model.Time, rule=local_net_import)
        self.model.local_net_export_constraint = en.Constraint(self.model.Time, rule=local_net_export)
        self.model.local_demand_transfer_export_constraint = en.Constraint(self.model.Time, rule=local_demand_transfer_export)
        self.model.local_demand_transfer_import_constraint = en.Constraint(self.model.Time, rule=local_demand_transfer_import)

        # These set of constraints are designed to enforce the battery to satisfy any residual
        # local demand before discharging to the grid.
        if self.enforce_battery_feasability:

            def electrical_feasability_discharge_grid_one(model: en.ConcreteModel, time_interval: int):  # TODO these annotations are probably wrong
                """This constraint (combined with `electrical_feasability_discharge_grid_two`)
                enforces the electrical requirement that the battery must satisfy local demand
                before discharging into the grid. It maps between the boolean variable
                `local_demand_satisfied` and a bound on `storage_discharge_grid`.

                `local_demand_satisfied = 0` corresponds to a lower bound on
                `storage_discharge_grid` of zero - since discharge energies are
                non-positive, it is then impossible to discharge into the grid.
                `local_demand_satisfied = 1` corresponds to a lower bound of `-bigM`
                (effectively no lower bound).

                Args:
                    model: Pyomo model
                    time_interval: time interval variable

                Returns:
                    obj: constraint object
                """
                return model.storage_discharge_grid[time_interval] >= -self.model.bigM * model.local_demand_satisfied[time_interval]

            def electrical_feasability_discharge_grid_two(model: en.ConcreteModel, time_interval: int):
                """This constraint maps between a boolean `local_demand_satisfied` and its correspondence
                to `storage_discharge_demand`. Combined with `electrical_feasability_discharge_grid_one`,
                this enforces the electrical requirement that the battery must satisfy local demand
                before discharging into the grid.

                `local_demand_satisfied = 1` caps `storage_discharge_demand` from above at
                `-(system_demand + system_generation)`, i.e. the battery must cover the
                residual net demand.
                `local_demand_satisfied = 0` corresponds to `storage_discharge_demand` having
                an upper bound of 0.

                Args:
                    model: Pyomo model
                    time_interval: time interval passed into constraint equation

                Returns:
                    obj: constraint object
                """
                return model.storage_discharge_demand[time_interval] <= -(model.system_demand[time_interval] + model.system_generation[time_interval]) * model.local_demand_satisfied[time_interval]

            self.model.efdc_one = en.Constraint(self.model.Time, rule=electrical_feasability_discharge_grid_one)
            self.model.efdc_two = en.Constraint(self.model.Time, rule=electrical_feasability_discharge_grid_two)

            def electrical_feasability_charge_grid_one(model: en.ConcreteModel, time_interval: int):  # TODO these annotations are probably wrong
                """This constraint (combined with `electrical_feasability_charge_grid_two`)
                enforces the electrical requirement that the battery must charge from local
                generation before charging from the grid. It maps between the boolean variable
                `local_generation_satisfied` and a bound on `storage_charge_grid`.

                `local_generation_satisfied = 1` corresponds to an upper bound on `storage_charge_grid` of `bigM`.
                `local_generation_satisfied = 0` corresponds to an upper bound of `0` .
                I.e. if local generation is not accounted for, it is impossible to charge from the grid.

                Args:
                    model: Pyomo model
                    time_interval: time interval variable

                Returns:
                    obj: constraint object
                """
                return model.storage_charge_grid[time_interval] <= self.model.bigM * model.local_generation_satisfied[time_interval]

            def electrical_feasability_charge_grid_two(model: en.ConcreteModel, time_interval: int):
                """This constraint maps between a boolean `local_generation_satisfied` and its correspondence
                to `storage_charge_generation`. Combined with `electrical_feasability_charge_grid_one`,
                this enforces the electrical requirement that the battery must charge from local excess
                generation before charging from the grid.

                `local_generation_satisfied = 1` imposes a lower bound of
                `-(system_demand + system_generation)` on `storage_charge_generation`,
                i.e. the battery must absorb the net excess generation.
                `local_generation_satisfied = 0` gives a lower bound of 0 (inactive,
                since the variable is non-negative).

                Args:
                    model: Pyomo model
                    time_interval: time interval passed into constraint equation

                Returns:
                    obj: constraint object
                """
                return model.storage_charge_generation[time_interval] >= -(model.system_demand[time_interval] + model.system_generation[time_interval]) * model.local_generation_satisfied[time_interval]

            self.model.efcc_one = en.Constraint(self.model.Time, rule=electrical_feasability_charge_grid_one)
            self.model.efcc_two = en.Constraint(self.model.Time, rule=electrical_feasability_charge_grid_two)

        # Additional rules to enforce electrical feasability
        # (Without these rules, the local generation can preferentially export to the grid
        # before satisfying local demand)
        if self.enforce_local_feasability:

            def import_export_rule_one(model: en.ConcreteModel, time_interval: int):
                """Enforce a lower bound on `local_net_export` of `0` or `-bigM` depending on
                whether `is_local_exporting` is zero or one.

                Args:
                    model (en.ConcreteModel): Pyomo model
                    time_interval (int): time interval passed into constraint

                Returns:
                    obj: constraint object
                """
                return model.local_net_export[time_interval] >= -model.is_local_exporting[time_interval] * self.model.bigM

            def import_export_rule_two(model: en.ConcreteModel, time_interval: int):
                """Enforce an upper bound on `local_net_import` of `0` or `bigM` depending on
                whether `is_local_exporting` is one or zero. Combined with `import_export_rule_one`,
                this enforces that the system can only be exporting or importing locally.

                Args:
                    model (en.ConcreteModel): Pyomo model
                    time_interval (int): time interval passed into constraint

                Returns:
                    obj: constraint object
                """
                return model.local_net_import[time_interval] <= (1 - model.is_local_exporting[time_interval]) * self.model.bigM

            self.model.ie_one = en.Constraint(self.model.Time, rule=import_export_rule_one)
            self.model.ie_two = en.Constraint(self.model.Time, rule=import_export_rule_two)

    def update_build_objective(self):
        """Append the local-energy-market objective terms selected in self.objectives."""
        # Build the objective function ready for optimisation
        if OptimiserObjective.LocalModelsCost in self.objectives:
            # Full local-market cost: each flow priced with its energy and
            # transport tariff components.
            self.objective += sum((self.model.storage_charge_grid[i] * (self.model.re_import_tariff[i] + self.model.rt_import_tariff[i])) +
                                  (self.model.storage_discharge_grid[i] * (self.model.re_export_tariff[i] - self.model.rt_export_tariff[i])) +
                                  (self.model.storage_charge_generation[i] * (-self.model.le_export_tariff[i] + self.model.le_import_tariff[i] + self.model.lt_export_tariff[i] + self.model.lt_import_tariff[i])) +
                                  (self.model.storage_discharge_demand[i] * (self.model.le_export_tariff[i] - self.model.le_import_tariff[i] - self.model.lt_export_tariff[i] - self.model.lt_import_tariff[i])) +
                                  (self.model.local_net_import[i] * (self.model.re_import_tariff[i] + self.model.rt_import_tariff[i])) +
                                  (self.model.local_net_export[i] * (self.model.re_export_tariff[i] - self.model.rt_export_tariff[i])) +
                                  (self.model.local_demand_transfer[i] * (-self.model.le_export_tariff[i] + self.model.le_import_tariff[i] + self.model.lt_export_tariff[i] + self.model.lt_import_tariff[i]))
                                  for i in self.model.Time)

        if OptimiserObjective.LocalThirdParty in self.objectives:
            # Third-party perspective: only the storage flows are priced.
            self.objective += sum((self.model.storage_charge_grid[i] * (self.model.re_import_tariff[i] + self.model.rt_import_tariff[i])) +
                                  (self.model.storage_discharge_grid[i] * (self.model.re_export_tariff[i] - self.model.rt_export_tariff[i])) +
                                  (self.model.storage_charge_generation[i] * (self.model.le_import_tariff[i] + self.model.lt_import_tariff[i])) +
                                  (self.model.storage_discharge_demand[i] * (self.model.le_export_tariff[i] - self.model.lt_export_tariff[i]))
                                  for i in self.model.Time)

        if OptimiserObjective.LocalGridPeakPower in self.objectives:
            # ToDo - More work is needed to convert this into a demand tariff objective (i.e. a cost etc.)
            self.objective += self.model.local_peak_connection_point_import_power + self.model.local_peak_connection_point_export_power

        if OptimiserObjective.LocalGridMinimiser in self.objectives:
            # ToDo - What is this objective function? Quantises the Connection point?
            self.objective += sum((self.model.storage_charge_grid[i] + self.model.storage_discharge_grid[i]
                                   + self.model.local_net_import[i] + self.model.local_net_export[i]) *
                                  (self.model.storage_charge_grid[i] + self.model.storage_discharge_grid[i]
                                   + self.model.local_net_import[i] + self.model.local_net_export[i])
                                  for i in self.model.Time) * self.smallM

        if OptimiserObjective.GreedyGenerationCharging in self.objectives:
            # # Preferentially charge from local solar as soon as possible
            # # This amounts to minimising the quantity of exported energy in early periods
            # self.object
            self.objective += sum(self.model.local_net_export[i]
                                  * 1 / self.number_of_intervals
                                  * (i / self.number_of_intervals)
                                  for i in self.model.Time)

        if OptimiserObjective.GreedyDemandDischarging in self.objectives:
            # Mirror of the above: weight local imports to favour early discharging.
            self.objective += sum(self.model.local_net_import[i]
                                  * 1 / self.number_of_intervals
                                  * (-i / self.number_of_intervals)
                                  for i in self.model.Time)
| [
"logging.getLogger",
"pyomo.environ.Objective",
"os.environ.get",
"pyomo.environ.Param",
"logging.warning",
"numpy.zeros",
"pyomo.environ.Var",
"pyomo.opt.SolverFactory",
"pyomo.environ.RangeSet",
"pyomo.environ.Constraint",
"pyomo.environ.ConcreteModel"
] | [((153, 180), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (170, 180), False, 'import logging\n'), ((4342, 4360), 'pyomo.environ.ConcreteModel', 'en.ConcreteModel', ([], {}), '()\n', (4358, 4360), True, 'import pyomo.environ as en\n'), ((4501, 4545), 'pyomo.environ.RangeSet', 'en.RangeSet', (['(0)', '(self.number_of_intervals - 1)'], {}), '(0, self.number_of_intervals - 1)\n', (4512, 4545), True, 'import pyomo.environ as en\n'), ((5123, 5221), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'bounds': '(0, self.energy_system.energy_storage.capacity)', 'initialize': '(0)'}), '(self.model.Time, bounds=(0, self.energy_system.energy_storage.\n capacity), initialize=0)\n', (5129, 5221), True, 'import pyomo.environ as en\n'), ((5439, 5476), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'initialize': '(0)'}), '(self.model.Time, initialize=0)\n', (5445, 5476), True, 'import pyomo.environ as en\n'), ((5598, 5635), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'initialize': '(0)'}), '(self.model.Time, initialize=0)\n', (5604, 5635), True, 'import pyomo.environ as en\n'), ((5726, 5886), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'bounds': '(0, self.energy_system.energy_storage.charging_power_limit * (self.\n interval_duration / minutes_per_hour))', 'initialize': '(0)'}), '(self.model.Time, bounds=(0, self.energy_system.energy_storage.\n charging_power_limit * (self.interval_duration / minutes_per_hour)),\n initialize=0)\n', (5732, 5886), True, 'import pyomo.environ as en\n'), ((6131, 6291), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'bounds': '(0, self.energy_system.energy_storage.charging_power_limit * (self.\n interval_duration / minutes_per_hour))', 'initialize': '(0)'}), '(self.model.Time, bounds=(0, self.energy_system.energy_storage.\n charging_power_limit * (self.interval_duration / minutes_per_hour)),\n initialize=0)\n', (6137, 6291), True, 'import pyomo.environ as en\n'), ((6554, 6718), 
'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'bounds': '(self.energy_system.energy_storage.discharging_power_limit * (self.\n interval_duration / minutes_per_hour), 0)', 'initialize': '(0)'}), '(self.model.Time, bounds=(self.energy_system.energy_storage.\n discharging_power_limit * (self.interval_duration / minutes_per_hour), \n 0), initialize=0)\n', (6560, 6718), True, 'import pyomo.environ as en\n'), ((6968, 7132), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'bounds': '(self.energy_system.energy_storage.discharging_power_limit * (self.\n interval_duration / minutes_per_hour), 0)', 'initialize': '(0)'}), '(self.model.Time, bounds=(self.energy_system.energy_storage.\n discharging_power_limit * (self.interval_duration / minutes_per_hour), \n 0), initialize=0)\n', (6974, 7132), True, 'import pyomo.environ as en\n'), ((8354, 8428), 'pyomo.environ.Param', 'en.Param', ([], {'initialize': 'self.energy_system.energy_storage.charging_efficiency'}), '(initialize=self.energy_system.energy_storage.charging_efficiency)\n', (8362, 8428), True, 'import pyomo.environ as en\n'), ((8506, 8583), 'pyomo.environ.Param', 'en.Param', ([], {'initialize': 'self.energy_system.energy_storage.discharging_efficiency'}), '(initialize=self.energy_system.energy_storage.discharging_efficiency)\n', (8514, 8583), True, 'import pyomo.environ as en\n'), ((8661, 8786), 'pyomo.environ.Param', 'en.Param', ([], {'initialize': '(self.energy_system.energy_storage.charging_power_limit * (self.\n interval_duration / minutes_per_hour))'}), '(initialize=self.energy_system.energy_storage.charging_power_limit *\n (self.interval_duration / minutes_per_hour))\n', (8669, 8786), True, 'import pyomo.environ as en\n'), ((8879, 9008), 'pyomo.environ.Param', 'en.Param', ([], {'initialize': '(self.energy_system.energy_storage.discharging_power_limit * (self.\n interval_duration / minutes_per_hour))'}), '(initialize=self.energy_system.energy_storage.\n discharging_power_limit * (self.interval_duration / 
minutes_per_hour))\n', (8887, 9008), True, 'import pyomo.environ as en\n'), ((9107, 9177), 'pyomo.environ.Param', 'en.Param', ([], {'initialize': 'self.energy_system.energy_storage.throughput_cost'}), '(initialize=self.energy_system.energy_storage.throughput_cost)\n', (9115, 9177), True, 'import pyomo.environ as en\n'), ((9378, 9410), 'pyomo.environ.Param', 'en.Param', ([], {'initialize': 'self.smallM'}), '(initialize=self.smallM)\n', (9386, 9410), True, 'import pyomo.environ as en\n'), ((9485, 9515), 'pyomo.environ.Param', 'en.Param', ([], {'initialize': 'self.bigM'}), '(initialize=self.bigM)\n', (9493, 9515), True, 'import pyomo.environ as en\n'), ((9665, 9725), 'pyomo.environ.Param', 'en.Param', (['self.model.Time'], {'initialize': 'self.system_demand_dct'}), '(self.model.Time, initialize=self.system_demand_dct)\n', (9673, 9725), True, 'import pyomo.environ as en\n'), ((9803, 9867), 'pyomo.environ.Param', 'en.Param', (['self.model.Time'], {'initialize': 'self.system_generation_dct'}), '(self.model.Time, initialize=self.system_generation_dct)\n', (9811, 9867), True, 'import pyomo.environ as en\n'), ((11489, 11550), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'storage_charge_behaviour'}), '(self.model.Time, rule=storage_charge_behaviour)\n', (11502, 11550), True, 'import pyomo.environ as en\n'), ((11611, 11675), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'storage_discharge_behaviour'}), '(self.model.Time, rule=storage_discharge_behaviour)\n', (11624, 11675), True, 'import pyomo.environ as en\n'), ((11734, 11796), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'storage_charge_rate_limit'}), '(self.model.Time, rule=storage_charge_rate_limit)\n', (11747, 11796), True, 'import pyomo.environ as en\n'), ((11858, 11923), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'storage_discharge_rate_limit'}), '(self.model.Time, 
rule=storage_discharge_rate_limit)\n', (11871, 11923), True, 'import pyomo.environ as en\n'), ((12748, 12793), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'SOC_rule'}), '(self.model.Time, rule=SOC_rule)\n', (12761, 12793), True, 'import pyomo.environ as en\n'), ((17596, 17652), 'pyomo.environ.Objective', 'en.Objective', ([], {'rule': 'objective_function', 'sense': 'en.minimize'}), '(rule=objective_function, sense=en.minimize)\n', (17608, 17652), True, 'import pyomo.environ as en\n'), ((19272, 19306), 'numpy.zeros', 'np.zeros', (['self.number_of_intervals'], {}), '(self.number_of_intervals)\n', (19280, 19306), True, 'import numpy as np\n'), ((21631, 21689), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'initialize': 'self.system_demand_dct'}), '(self.model.Time, initialize=self.system_demand_dct)\n', (21637, 21689), True, 'import pyomo.environ as en\n'), ((21760, 21822), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'initialize': 'self.system_generation_dct'}), '(self.model.Time, initialize=self.system_generation_dct)\n', (21766, 21822), True, 'import pyomo.environ as en\n'), ((21898, 21975), 'pyomo.environ.Param', 'en.Param', (['self.model.Time'], {'initialize': 'self.energy_system.tariff.import_tariff'}), '(self.model.Time, initialize=self.energy_system.tariff.import_tariff)\n', (21906, 21975), True, 'import pyomo.environ as en\n'), ((22051, 22128), 'pyomo.environ.Param', 'en.Param', (['self.model.Time'], {'initialize': 'self.energy_system.tariff.export_tariff'}), '(self.model.Time, initialize=self.energy_system.tariff.export_tariff)\n', (22059, 22128), True, 'import pyomo.environ as en\n'), ((22237, 22271), 'pyomo.environ.Var', 'en.Var', ([], {'within': 'en.NonNegativeReals'}), '(within=en.NonNegativeReals)\n', (22243, 22271), True, 'import pyomo.environ as en\n'), ((22328, 22362), 'pyomo.environ.Var', 'en.Var', ([], {'within': 'en.NonNegativeReals'}), '(within=en.NonNegativeReals)\n', (22334, 22362), True, 'import 
pyomo.environ as en\n'), ((22734, 22799), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'peak_connection_point_import'}), '(self.model.Time, rule=peak_connection_point_import)\n', (22747, 22799), True, 'import pyomo.environ as en\n'), ((22936, 23001), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'peak_connection_point_export'}), '(self.model.Time, rule=peak_connection_point_export)\n', (22949, 23001), True, 'import pyomo.environ as en\n'), ((25855, 25929), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'storage_generation_charging_behaviour'}), '(self.model.Time, rule=storage_generation_charging_behaviour)\n', (25868, 25929), True, 'import pyomo.environ as en\n'), ((26064, 26137), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'storage_demand_discharging_behaviour'}), '(self.model.Time, rule=storage_demand_discharging_behaviour)\n', (26077, 26137), True, 'import pyomo.environ as en\n'), ((26952, 27020), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'btm_net_connection_point_import'}), '(self.model.Time, rule=btm_net_connection_point_import)\n', (26965, 27020), True, 'import pyomo.environ as en\n'), ((27068, 27136), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'btm_net_connection_point_export'}), '(self.model.Time, rule=btm_net_connection_point_export)\n', (27081, 27136), True, 'import pyomo.environ as en\n'), ((30817, 30875), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'initialize': 'self.system_demand_dct'}), '(self.model.Time, initialize=self.system_demand_dct)\n', (30823, 30875), True, 'import pyomo.environ as en\n'), ((30970, 31032), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'initialize': 'self.system_generation_dct'}), '(self.model.Time, initialize=self.system_generation_dct)\n', (30976, 31032), True, 'import pyomo.environ as en\n'), ((31150, 31217), 
'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'within': 'en.NonNegativeReals', 'initialize': '(0.0)'}), '(self.model.Time, within=en.NonNegativeReals, initialize=0.0)\n', (31156, 31217), True, 'import pyomo.environ as en\n'), ((31288, 31379), 'pyomo.environ.Param', 'en.Param', (['self.model.Time'], {'initialize': 'self.energy_system.local_tariff.le_import_tariff'}), '(self.model.Time, initialize=self.energy_system.local_tariff.\n le_import_tariff)\n', (31296, 31379), True, 'import pyomo.environ as en\n'), ((31460, 31551), 'pyomo.environ.Param', 'en.Param', (['self.model.Time'], {'initialize': 'self.energy_system.local_tariff.le_export_tariff'}), '(self.model.Time, initialize=self.energy_system.local_tariff.\n le_export_tariff)\n', (31468, 31551), True, 'import pyomo.environ as en\n'), ((31632, 31723), 'pyomo.environ.Param', 'en.Param', (['self.model.Time'], {'initialize': 'self.energy_system.local_tariff.lt_import_tariff'}), '(self.model.Time, initialize=self.energy_system.local_tariff.\n lt_import_tariff)\n', (31640, 31723), True, 'import pyomo.environ as en\n'), ((31804, 31895), 'pyomo.environ.Param', 'en.Param', (['self.model.Time'], {'initialize': 'self.energy_system.local_tariff.lt_export_tariff'}), '(self.model.Time, initialize=self.energy_system.local_tariff.\n lt_export_tariff)\n', (31812, 31895), True, 'import pyomo.environ as en\n'), ((31976, 32067), 'pyomo.environ.Param', 'en.Param', (['self.model.Time'], {'initialize': 'self.energy_system.local_tariff.re_import_tariff'}), '(self.model.Time, initialize=self.energy_system.local_tariff.\n re_import_tariff)\n', (31984, 32067), True, 'import pyomo.environ as en\n'), ((32148, 32239), 'pyomo.environ.Param', 'en.Param', (['self.model.Time'], {'initialize': 'self.energy_system.local_tariff.re_export_tariff'}), '(self.model.Time, initialize=self.energy_system.local_tariff.\n re_export_tariff)\n', (32156, 32239), True, 'import pyomo.environ as en\n'), ((32320, 32411), 'pyomo.environ.Param', 'en.Param', 
(['self.model.Time'], {'initialize': 'self.energy_system.local_tariff.rt_import_tariff'}), '(self.model.Time, initialize=self.energy_system.local_tariff.\n rt_import_tariff)\n', (32328, 32411), True, 'import pyomo.environ as en\n'), ((32492, 32583), 'pyomo.environ.Param', 'en.Param', (['self.model.Time'], {'initialize': 'self.energy_system.local_tariff.rt_export_tariff'}), '(self.model.Time, initialize=self.energy_system.local_tariff.\n rt_export_tariff)\n', (32500, 32583), True, 'import pyomo.environ as en\n'), ((32736, 32770), 'pyomo.environ.Var', 'en.Var', ([], {'within': 'en.NonNegativeReals'}), '(within=en.NonNegativeReals)\n', (32742, 32770), True, 'import pyomo.environ as en\n'), ((32833, 32867), 'pyomo.environ.Var', 'en.Var', ([], {'within': 'en.NonNegativeReals'}), '(within=en.NonNegativeReals)\n', (32839, 32867), True, 'import pyomo.environ as en\n'), ((33627, 33698), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'local_peak_connection_point_import'}), '(self.model.Time, rule=local_peak_connection_point_import)\n', (33640, 33698), True, 'import pyomo.environ as en\n'), ((33841, 33912), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'local_peak_connection_point_export'}), '(self.model.Time, rule=local_peak_connection_point_export)\n', (33854, 33912), True, 'import pyomo.environ as en\n'), ((35303, 35356), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'local_net_import'}), '(self.model.Time, rule=local_net_import)\n', (35316, 35356), True, 'import pyomo.environ as en\n'), ((35406, 35459), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'local_net_export'}), '(self.model.Time, rule=local_net_export)\n', (35419, 35459), True, 'import pyomo.environ as en\n'), ((35521, 35586), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'local_demand_transfer_export'}), '(self.model.Time, rule=local_demand_transfer_export)\n', 
(35534, 35586), True, 'import pyomo.environ as en\n'), ((35648, 35713), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'local_demand_transfer_import'}), '(self.model.Time, rule=local_demand_transfer_import)\n', (35661, 35713), True, 'import pyomo.environ as en\n'), ((3145, 3179), 'os.environ.get', 'os.environ.get', (['"""OPTIMISER_ENGINE"""'], {}), "('OPTIMISER_ENGINE')\n", (3159, 3179), False, 'import os\n'), ((7561, 7603), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'within': 'en.Boolean'}), '(self.model.Time, within=en.Boolean)\n', (7567, 7603), True, 'import pyomo.environ as en\n'), ((7710, 7766), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'within': 'en.Boolean', 'initialize': '(0)'}), '(self.model.Time, within=en.Boolean, initialize=0)\n', (7716, 7766), True, 'import pyomo.environ as en\n'), ((7816, 7872), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'within': 'en.Boolean', 'initialize': '(0)'}), '(self.model.Time, within=en.Boolean, initialize=0)\n', (7822, 7872), True, 'import pyomo.environ as en\n'), ((7925, 7981), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'within': 'en.Boolean', 'initialize': '(0)'}), '(self.model.Time, within=en.Boolean, initialize=0)\n', (7931, 7981), True, 'import pyomo.environ as en\n'), ((8021, 8063), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'within': 'en.Boolean'}), '(self.model.Time, within=en.Boolean)\n', (8027, 8063), True, 'import pyomo.environ as en\n'), ((8174, 8230), 'pyomo.environ.Var', 'en.Var', (['self.model.Time'], {'within': 'en.Boolean', 'initialize': '(0)'}), '(self.model.Time, within=en.Boolean, initialize=0)\n', (8180, 8230), True, 'import pyomo.environ as en\n'), ((14743, 14796), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'bool_cd_rule_one'}), '(self.model.Time, rule=bool_cd_rule_one)\n', (14756, 14796), True, 'import pyomo.environ as en\n'), ((14831, 14884), 'pyomo.environ.Constraint', 
'en.Constraint', (['self.model.Time'], {'rule': 'bool_cd_rule_two'}), '(self.model.Time, rule=bool_cd_rule_two)\n', (14844, 14884), True, 'import pyomo.environ as en\n'), ((14921, 14976), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'bool_cd_rule_three'}), '(self.model.Time, rule=bool_cd_rule_three)\n', (14934, 14976), True, 'import pyomo.environ as en\n'), ((15012, 15066), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'bool_cd_rule_four'}), '(self.model.Time, rule=bool_cd_rule_four)\n', (15025, 15066), True, 'import pyomo.environ as en\n'), ((15102, 15156), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'bool_cd_rule_five'}), '(self.model.Time, rule=bool_cd_rule_five)\n', (15115, 15156), True, 'import pyomo.environ as en\n'), ((38320, 38398), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'electrical_feasability_discharge_grid_one'}), '(self.model.Time, rule=electrical_feasability_discharge_grid_one)\n', (38333, 38398), True, 'import pyomo.environ as en\n'), ((38433, 38511), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'electrical_feasability_discharge_grid_two'}), '(self.model.Time, rule=electrical_feasability_discharge_grid_two)\n', (38446, 38511), True, 'import pyomo.environ as en\n'), ((40935, 41010), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'electrical_feasability_charge_grid_one'}), '(self.model.Time, rule=electrical_feasability_charge_grid_one)\n', (40948, 41010), True, 'import pyomo.environ as en\n'), ((41045, 41120), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'electrical_feasability_charge_grid_two'}), '(self.model.Time, rule=electrical_feasability_charge_grid_two)\n', (41058, 41120), True, 'import pyomo.environ as en\n'), ((42765, 42824), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'import_export_rule_one'}), 
'(self.model.Time, rule=import_export_rule_one)\n', (42778, 42824), True, 'import pyomo.environ as en\n'), ((42857, 42916), 'pyomo.environ.Constraint', 'en.Constraint', (['self.model.Time'], {'rule': 'import_export_rule_two'}), '(self.model.Time, rule=import_export_rule_two)\n', (42870, 42916), True, 'import pyomo.environ as en\n'), ((3442, 3487), 'os.environ.get', 'os.environ.get', (['"""OPTIMISER_ENGINE_EXECUTABLE"""'], {}), "('OPTIMISER_ENGINE_EXECUTABLE')\n", (3456, 3487), False, 'import os\n'), ((17820, 17906), 'pyomo.opt.SolverFactory', 'SolverFactory', (['self.optimiser_engine'], {'executable': 'self.optimiser_engine_executable'}), '(self.optimiser_engine, executable=self.\n optimiser_engine_executable)\n', (17833, 17906), False, 'from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition\n'), ((17942, 17978), 'pyomo.opt.SolverFactory', 'SolverFactory', (['self.optimiser_engine'], {}), '(self.optimiser_engine)\n', (17955, 17978), False, 'from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition\n'), ((18322, 18443), 'logging.warning', 'logging.warning', (['"""WARNING - Loading a SolverResults object with an \'aborted\' status, but containing a solution"""'], {}), '(\n "WARNING - Loading a SolverResults object with an \'aborted\' status, but containing a solution"\n )\n', (18337, 18443), False, 'import logging\n')] |
"""Visualize convergence"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from .test_functions import simple_nonconvex_function, ackley
from .visualisation import FIGSIZE
def max_distances(history, f):
data = {'max_distance': max_distances}
stats = pd.DataFrame(data)
plt.gcf().clear()
stats.max_distance.plot(
logy=True)
plt.show()
def visualize(history, f):
data = {}
plot_range = np.arange(-10, 10, 0.0001)
values = f(plot_range)
min_val, min_loc = values.min(), values.argmin()
location_hist, _ = zip(*history)
location_hist = list(location_hist)
while np.all(location_hist[-1] == location_hist[-2]):
location_hist.pop(-1)
locs = np.array(location_hist).reshape(len(location_hist), -1)
# Norm the values to have value 0 at the min
mean_vals = f(locs).mean(axis=1)
data['mean_values'] = mean_vals
# if f == ackley or f == simple_nonconvex_function:
# if f == simple_nonconvex_function:
# def convex_f(x):
# return x**2
# mean_vals_convex = convex_f(locs).mean(axis=1) - min_val
# data['mean_values_convex'] = mean_vals_convex
# elif f == ackley:
# # Create convex hull on compact set
# left_x, right_x = np.floor(locs.min()), np.ceil(locs.max())
# left_x = min(-20, left_x)
# right_x = max(20, right_x)
# left_y, right_y = f(left_x), f(right_x)
# min_y = f(0)
# def convex_f(x):
# return np.where(
# x > 0,
# x * (right_y / right_x) + min_y,
# x * (left_y / left_x) + min_y)
# mean_vals_convex = convex_f(locs).mean(axis=1) - min_val
# data['mean_values_convex'] = mean_vals_convex
# max_distances = locs.max(axis=1) - locs.min(axis=1)
fig = plt.figure(figsize=FIGSIZE)
ax = fig.add_subplot(111)
df = pd.DataFrame(data)
ax = df.plot(
ax=ax,
logy=True
)
plt.tight_layout()
# win_size = 100
# df.mean_values.rolling(win_size, center=True).mean().plot(
# ax=ax,
# color='blue',
# alpha=0.5,
# linestyle='dashed')
# df.mean_values_convex.rolling(win_size, center=True).mean().plot(
# ax=ax,
# color='orange',
# alpha=0.5,
# linestyle='dashed')
return ax
if False:
data = {
'iteration': np.concatenate((
np.arange(locs.shape[0]), np.arange(locs.shape[0]))),
'mean_values': np.concatenate((mean_vals, mean_vals_convex)),
'convex_hull': ([False for _ in mean_vals] +
[True for _ in mean_vals_convex]),
}
df = pd.DataFrame(data)
df['log_mean_values'] = np.log(df.mean_values)
# df.iteration = df.iteration // 10 * 10
sns.set()
g = sns.lmplot(
data=df,
x='iteration',
y='log_mean_values',
hue='convex_hull',
)
# ax.set(yscale='log')
return g
| [
"seaborn.lmplot",
"seaborn.set",
"matplotlib.pyplot.gcf",
"numpy.log",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.all",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((307, 325), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (319, 325), True, 'import pandas as pd\n'), ((401, 411), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (409, 411), True, 'import matplotlib.pyplot as plt\n'), ((472, 498), 'numpy.arange', 'np.arange', (['(-10)', '(10)', '(0.0001)'], {}), '(-10, 10, 0.0001)\n', (481, 498), True, 'import numpy as np\n'), ((667, 713), 'numpy.all', 'np.all', (['(location_hist[-1] == location_hist[-2])'], {}), '(location_hist[-1] == location_hist[-2])\n', (673, 713), True, 'import numpy as np\n'), ((1940, 1967), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'FIGSIZE'}), '(figsize=FIGSIZE)\n', (1950, 1967), True, 'import matplotlib.pyplot as plt\n'), ((2007, 2025), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2019, 2025), True, 'import pandas as pd\n'), ((2087, 2105), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2103, 2105), True, 'import matplotlib.pyplot as plt\n'), ((2828, 2846), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2840, 2846), True, 'import pandas as pd\n'), ((2879, 2901), 'numpy.log', 'np.log', (['df.mean_values'], {}), '(df.mean_values)\n', (2885, 2901), True, 'import numpy as np\n'), ((2959, 2968), 'seaborn.set', 'sns.set', ([], {}), '()\n', (2966, 2968), True, 'import seaborn as sns\n'), ((2981, 3055), 'seaborn.lmplot', 'sns.lmplot', ([], {'data': 'df', 'x': '"""iteration"""', 'y': '"""log_mean_values"""', 'hue': '"""convex_hull"""'}), "(data=df, x='iteration', y='log_mean_values', hue='convex_hull')\n", (2991, 3055), True, 'import seaborn as sns\n'), ((331, 340), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (338, 340), True, 'import matplotlib.pyplot as plt\n'), ((756, 779), 'numpy.array', 'np.array', (['location_hist'], {}), '(location_hist)\n', (764, 779), True, 'import numpy as np\n'), ((2637, 2682), 'numpy.concatenate', 'np.concatenate', (['(mean_vals, mean_vals_convex)'], {}), 
'((mean_vals, mean_vals_convex))\n', (2651, 2682), True, 'import numpy as np\n'), ((2556, 2580), 'numpy.arange', 'np.arange', (['locs.shape[0]'], {}), '(locs.shape[0])\n', (2565, 2580), True, 'import numpy as np\n'), ((2582, 2606), 'numpy.arange', 'np.arange', (['locs.shape[0]'], {}), '(locs.shape[0])\n', (2591, 2606), True, 'import numpy as np\n')] |
import numpy as np
from numpy import linalg as LA
from sklearn.decomposition import PCA
from utils import pulse_helper
np.set_printoptions(suppress=True)
if __name__ == '__main__':
TEN_BITS_ADC_VALUE = 1023
pedestal = 0
dimension = 7
number_of_data = 20
qtd_for_training = 10
qtd_for_testing = number_of_data - qtd_for_training
noise = pedestal + np.random.randn(number_of_data, dimension)
# Getting data from boundaries
noise_training = noise[:qtd_for_training, :]
noise_testing = noise[qtd_for_testing:, :]
noise_training = np.matrix([
[-0.8756796, -0.9904594, 0.0763564, 0.2866689, -0.8491597, -2.6331943, 0.4875299],
[0.5691059, 1.0500695, 0.2050344, 0.5682747, 0.9148819, 0.9840557, 0.0631626],
[-0.8314480, -1.2014296, 0.0640685, 1.0649667, 0.3001502, 0.7921802, 0.0182123],
[-1.6749044, 0.9083217, 0.4855328, -0.8777319, -0.4533996, 0.4545650, 0.4935619],
[-2.0737103, -0.3444838, 2.6205070, -2.3721530, 0.3851043, 0.2318063, 0.6319781],
[-0.1389740, -0.7860030, -0.0916941, 2.4410314, 1.6296622, -0.3318915, 0.1720283],
[1.5643478, -0.4838364, -1.3568845, -0.0097646, 0.3173777, 1.0363776, 1.1373946],
[1.2958982, -0.4076244, 0.0509913, -0.3809160, -0.0055843, -0.5429556, -1.2209100],
[-0.4553984, 0.2447099, -0.0911029, 1.5816856, -0.1473872, -0.2368131, 0.7251548],
[0.8811581, 0.2740093, 0.2105274, 0.0017615, -0.1050384, 0.5999477, -0.4440812]
])
noise_testing = np.matrix([
[0.9431525, 0.4929578, 0.8702650, -0.2384978, 1.1779368, 0.9613488, 0.7198087],
[-0.1507800, -0.8556539, -0.3505294, 0.4385733, 0.2599012, 0.6289688, 1.1821911],
[-1.1579471, -0.7761330, 0.5048862, -0.7994381, 0.5862172, -1.0139671, 0.0973860],
[-0.8073489, 0.6542282, 0.2723901, -0.5065714, 1.4904722, 0.4038073, -0.9281103],
[0.7330344, -0.5733226, 0.2751025, 2.3656258, -0.1680237, -0.6095974, -0.9048640],
[-1.0540677, 0.2591932, 0.5585528, 0.5630835, 0.3494722, 0.3133169, 0.1195288],
[1.0160440, 1.0072891, 0.2649281, 0.0984453, 0.8794560, 0.7006237, -0.4429727],
[-0.9385259, 1.1363805, 0.3755675, -0.3434197, -0.2836825, 0.3928002, -0.5461819],
[-0.0046605, 1.2195139, 0.0973187, 1.4662556, -1.0468390, 0.7239156, 0.5695964],
[0.7419353, -0.1169670, 0.8664425, 0.4642078, 0.2907896, -1.3489066, 0.3598890]
])
amplitude = np.zeros(qtd_for_testing)
signal_testing = np.zeros((qtd_for_testing, dimension))
for i in range(0, qtd_for_testing):
amplitude[i] = TEN_BITS_ADC_VALUE * np.random.random(1)
signal_testing[i, :] = pedestal + np.random.randn(1, dimension) + \
np.multiply(amplitude[i], pulse_helper.get_jitter_pulse())
amplitude = [775.902, 10.351, 771.456, 1019.714, 512.243, 396.846, 65.751, 68.398, 439.107, 913.696]
signal_testing = np.matrix([
[0.236776, 11.798491, 352.442790, 777.724908, 436.837438, 115.284537, 31.993758],
[-0.868811, -0.044646, 3.415924, 9.414231, 6.257210, 2.728463, -0.782330],
[0.047594, 14.065240, 348.083577, 772.605168, 432.601323, 114.304198, 33.564886],
[0.696711, 19.768088, 461.249975, 1019.274032, 575.525264, 152.047669, 44.799316],
[-0.508484, 7.670365, 233.040694, 512.310657, 288.821356, 74.425243, 21.451117],
[-0.970319, 5.826021, 181.056663, 398.103175, 223.515092, 59.347880, 18.228919],
[0.413107, 1.553945, 29.551838, 64.758933, 35.236955, 10.107259, 3.132132],
[0.444259, 2.005030, 30.434815, 68.274549, 37.801127, 10.440632, 2.696392],
[-0.177446, 8.735986, 197.462098, 437.059435, 247.049569, 63.410463, 17.028772],
[-1.036739, 15.283146, 413.917933, 912.159329, 513.726378, 135.862961, 38.262066]
])
# Branqueamento
# noise_train_cov = np.cov(noise_training)
noise_train_cov = np.matrix([
[1.5238484, 0.0343165, -0.8543985, 0.4900083, 0.1731590, 0.2918901, -0.3036230],
[0.0343165, 0.5913561, 0.0801816, -0.2130887, -0.0283764, 0.3523750, 0.0020615],
[-0.8543985, 0.0801816, 0.9541332, -0.8122446, -0.0099779, -0.0503286, -0.0313772],
[0.4900083, -0.2130887, -0.8122446, 1.7790657, 0.3740738, -0.1521554, -0.0214180],
[0.1731590, -0.0283764, -0.0099779, 0.3740738, 0.4885224, 0.3277632, -0.0170296],
[0.2918901, 0.3523750, -0.0503286, -0.1521554, 0.3277632, 1.1858336, 0.0485573],
[-0.3036230, 0.0020615, -0.0313772, -0.0214180, -0.0170296, 0.0485573, 0.4439914]
])
[D, V] = LA.eig(noise_train_cov)
# TODO Discover why I cant do it after np.diag for whitening filter.
# If I do it, I am getting a diag matrix with other elements as inf.
D = D**(-.5)
# eig returns D as an array, we need to transform it into a diagonal matrix
D = np.diag(D)
W = D * np.transpose(V)
W = np.matrix([
[-1.6748373, 0.5175131, -2.9246219, -1.3834463, 2.2066308, -0.6199292, -1.4226427],
[0.6572618, 0.6355380, 0.3928698, -0.2736311, 1.1956474, -0.9112701, 1.1674937],
[-0.0684082, 1.2086504, 0.2564293, 0.3959205, -0.2316473, -0.3087873, -0.6167130],
[0.2274185, -0.4449974, 0.6545361, 0.0488708, 0.6400391, -0.0656841, -0.8013342],
[-0.4987594, 0.0996750, 0.1633758, 0.4668367, 0.3917242, 0.4615647, 0.2400612],
[0.3093050, 0.2629946, 0.0019590, -0.3352125, 0.0916674, 0.5640587, -0.0501276],
[0.3268204, -0.0336678, -0.2916859, 0.3680302, 0.0858922, 0.0387934, -0.0389346]
])
W_t = np.transpose(W)
# PCA Part
pure_signal = np.zeros((qtd_for_testing, dimension))
for i in range(0, qtd_for_testing):
pure_signal[i, :] = np.multiply(
TEN_BITS_ADC_VALUE * np.random.random(1),
pulse_helper.get_jitter_pulse()
)
pure_signal = np.matrix([
[0.00089477, 0.66899891, 17.57100806, 38.83567740, 21.87640893, 5.79952588, 1.64507153],
[0.00463039, 3.46201728, 90.92859833, 200.97160612, 113.20871253, 30.01209480, 8.51311704],
[0.00371731, 2.77933154, 72.99811097, 161.34140276, 90.88474157, 24.09391838, 6.83438955],
[0.01695149, 12.67418518, 332.88276802, 735.74195320, 414.44859243, 109.87202458, 31.16588199],
[0.00950488, 7.10654984, 186.65089286, 412.53830380, 232.38571430, 61.60640760, 17.47504004],
[0.01038399, 7.76383156, 203.91415341, 450.69379353, 253.87896875, 67.30435766, 19.09129896],
[0.00079089, 0.59132464, 15.53092201, 34.32665187, 19.33644329, 5.12617056, 1.45407011],
[0.02063659, 15.42943668, 405.24842555, 895.68549890, 504.54591133, 133.75719398, 37.94105860],
[0.00129928, 0.97144042, 25.51452205, 56.39253843, 31.76631165, 8.42137973, 2.38877665],
[0.01686500, 12.60951796, 331.18430740, 731.98799279, 412.33396025, 109.31142690, 31.00686498]
])
n_pca_components = dimension
pca = PCA(n_components=n_pca_components)
coeff = pca.fit(pure_signal * W_t).components_
Y = pca.explained_variance_
# stochastic filter params
# ddof=1 to use Sampled data variance -> N-1
variance = np.var(noise_training[:, 3], ddof=1)
reference_pulse = [0.0000, 0.0172, 0.4524, 1.0000, 0.5633, 0.1493, 0.0424]
bleached_reference_pulse = reference_pulse * W_t
optimal_reference_pulse = bleached_reference_pulse * \
np.transpose(coeff[:, :n_pca_components])
optimal_noise = ((noise_testing - pedestal) * W_t) * np.transpose(coeff[:, :n_pca_components])
optimal_signal = ((signal_testing - pedestal) * W_t) * np.transpose(coeff[:, :n_pca_components])
No = variance * 2
h1 = np.zeros((dimension, dimension))
h2 = np.zeros((dimension, dimension))
for i in range(0, n_pca_components):
h1 = h1 + (Y[i] / (Y[i] + variance)) * (np.transpose(np.asmatrix(coeff[i, :])) * coeff[i, :])
h2 = h2 + (1.0 / (Y[i] + variance)) * (np.transpose(np.asmatrix(coeff[i, :])) * coeff[i, :])
IR_noise = np.zeros((len(noise_testing), 1))
IR_signal = np.zeros((len(signal_testing), 1))
for ev in range(0, len(noise_testing)):
IR_noise[ev] = (1.0 / No) * np.transpose((
(optimal_noise[ev, :] * (coeff[:, :n_pca_components])) * h1 *
np.transpose(optimal_noise[ev, :] * (coeff[:, :n_pca_components]))
))
for ev in range(0, len(signal_testing)):
IR_signal[ev] = (1.0 / No) * np.transpose((
(optimal_signal[ev, :] * (coeff[:, :n_pca_components])) * h1 *
np.transpose(optimal_signal[ev, :] * (coeff[:, :n_pca_components]))
))
ID_noise = np.zeros((len(noise_testing), 1))
ID_signal = np.zeros((len(signal_testing), 1))
for ev in range(0, len(noise_testing)):
ID_noise[ev] = ((optimal_reference_pulse * coeff[:, :n_pca_components]) * h2 *
np.transpose(optimal_noise[ev, :] * coeff[:, :n_pca_components]))
for ev in range(0, len(signal_testing)):
ID_signal[ev] = ((optimal_reference_pulse * coeff[:, :n_pca_components]) * h2 *
np.transpose(optimal_signal[ev, :] * coeff[:, :n_pca_components]))
# Matched Filter estimatives
estimated_noise = ID_noise + IR_noise
estimated_signal = ID_signal + IR_signal
test = np.matrix([
[-0.784071, 0.597765, -0.063745, 0.110153, 0.061466, -0.082475, 0.033644],
[0.244821, 0.182834, 0.514858, 0.442823, 0.432092, -0.506405, -0.048100],
[0.160964, 0.424736, 0.634737, -0.539170, -0.226289, 0.203796, 0.085756],
[0.319035, 0.417696, -0.203085, 0.233144, 0.383461, 0.652085, -0.236409],
[0.410766, 0.486923, -0.435696, -0.011337, -0.437815, -0.438568, -0.142103],
[-0.095613, -0.081021, 0.309344, 0.568404, -0.625782, 0.234851, -0.344613],
[0.140470, 0.103537, -0.034607, 0.351428, -0.167281, 0.150022, 0.891269]
]).T
mEstimacao = np.matrix([
2.050257740, 0.000019448, -0.000026811, 0.000017090, 0.000031557, -0.000072463, 0.000027790
])
# Amplitue estimative
b1 = coeff[:, :n_pca_components].T.dot(coeff[:, :n_pca_components])
b2 = (1.0 / No) * (
coeff[:, :n_pca_components].T * h1 *
coeff[:, :n_pca_components]
)
b3 = (optimal_reference_pulse * coeff[:, :n_pca_components]) * h2 * coeff[:, :n_pca_components]
# ampRuido = zeros(size(ruidoTes,1),1);
# ampSinal = zeros(size(sinalTes,1),1);
# a = (1/No)*((mEstimacao*COEFF(:,1:N)')*h1*(mEstimacao*COEFF(:,1:N)')');
# b = (mEstimacao*COEFF(:,1:N)')*h2*(mEstimacao*COEFF(:,1:N)')';
# cs=0;
# cr=0;
# for i=1:size(sinalTes,1)
# ra = b*b+4*a*FCestSinal(i);
# if ra<0
# ra=0;
# cs=cs+1;
# end
# ampSinal(i) = (-b+sqrt(ra))/(2*a); % amplitude do sinal usando a saida do filtro casado
# end
# for i=1:size(ruidoTes,1)
# ra = b*b+4*a*FCestRuido(i);
# if ra<0
# ra=0;
# cr=cr+1;
# end
# ampRuido(i) = (-b+sqrt(ra))/(2*a); % amplitude do ruido usando a saida do filtro casado
# end
# from pudb import set_trace; set_trace()
print('==================== b2')
print(b2)
print('====================')
# print('==================== np.transpose(coeff[:, :n_pca_components])')
# print(np.transpose(coeff[:, :n_pca_components]))
print('====================')
# print('==================== coeff[:, :n_pca_components]')
# print(coeff[:, :n_pca_components])
print('====================')
print('====================')
print('====================')
print('====================')
print('====================')
print('====================')
print('====================')
print('====================')
print('====================')
print('====================')
print('====================')
| [
"numpy.linalg.eig",
"sklearn.decomposition.PCA",
"numpy.random.random",
"numpy.asmatrix",
"utils.pulse_helper.get_jitter_pulse",
"numpy.diag",
"numpy.zeros",
"numpy.matrix",
"numpy.transpose",
"numpy.random.randn",
"numpy.var",
"numpy.set_printoptions"
] | [((121, 155), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (140, 155), True, 'import numpy as np\n'), ((574, 1451), 'numpy.matrix', 'np.matrix', (['[[-0.8756796, -0.9904594, 0.0763564, 0.2866689, -0.8491597, -2.6331943, \n 0.4875299], [0.5691059, 1.0500695, 0.2050344, 0.5682747, 0.9148819, \n 0.9840557, 0.0631626], [-0.831448, -1.2014296, 0.0640685, 1.0649667, \n 0.3001502, 0.7921802, 0.0182123], [-1.6749044, 0.9083217, 0.4855328, -\n 0.8777319, -0.4533996, 0.454565, 0.4935619], [-2.0737103, -0.3444838, \n 2.620507, -2.372153, 0.3851043, 0.2318063, 0.6319781], [-0.138974, -\n 0.786003, -0.0916941, 2.4410314, 1.6296622, -0.3318915, 0.1720283], [\n 1.5643478, -0.4838364, -1.3568845, -0.0097646, 0.3173777, 1.0363776, \n 1.1373946], [1.2958982, -0.4076244, 0.0509913, -0.380916, -0.0055843, -\n 0.5429556, -1.22091], [-0.4553984, 0.2447099, -0.0911029, 1.5816856, -\n 0.1473872, -0.2368131, 0.7251548], [0.8811581, 0.2740093, 0.2105274, \n 0.0017615, -0.1050384, 0.5999477, -0.4440812]]'], {}), '([[-0.8756796, -0.9904594, 0.0763564, 0.2866689, -0.8491597, -\n 2.6331943, 0.4875299], [0.5691059, 1.0500695, 0.2050344, 0.5682747, \n 0.9148819, 0.9840557, 0.0631626], [-0.831448, -1.2014296, 0.0640685, \n 1.0649667, 0.3001502, 0.7921802, 0.0182123], [-1.6749044, 0.9083217, \n 0.4855328, -0.8777319, -0.4533996, 0.454565, 0.4935619], [-2.0737103, -\n 0.3444838, 2.620507, -2.372153, 0.3851043, 0.2318063, 0.6319781], [-\n 0.138974, -0.786003, -0.0916941, 2.4410314, 1.6296622, -0.3318915, \n 0.1720283], [1.5643478, -0.4838364, -1.3568845, -0.0097646, 0.3173777, \n 1.0363776, 1.1373946], [1.2958982, -0.4076244, 0.0509913, -0.380916, -\n 0.0055843, -0.5429556, -1.22091], [-0.4553984, 0.2447099, -0.0911029, \n 1.5816856, -0.1473872, -0.2368131, 0.7251548], [0.8811581, 0.2740093, \n 0.2105274, 0.0017615, -0.1050384, 0.5999477, -0.4440812]])\n', (583, 1451), True, 'import numpy as np\n'), ((1601, 2471), 'numpy.matrix', 'np.matrix', 
(['[[0.9431525, 0.4929578, 0.870265, -0.2384978, 1.1779368, 0.9613488, \n 0.7198087], [-0.15078, -0.8556539, -0.3505294, 0.4385733, 0.2599012, \n 0.6289688, 1.1821911], [-1.1579471, -0.776133, 0.5048862, -0.7994381, \n 0.5862172, -1.0139671, 0.097386], [-0.8073489, 0.6542282, 0.2723901, -\n 0.5065714, 1.4904722, 0.4038073, -0.9281103], [0.7330344, -0.5733226, \n 0.2751025, 2.3656258, -0.1680237, -0.6095974, -0.904864], [-1.0540677, \n 0.2591932, 0.5585528, 0.5630835, 0.3494722, 0.3133169, 0.1195288], [\n 1.016044, 1.0072891, 0.2649281, 0.0984453, 0.879456, 0.7006237, -\n 0.4429727], [-0.9385259, 1.1363805, 0.3755675, -0.3434197, -0.2836825, \n 0.3928002, -0.5461819], [-0.0046605, 1.2195139, 0.0973187, 1.4662556, -\n 1.046839, 0.7239156, 0.5695964], [0.7419353, -0.116967, 0.8664425, \n 0.4642078, 0.2907896, -1.3489066, 0.359889]]'], {}), '([[0.9431525, 0.4929578, 0.870265, -0.2384978, 1.1779368, \n 0.9613488, 0.7198087], [-0.15078, -0.8556539, -0.3505294, 0.4385733, \n 0.2599012, 0.6289688, 1.1821911], [-1.1579471, -0.776133, 0.5048862, -\n 0.7994381, 0.5862172, -1.0139671, 0.097386], [-0.8073489, 0.6542282, \n 0.2723901, -0.5065714, 1.4904722, 0.4038073, -0.9281103], [0.7330344, -\n 0.5733226, 0.2751025, 2.3656258, -0.1680237, -0.6095974, -0.904864], [-\n 1.0540677, 0.2591932, 0.5585528, 0.5630835, 0.3494722, 0.3133169, \n 0.1195288], [1.016044, 1.0072891, 0.2649281, 0.0984453, 0.879456, \n 0.7006237, -0.4429727], [-0.9385259, 1.1363805, 0.3755675, -0.3434197, \n -0.2836825, 0.3928002, -0.5461819], [-0.0046605, 1.2195139, 0.0973187, \n 1.4662556, -1.046839, 0.7239156, 0.5695964], [0.7419353, -0.116967, \n 0.8664425, 0.4642078, 0.2907896, -1.3489066, 0.359889]])\n', (1610, 2471), True, 'import numpy as np\n'), ((2616, 2641), 'numpy.zeros', 'np.zeros', (['qtd_for_testing'], {}), '(qtd_for_testing)\n', (2624, 2641), True, 'import numpy as np\n'), ((2663, 2701), 'numpy.zeros', 'np.zeros', (['(qtd_for_testing, dimension)'], {}), '((qtd_for_testing, dimension))\n', 
(2671, 2701), True, 'import numpy as np\n'), ((3082, 3941), 'numpy.matrix', 'np.matrix', (['[[0.236776, 11.798491, 352.44279, 777.724908, 436.837438, 115.284537, \n 31.993758], [-0.868811, -0.044646, 3.415924, 9.414231, 6.25721, \n 2.728463, -0.78233], [0.047594, 14.06524, 348.083577, 772.605168, \n 432.601323, 114.304198, 33.564886], [0.696711, 19.768088, 461.249975, \n 1019.274032, 575.525264, 152.047669, 44.799316], [-0.508484, 7.670365, \n 233.040694, 512.310657, 288.821356, 74.425243, 21.451117], [-0.970319, \n 5.826021, 181.056663, 398.103175, 223.515092, 59.34788, 18.228919], [\n 0.413107, 1.553945, 29.551838, 64.758933, 35.236955, 10.107259, \n 3.132132], [0.444259, 2.00503, 30.434815, 68.274549, 37.801127, \n 10.440632, 2.696392], [-0.177446, 8.735986, 197.462098, 437.059435, \n 247.049569, 63.410463, 17.028772], [-1.036739, 15.283146, 413.917933, \n 912.159329, 513.726378, 135.862961, 38.262066]]'], {}), '([[0.236776, 11.798491, 352.44279, 777.724908, 436.837438, \n 115.284537, 31.993758], [-0.868811, -0.044646, 3.415924, 9.414231, \n 6.25721, 2.728463, -0.78233], [0.047594, 14.06524, 348.083577, \n 772.605168, 432.601323, 114.304198, 33.564886], [0.696711, 19.768088, \n 461.249975, 1019.274032, 575.525264, 152.047669, 44.799316], [-0.508484,\n 7.670365, 233.040694, 512.310657, 288.821356, 74.425243, 21.451117], [-\n 0.970319, 5.826021, 181.056663, 398.103175, 223.515092, 59.34788, \n 18.228919], [0.413107, 1.553945, 29.551838, 64.758933, 35.236955, \n 10.107259, 3.132132], [0.444259, 2.00503, 30.434815, 68.274549, \n 37.801127, 10.440632, 2.696392], [-0.177446, 8.735986, 197.462098, \n 437.059435, 247.049569, 63.410463, 17.028772], [-1.036739, 15.283146, \n 413.917933, 912.159329, 513.726378, 135.862961, 38.262066]])\n', (3091, 3941), True, 'import numpy as np\n'), ((4278, 4895), 'numpy.matrix', 'np.matrix', (['[[1.5238484, 0.0343165, -0.8543985, 0.4900083, 0.173159, 0.2918901, -\n 0.303623], [0.0343165, 0.5913561, 0.0801816, -0.2130887, -0.0283764, \n 
0.352375, 0.0020615], [-0.8543985, 0.0801816, 0.9541332, -0.8122446, -\n 0.0099779, -0.0503286, -0.0313772], [0.4900083, -0.2130887, -0.8122446,\n 1.7790657, 0.3740738, -0.1521554, -0.021418], [0.173159, -0.0283764, -\n 0.0099779, 0.3740738, 0.4885224, 0.3277632, -0.0170296], [0.2918901, \n 0.352375, -0.0503286, -0.1521554, 0.3277632, 1.1858336, 0.0485573], [-\n 0.303623, 0.0020615, -0.0313772, -0.021418, -0.0170296, 0.0485573, \n 0.4439914]]'], {}), '([[1.5238484, 0.0343165, -0.8543985, 0.4900083, 0.173159, \n 0.2918901, -0.303623], [0.0343165, 0.5913561, 0.0801816, -0.2130887, -\n 0.0283764, 0.352375, 0.0020615], [-0.8543985, 0.0801816, 0.9541332, -\n 0.8122446, -0.0099779, -0.0503286, -0.0313772], [0.4900083, -0.2130887,\n -0.8122446, 1.7790657, 0.3740738, -0.1521554, -0.021418], [0.173159, -\n 0.0283764, -0.0099779, 0.3740738, 0.4885224, 0.3277632, -0.0170296], [\n 0.2918901, 0.352375, -0.0503286, -0.1521554, 0.3277632, 1.1858336, \n 0.0485573], [-0.303623, 0.0020615, -0.0313772, -0.021418, -0.0170296, \n 0.0485573, 0.4439914]])\n', (4287, 4895), True, 'import numpy as np\n'), ((5039, 5062), 'numpy.linalg.eig', 'LA.eig', (['noise_train_cov'], {}), '(noise_train_cov)\n', (5045, 5062), True, 'from numpy import linalg as LA\n'), ((5316, 5326), 'numpy.diag', 'np.diag', (['D'], {}), '(D)\n', (5323, 5326), True, 'import numpy as np\n'), ((5365, 5984), 'numpy.matrix', 'np.matrix', (['[[-1.6748373, 0.5175131, -2.9246219, -1.3834463, 2.2066308, -0.6199292, -\n 1.4226427], [0.6572618, 0.635538, 0.3928698, -0.2736311, 1.1956474, -\n 0.9112701, 1.1674937], [-0.0684082, 1.2086504, 0.2564293, 0.3959205, -\n 0.2316473, -0.3087873, -0.616713], [0.2274185, -0.4449974, 0.6545361, \n 0.0488708, 0.6400391, -0.0656841, -0.8013342], [-0.4987594, 0.099675, \n 0.1633758, 0.4668367, 0.3917242, 0.4615647, 0.2400612], [0.309305, \n 0.2629946, 0.001959, -0.3352125, 0.0916674, 0.5640587, -0.0501276], [\n 0.3268204, -0.0336678, -0.2916859, 0.3680302, 0.0858922, 0.0387934, -\n 0.0389346]]'], 
{}), '([[-1.6748373, 0.5175131, -2.9246219, -1.3834463, 2.2066308, -\n 0.6199292, -1.4226427], [0.6572618, 0.635538, 0.3928698, -0.2736311, \n 1.1956474, -0.9112701, 1.1674937], [-0.0684082, 1.2086504, 0.2564293, \n 0.3959205, -0.2316473, -0.3087873, -0.616713], [0.2274185, -0.4449974, \n 0.6545361, 0.0488708, 0.6400391, -0.0656841, -0.8013342], [-0.4987594, \n 0.099675, 0.1633758, 0.4668367, 0.3917242, 0.4615647, 0.2400612], [\n 0.309305, 0.2629946, 0.001959, -0.3352125, 0.0916674, 0.5640587, -\n 0.0501276], [0.3268204, -0.0336678, -0.2916859, 0.3680302, 0.0858922, \n 0.0387934, -0.0389346]])\n', (5374, 5984), True, 'import numpy as np\n'), ((6102, 6117), 'numpy.transpose', 'np.transpose', (['W'], {}), '(W)\n', (6114, 6117), True, 'import numpy as np\n'), ((6152, 6190), 'numpy.zeros', 'np.zeros', (['(qtd_for_testing, dimension)'], {}), '((qtd_for_testing, dimension))\n', (6160, 6190), True, 'import numpy as np\n'), ((6459, 7449), 'numpy.matrix', 'np.matrix', (['[[0.00089477, 0.66899891, 17.57100806, 38.8356774, 21.87640893, 5.79952588,\n 1.64507153], [0.00463039, 3.46201728, 90.92859833, 200.97160612, \n 113.20871253, 30.0120948, 8.51311704], [0.00371731, 2.77933154, \n 72.99811097, 161.34140276, 90.88474157, 24.09391838, 6.83438955], [\n 0.01695149, 12.67418518, 332.88276802, 735.7419532, 414.44859243, \n 109.87202458, 31.16588199], [0.00950488, 7.10654984, 186.65089286, \n 412.5383038, 232.3857143, 61.6064076, 17.47504004], [0.01038399, \n 7.76383156, 203.91415341, 450.69379353, 253.87896875, 67.30435766, \n 19.09129896], [0.00079089, 0.59132464, 15.53092201, 34.32665187, \n 19.33644329, 5.12617056, 1.45407011], [0.02063659, 15.42943668, \n 405.24842555, 895.6854989, 504.54591133, 133.75719398, 37.9410586], [\n 0.00129928, 0.97144042, 25.51452205, 56.39253843, 31.76631165, \n 8.42137973, 2.38877665], [0.016865, 12.60951796, 331.1843074, \n 731.98799279, 412.33396025, 109.3114269, 31.00686498]]'], {}), '([[0.00089477, 0.66899891, 17.57100806, 38.8356774, 
21.87640893, \n 5.79952588, 1.64507153], [0.00463039, 3.46201728, 90.92859833, \n 200.97160612, 113.20871253, 30.0120948, 8.51311704], [0.00371731, \n 2.77933154, 72.99811097, 161.34140276, 90.88474157, 24.09391838, \n 6.83438955], [0.01695149, 12.67418518, 332.88276802, 735.7419532, \n 414.44859243, 109.87202458, 31.16588199], [0.00950488, 7.10654984, \n 186.65089286, 412.5383038, 232.3857143, 61.6064076, 17.47504004], [\n 0.01038399, 7.76383156, 203.91415341, 450.69379353, 253.87896875, \n 67.30435766, 19.09129896], [0.00079089, 0.59132464, 15.53092201, \n 34.32665187, 19.33644329, 5.12617056, 1.45407011], [0.02063659, \n 15.42943668, 405.24842555, 895.6854989, 504.54591133, 133.75719398, \n 37.9410586], [0.00129928, 0.97144042, 25.51452205, 56.39253843, \n 31.76631165, 8.42137973, 2.38877665], [0.016865, 12.60951796, \n 331.1843074, 731.98799279, 412.33396025, 109.3114269, 31.00686498]])\n', (6468, 7449), True, 'import numpy as np\n'), ((7712, 7746), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_pca_components'}), '(n_components=n_pca_components)\n', (7715, 7746), False, 'from sklearn.decomposition import PCA\n'), ((7926, 7962), 'numpy.var', 'np.var', (['noise_training[:, 3]'], {'ddof': '(1)'}), '(noise_training[:, 3], ddof=1)\n', (7932, 7962), True, 'import numpy as np\n'), ((8438, 8470), 'numpy.zeros', 'np.zeros', (['(dimension, dimension)'], {}), '((dimension, dimension))\n', (8446, 8470), True, 'import numpy as np\n'), ((8480, 8512), 'numpy.zeros', 'np.zeros', (['(dimension, dimension)'], {}), '((dimension, dimension))\n', (8488, 8512), True, 'import numpy as np\n'), ((10814, 10914), 'numpy.matrix', 'np.matrix', (['[2.05025774, 1.9448e-05, -2.6811e-05, 1.709e-05, 3.1557e-05, -7.2463e-05, \n 2.779e-05]'], {}), '([2.05025774, 1.9448e-05, -2.6811e-05, 1.709e-05, 3.1557e-05, -\n 7.2463e-05, 2.779e-05])\n', (10823, 10914), True, 'import numpy as np\n'), ((379, 421), 'numpy.random.randn', 'np.random.randn', (['number_of_data', 'dimension'], {}), 
'(number_of_data, dimension)\n', (394, 421), True, 'import numpy as np\n'), ((5340, 5355), 'numpy.transpose', 'np.transpose', (['V'], {}), '(V)\n', (5352, 5355), True, 'import numpy as np\n'), ((8163, 8204), 'numpy.transpose', 'np.transpose', (['coeff[:, :n_pca_components]'], {}), '(coeff[:, :n_pca_components])\n', (8175, 8204), True, 'import numpy as np\n'), ((8263, 8304), 'numpy.transpose', 'np.transpose', (['coeff[:, :n_pca_components]'], {}), '(coeff[:, :n_pca_components])\n', (8275, 8304), True, 'import numpy as np\n'), ((8364, 8405), 'numpy.transpose', 'np.transpose', (['coeff[:, :n_pca_components]'], {}), '(coeff[:, :n_pca_components])\n', (8376, 8405), True, 'import numpy as np\n'), ((10111, 10673), 'numpy.matrix', 'np.matrix', (['[[-0.784071, 0.597765, -0.063745, 0.110153, 0.061466, -0.082475, 0.033644],\n [0.244821, 0.182834, 0.514858, 0.442823, 0.432092, -0.506405, -0.0481],\n [0.160964, 0.424736, 0.634737, -0.53917, -0.226289, 0.203796, 0.085756],\n [0.319035, 0.417696, -0.203085, 0.233144, 0.383461, 0.652085, -0.236409\n ], [0.410766, 0.486923, -0.435696, -0.011337, -0.437815, -0.438568, -\n 0.142103], [-0.095613, -0.081021, 0.309344, 0.568404, -0.625782, \n 0.234851, -0.344613], [0.14047, 0.103537, -0.034607, 0.351428, -\n 0.167281, 0.150022, 0.891269]]'], {}), '([[-0.784071, 0.597765, -0.063745, 0.110153, 0.061466, -0.082475, \n 0.033644], [0.244821, 0.182834, 0.514858, 0.442823, 0.432092, -0.506405,\n -0.0481], [0.160964, 0.424736, 0.634737, -0.53917, -0.226289, 0.203796,\n 0.085756], [0.319035, 0.417696, -0.203085, 0.233144, 0.383461, 0.652085,\n -0.236409], [0.410766, 0.486923, -0.435696, -0.011337, -0.437815, -\n 0.438568, -0.142103], [-0.095613, -0.081021, 0.309344, 0.568404, -\n 0.625782, 0.234851, -0.344613], [0.14047, 0.103537, -0.034607, 0.351428,\n -0.167281, 0.150022, 0.891269]])\n', (10120, 10673), True, 'import numpy as np\n'), ((2787, 2806), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (2803, 2806), True, 'import 
numpy as np\n'), ((6378, 6409), 'utils.pulse_helper.get_jitter_pulse', 'pulse_helper.get_jitter_pulse', ([], {}), '()\n', (6407, 6409), False, 'from utils import pulse_helper\n'), ((9687, 9751), 'numpy.transpose', 'np.transpose', (['(optimal_noise[ev, :] * coeff[:, :n_pca_components])'], {}), '(optimal_noise[ev, :] * coeff[:, :n_pca_components])\n', (9699, 9751), True, 'import numpy as np\n'), ((9911, 9976), 'numpy.transpose', 'np.transpose', (['(optimal_signal[ev, :] * coeff[:, :n_pca_components])'], {}), '(optimal_signal[ev, :] * coeff[:, :n_pca_components])\n', (9923, 9976), True, 'import numpy as np\n'), ((2849, 2878), 'numpy.random.randn', 'np.random.randn', (['(1)', 'dimension'], {}), '(1, dimension)\n', (2864, 2878), True, 'import numpy as np\n'), ((2921, 2952), 'utils.pulse_helper.get_jitter_pulse', 'pulse_helper.get_jitter_pulse', ([], {}), '()\n', (2950, 2952), False, 'from utils import pulse_helper\n'), ((6325, 6344), 'numpy.random.random', 'np.random.random', (['(1)'], {}), '(1)\n', (6341, 6344), True, 'import numpy as np\n'), ((9057, 9121), 'numpy.transpose', 'np.transpose', (['(optimal_noise[ev, :] * coeff[:, :n_pca_components])'], {}), '(optimal_noise[ev, :] * coeff[:, :n_pca_components])\n', (9069, 9121), True, 'import numpy as np\n'), ((9344, 9409), 'numpy.transpose', 'np.transpose', (['(optimal_signal[ev, :] * coeff[:, :n_pca_components])'], {}), '(optimal_signal[ev, :] * coeff[:, :n_pca_components])\n', (9356, 9409), True, 'import numpy as np\n'), ((8616, 8640), 'numpy.asmatrix', 'np.asmatrix', (['coeff[i, :]'], {}), '(coeff[i, :])\n', (8627, 8640), True, 'import numpy as np\n'), ((8717, 8741), 'numpy.asmatrix', 'np.asmatrix', (['coeff[i, :]'], {}), '(coeff[i, :])\n', (8728, 8741), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 14:42:02 2020
@author: figueroa
"""
import sys
import numpy as np
import warnings
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C
from scipy import linalg
from scipy.spatial import distance
from tqdm import tqdm
from multiprocessing import Pool
from scipy.optimize import curve_fit
np.random.seed()
warnings.filterwarnings('ignore')
# In[]
"""
GPR Tools
"""
# =============================================================================
# Ordinary Kriging Optimizer
# =============================================================================
def kriging(X, Y):
    """Train a GP on (X, Y) with scikit-learn and precompute prediction terms.

    Kernel: constant * anisotropic squared-exponential + white noise.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training inputs.
    Y : array-like, shape (n_samples,)
        Training outputs (normalised internally by sklearn).

    Returns
    -------
    list
        [Params, LAMBDA, alpha_, L_inv] where
        - Params : np.exp of the fitted kernel theta,
          ordered [constant, length-scales..., noise level];
        - LAMBDA : diagonal matrix with 1/length_scale**2 on the diagonal
          (the inverse-squared length scales used by GPR_MODEL);
        - alpha_ : dual coefficients of the fitted GP;
        - L_inv  : inverse of the Cholesky factor of the training kernel.
    """
    # =========================================================================
    # Instantiate Gaussian Process
    # =========================================================================
    kernel = C(10.0, (1e-6, 1e6)) *\
        RBF([10 for i in range(len(X[0]))], (1e-5, 1e10)) +\
        WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+4))
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=150,
                                  normalize_y=True)
    gp.fit(X, Y)
    # =========================================================================
    # Precompute kernel parameters. theta is stored in log-space, hence the
    # exp. (The original computed this twice; once is enough.)
    # =========================================================================
    Params = np.exp(gp.kernel_.theta)
    alpha_ = gp.alpha_
    L_inv = linalg.solve_triangular(gp.L_.T, np.eye(gp.L_.shape[0]))
    LAMBDA = np.eye(len(X[0]))
    length_scales = (1/Params[1:-1])**2
    np.fill_diagonal(LAMBDA, length_scales)
    return [Params, LAMBDA, alpha_, L_inv]
# =============================================================================
# GPR Model:
# =============================================================================
def GPR_MODEL(Constant,Lambda_Inv,Xtrain,Ytrain,alpha_,Xtest):#,tconstant):
    """Posterior mean of the GP at a single test point.

    Parameters
    ----------
    Constant : float
        Kernel amplitude (the constant-kernel factor).
    Lambda_Inv : ndarray (d, d)
        Diagonal matrix of inverse-squared length scales (from `kriging`).
    Xtrain : ndarray (n, d)
        Training inputs used to fit the GP.
    Ytrain : ndarray (n,)
        Training outputs; used here only to undo the normalisation.
    alpha_ : ndarray (n,)
        Dual coefficients (from `kriging` / `alpha_calculator`).
    Xtest : ndarray (d,)
        Single test point.

    Returns
    -------
    float
        Predicted mean, rescaled back to the original Y units.
    """
    Distance = Xtest - Xtrain
    # Row-wise quadratic form d_i^T Lambda_Inv d_i. einsum computes only the
    # diagonal, avoiding the full n x n matrix that the previous
    # np.diag(D @ L @ D.T) allocated (O(n) memory instead of O(n^2)).
    Distance_sq = np.einsum('ij,jk,ik->i', Distance, Lambda_Inv, Distance)
    Ktrans = Constant * np.exp(-0.5*Distance_sq)
    Y_pred = np.dot(Ktrans,alpha_)
    # Targets were normalised before fitting; undo that here. Note np.std
    # uses ddof=0, matching alpha_calculator's normalisation.
    Y_pred = (Y_pred*np.std(Ytrain))+np.mean(Ytrain)
    return Y_pred
def alpha_calculator(Params, X, Y):
    """Compute the dual-coefficient vector `alpha_` for GP prediction.

    Builds the training kernel matrix from the given hyperparameters and
    solves K alpha_ = y_norm via a Cholesky factorisation.

    Parameters
    ----------
    Params : sequence [Cs, length_scales..., alpha]
        Cs is the kernel amplitude, the middle entries are per-dimension
        length scales, and the last entry is the white-noise level.
    X : ndarray (n, d)
        Training inputs.
    Y : ndarray (n,)
        Training outputs (normalised internally with mean/std, ddof=0).

    Returns
    -------
    ndarray (n,)
        The alpha_ vector, ready for use in GPR_MODEL.
    """
    amplitude = Params[0]
    scales = Params[1:-1]
    noise = Params[-1]
    # Scaling each coordinate by 1/l before taking squared distances is
    # equivalent to dividing the squared distances by l^2.
    scaling = np.eye(len(X[0]))
    np.fill_diagonal(scaling, 1/scales)
    scaled_X = np.dot(X, scaling)
    sq_dists = distance.cdist(scaled_X, scaled_X, metric='sqeuclidean').T
    # Squared-exponential kernel plus white-noise jitter on the diagonal.
    K = amplitude * np.exp(-.5 * sq_dists)
    K = K + noise * np.eye(len(K))
    y_norm = (Y - np.mean(Y)) / np.std(Y)  # normalise outputs
    chol = linalg.cholesky(K, lower=True)
    return linalg.cho_solve((chol, True), y_norm)
# In[]
"""
CV Tools
"""
def CV_Analysis(X,Y,Nfold,N_repeats,multithreading=True,option='sequential'):
    """
    Perform K-fold Cross-Validation: the dataset is split into several
    'folds', GPR is trained on each fold, and each trained parameter set is
    scored on the remaining folds. The parameter set minimising the CV error
    should then be selected; in principle these are the hyperparameters that
    perform best independently of the (fixed-size) training set chosen.

    Parameters
    ----------
    X, Y : arrays of training inputs/outputs; folds are index subsets.
    Nfold : number of folds; each fold holds len(Y)//Nfold points.
    N_repeats : how many times the random fold selection is repeated, to
        obtain better estimates of the CV error. Ignored when
        multithreading is on and option=='sequential' (a single pass of
        contiguous folds is used; see the in-body note on Sobol sampling).
    multithreading : True -> train/score folds in parallel processes
        (recommended); False -> single-threaded (much slower).
    option : fold-generation mode, only honoured in the multithreaded
        branch: 'sequential' -> contiguous index slices; anything else ->
        random subsets drawn with np.random.choice, repeated N_repeats times.

    Returns
    -------
    CV_Error, Models : list of per-parameter-set CV errors and the matching
        trained model data ([Params, LAMBDA, alpha_, L_inv] per fold).
    """
    Indexes = np.array(range(len(Y)))
    N = len(Y)//Nfold  # fold size; any remainder goes to the last sequential fold
    CV_Error = []
    Models = []
    if multithreading == True:
        print('GPR Training Mode = Multi-thread')
        if option=='sequential':
            print('GPR Training Type = {}'.format(option))
            print('\n Training GPR Models... \n')
            print('\n The counter might increase and decrease during calculation. \n Multiple processes are running on parallel and share the same progress bar \n')
# =============================================================================
# Generate the different folds to perform the cross validation in:
# contiguous slices [0,N), [N,2N), ..., last fold takes the remainder.
# =============================================================================
            IdxSets = [np.arange(N*i,N*(i+1)) for i in range(Nfold-1)]
            IdxSets.append(np.arange(IdxSets[-1][-1]+1,len(Y)))
            Models, CV_Error = get_CV_Multithread(X,Y,Nfold,IdxSets)
            """Override the user input, as more repeats is not efficient,
            due to Sobol Sampling"""
            return CV_Error, Models
        else:
            print('GPR Training Type = Random')
            print('\n Training GPR Models... \n')
            print('\n The counter might increase and decrease during calculation. \n Multiple processes are running on parallel and share the same progress bar \n')
            for a in range(N_repeats):
# =============================================================================
# Generate the different folds to perform the cross validation in:
# Nfold random subsets of size N (without replacement within each fold).
# =============================================================================
                IdxSets = [np.random.choice(Indexes,size = N,replace=False) for
                        i in range(Nfold)]
                Models_Data, CVE = get_CV_Multithread(X,Y,Nfold,IdxSets)
                Models.extend(Models_Data)
                CV_Error.extend(CVE)
        return CV_Error, Models
    else:
        print('GPR Training Mode = Single-thread')
# =============================================================================
# Single Threading Option --> Much Slower!:
# =============================================================================
        for a in range(N_repeats):
# =============================================================================
# Generate the different folds to perform the cross validation in:
# =============================================================================
            print('\n GPR with {}-fold Cross Validation Computation | Repetition {} of {}\n'.format(Nfold,a+1,N_repeats))
            IdxSets = [np.random.choice(Indexes,size = N,replace=False) for
                        i in range(Nfold)]
            Yfolds = [Y[idx] for idx in IdxSets]
            Xfolds = [X[idx] for idx in IdxSets]
            print('\n Training GPR Models... \n')
            print('\n The counter might increase and decrease during calculation. \n Multiple processes are running on parallel and share the same progress bar \n')
            Models_Data = []
            for i in tqdm(range(len(Yfolds))):
                M = kriging(Xfolds[i],Yfolds[i])
                Models_Data.append(M)
                Models.append(M)
# =============================================================================
# Perform Cross-Validation: score each fold's parameters on the other folds.
# =============================================================================
            for i in tqdm(range(len(Models_Data))):
                CV_Error.append(CV_Error_Calc(Xfolds,Yfolds,i,Models_Data,Nfold))
        return CV_Error, Models
def CV_Error_Calc(Xfolds, Yfolds, index, Models_Data, Nfold):
    """Score one fold's hyperparameters against every fold.

    The parameter set identified by `index` is evaluated by retraining on
    each fold in turn and predicting the points of all the other folds;
    the fold errors are averaged over Nfold to give a single global
    predictive error for that parameter set.

    Parameters
    ----------
    Xfolds, Yfolds : lists of per-fold input/output arrays.
    index : which entry of Models_Data supplies the hyperparameters.
    Models_Data : list of [Params, LAMBDA, alpha_, L_inv] per fold.
    Nfold : number of folds (normalisation constant).

    Returns
    -------
    float
        Mean fold error for this parameter set.
    """
    amplitude = Models_Data[index][0][0]
    lam = Models_Data[index][1]
    fold_errors = [
        Fold_Error_Calc(amplitude, lam, Models_Data[index], Xfolds, Yfolds, k)
        for k in range(len(Yfolds))
    ]
    return sum(fold_errors) / Nfold
def Fold_Error_Calc(Cons, Lambda, Model_Data, Xfolds, Yfolds, k):
    """Squared prediction error of fold k's GP on all the other folds.

    Fold k acts as the training set (its alpha_ vector is recomputed with
    Model_Data's hyperparameters); the remaining folds are stacked into a
    test set and the sum of squared residuals is returned.

    Parameters
    ----------
    Cons : float -- kernel amplitude.
    Lambda : ndarray -- inverse-squared length-scale matrix for GPR_MODEL.
    Model_Data : [Params, LAMBDA, alpha_, L_inv] for the scoring fold.
    Xfolds, Yfolds : lists of per-fold input/output arrays.
    k : index of the training fold.

    Returns
    -------
    float
        Sum of squared prediction errors over the held-out folds.
    """
    x_train = Xfolds[k]
    y_train = Yfolds[k]
    x_test = np.vstack([xs for j, xs in enumerate(Xfolds) if j != k])
    y_test = np.hstack([ys for j, ys in enumerate(Yfolds) if j != k])
    alpha_ = alpha_calculator(Model_Data[0], x_train, y_train)
    predictions = np.array([
        GPR_MODEL(Cons, Lambda, x_train, y_train, alpha_, point)
        for point in x_test])
    return np.sum(np.power(y_test - predictions, 2))
def multi_kriging(Xfolds, Yfolds):
    """Train one GP per fold in parallel worker processes.

    Parameters
    ----------
    Xfolds, Yfolds : lists of per-fold input/output arrays.

    Returns
    -------
    list
        One `kriging` result ([Params, LAMBDA, alpha_, L_inv]) per fold.
    """
    jobs = [(Xfolds[i], Yfolds[i]) for i in tqdm(range(len(Yfolds)))]
    with Pool() as pool:
        results = pool.starmap(kriging, jobs)
    pool.close()
    pool.join()
    return results
def CV_Errors_Multi(Xfolds, Yfolds, Models_Data, Nfold):
    """Score every trained parameter set in parallel worker processes.

    Parameters
    ----------
    Xfolds, Yfolds : lists of per-fold input/output arrays.
    Models_Data : list of trained model data, one entry per fold.
    Nfold : number of folds, forwarded to CV_Error_Calc.

    Returns
    -------
    list
        One CV error per entry of Models_Data.
    """
    jobs = [(Xfolds, Yfolds, i, Models_Data, Nfold)
            for i in tqdm(range(len(Models_Data)))]
    with Pool() as pool:
        results = pool.starmap(CV_Error_Calc, jobs)
    pool.close()
    pool.join()
    return results
def get_CV_Multithread(X, Y, Nfold, IdxSets):
    """Train GPR models on the folds given by IdxSets and cross-validate them.

    Parameters
    ----------
    X : dataset of input parameters the GP models are trained on.
    Y : dataset of output values the GP models are trained on.
    Nfold : number of folds.
    IdxSets : list of index arrays, one per fold.

    Returns
    -------
    Models_Data : list of trained model parameters
        ([Params, LAMBDA, alpha_, L_inv] per fold).
    ResultsCV : list of each model's predictive error under
        cross-validation.
    """
    Yfolds = [Y[idx] for idx in IdxSets]
    Xfolds = [X[idx] for idx in IdxSets]
    print('\n Training GPR Models... \n')
    # Each entry has the order: Params_array, Lambda_Matrix, alpha_, L_inv
    Models_Data = list(multi_kriging(Xfolds, Yfolds))
    print('\n Performing Cross Validation...\n')
    ResultsCV = CV_Errors_Multi(Xfolds, Yfolds, Models_Data, Nfold)
    return Models_Data, ResultsCV
# In[]
def Kernel_Maker(isotope,Nfolds,Nrepeats,Multithreading=True,option='random'):
    """Train a GP kernel for one isotope and save it to disk.

    Parameters
    ----------
    isotope : key into the loaded Y-data dictionary; also used in the saved
        kernel filename.
    Nfolds : >1 -> k-fold CV model selection; otherwise a single fit on the
        whole dataset.
    Nrepeats : repetitions of random fold selection (see CV_Analysis).
    Multithreading : train/score folds in parallel processes if True.
    option : fold-generation mode, 'random' or 'sequential'.

    Side effects: writes a dict {'Params','LAMBDA','alpha_'} via np.save.
    """
    Path = ''
    # NOTE(review): the '{}' placeholder below is never filled in with
    # .format(...), so this literally loads 'Training_Sets/{}.npy'.
    # Presumably a concrete dataset filename was intended -- TODO confirm.
    Xdata = np.load(Path+'Training_Sets/{}.npy',allow_pickle=True)
    # =========================================================================
    # Use this to perform the regression on the atom density data set
    # =========================================================================
    #Ydata = np.load(Path+'Training_Sets/Y_Candu_Grid625.npy',allow_pickle=True).item()
    # =========================================================================
    # Use this to perform the regression on the total mass data set
    # =========================================================================
    # NOTE(review): same unformatted '{}' placeholder as Xdata above -- confirm
    # the intended filename. The .npy holds a dict keyed by isotope.
    Ydata = np.load(Path+'Training_Sets/{}.npy',allow_pickle=True).item()
    Data = np.array(Ydata[isotope])
    print('GPR Started')
    if Nfolds > 1:
        CV, Models = CV_Analysis(Xdata,Data,Nfolds,Nrepeats,Multithreading,option)
        # =====================================================================
        # Select the parameters that result in the smallest cross validation
        # error. Each Models entry is [Params, LAMBDA, alpha_, L_inv].
        # =====================================================================
        idx = np.argmin(CV)
        print(idx)
        print(len(CV),len(Models))
        Params = Models[idx][0]
        Cons = Params[0]
        Lambda = Models[idx][1]
        # =====================================================================
        # Diagnostics: refit alpha_ on the first 500 entries and report the
        # worst absolute and relative prediction errors on the remainder.
        # =====================================================================
        stIdx = 500
        alpha_ = alpha_calculator(Params,Xdata[:stIdx],Data[:stIdx])
        Ypred = np.array([
            GPR_MODEL(Cons,Lambda,Xdata[:stIdx],Data[:stIdx],alpha_,x) for
            x in Xdata[stIdx:]])
        Error = Data[stIdx:] - np.array(Ypred)
        rel_Error = Error*100/Data[stIdx:]
        print(np.max(Error),np.max(rel_Error))
        Kernel = {}
        Kernel['Params'] = Models[idx][0]
        Kernel['LAMBDA'] = Models[idx][1]
        Kernel['alpha_'] = Models[idx][2]
        # NOTE(review): this branch saves under 'Kernels-v2/Grid' while the
        # else-branch uses 'Kernels-All/Grid' -- confirm this is intentional.
        Kernel_Path = 'Kernels-v2/Grid/{}.npy'.format(isotope)
        np.save(Path+Kernel_Path,Kernel)
    else:
        # Single fit on the full dataset; Models is [Params, LAMBDA, alpha_,
        # L_inv] straight from kriging.
        Models = kriging(Xdata,Data)
        Kernel = {}
        Kernel['Params'] = Models[0]
        Kernel['LAMBDA'] = Models[1]
        Kernel['alpha_'] = Models[2]
        Kernel_Path = 'Kernels-All/Grid/{}.npy'.format(isotope)
        np.save(Path+Kernel_Path,Kernel)
if __name__ == '__main__':
    # 'sequential' fold sampling generally yields good GPR models when the
    # original simulation sampling was done appropriately (e.g. Sobol,
    # Halton). Otherwise 'random' sampling -- slower, but often better
    # provided enough folds and repetitions are used -- is the safer choice.
    target_isotope = sys.argv[1]
    # Example configuration:
    n_folds = 5
    n_repeats = 1
    use_multithreading = True
    sampling_mode = 'random'  # |'random'|'sequential'|
    Kernel_Maker(target_isotope, n_folds, n_repeats, use_multithreading,
                 sampling_mode)
| [
"scipy.linalg.cholesky",
"numpy.array",
"sklearn.gaussian_process.kernels.WhiteKernel",
"numpy.save",
"numpy.arange",
"sklearn.gaussian_process.GaussianProcessRegressor",
"scipy.linalg.cho_solve",
"numpy.mean",
"sklearn.gaussian_process.kernels.ConstantKernel",
"numpy.max",
"numpy.exp",
"numpy... | [((453, 469), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (467, 469), True, 'import numpy as np\n'), ((470, 503), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (493, 503), False, 'import warnings\n'), ((1289, 1376), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', ([], {'kernel': 'kernel', 'n_restarts_optimizer': '(150)', 'normalize_y': '(True)'}), '(kernel=kernel, n_restarts_optimizer=150,\n normalize_y=True)\n', (1313, 1376), False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n'), ((1439, 1463), 'numpy.exp', 'np.exp', (['gp.kernel_.theta'], {}), '(gp.kernel_.theta)\n', (1445, 1463), True, 'import numpy as np\n'), ((1680, 1704), 'numpy.exp', 'np.exp', (['gp.kernel_.theta'], {}), '(gp.kernel_.theta)\n', (1686, 1704), True, 'import numpy as np\n'), ((1904, 1943), 'numpy.fill_diagonal', 'np.fill_diagonal', (['LAMBDA', 'length_scales'], {}), '(LAMBDA, length_scales)\n', (1920, 1943), True, 'import numpy as np\n'), ((2511, 2533), 'numpy.dot', 'np.dot', (['Ktrans', 'alpha_'], {}), '(Ktrans, alpha_)\n', (2517, 2533), True, 'import numpy as np\n'), ((3159, 3198), 'numpy.fill_diagonal', 'np.fill_diagonal', (['LAMBDA', 'length_scales'], {}), '(LAMBDA, length_scales)\n', (3175, 3198), True, 'import numpy as np\n'), ((3211, 3228), 'numpy.dot', 'np.dot', (['X', 'LAMBDA'], {}), '(X, LAMBDA)\n', (3217, 3228), True, 'import numpy as np\n'), ((3489, 3523), 'scipy.linalg.cholesky', 'linalg.cholesky', (['KSelf'], {'lower': '(True)'}), '(KSelf, lower=True)\n', (3504, 3523), False, 'from scipy import linalg\n'), ((3536, 3572), 'scipy.linalg.cho_solve', 'linalg.cho_solve', (['(L_, True)', 'Y_norm'], {}), '((L_, True), Y_norm)\n', (3552, 3572), False, 'from scipy import linalg\n'), ((9643, 9666), 'numpy.power', 'np.power', (['(yt - Ypred)', '(2)'], {}), '(yt - Ypred, 2)\n', (9651, 9666), True, 'import numpy as np\n'), ((9683, 9696), 'numpy.sum', 'np.sum', 
(['Ydiff'], {}), '(Ydiff)\n', (9689, 9696), True, 'import numpy as np\n'), ((11985, 12042), 'numpy.load', 'np.load', (["(Path + 'Training_Sets/{}.npy')"], {'allow_pickle': '(True)'}), "(Path + 'Training_Sets/{}.npy', allow_pickle=True)\n", (11992, 12042), True, 'import numpy as np\n'), ((12691, 12715), 'numpy.array', 'np.array', (['Ydata[isotope]'], {}), '(Ydata[isotope])\n', (12699, 12715), True, 'import numpy as np\n'), ((1216, 1283), 'sklearn.gaussian_process.kernels.WhiteKernel', 'WhiteKernel', ([], {'noise_level': '(1e-05)', 'noise_level_bounds': '(1e-10, 10000.0)'}), '(noise_level=1e-05, noise_level_bounds=(1e-10, 10000.0))\n', (1227, 1283), False, 'from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C\n'), ((1773, 1795), 'numpy.eye', 'np.eye', (['gp.L_.shape[0]'], {}), '(gp.L_.shape[0])\n', (1779, 1795), True, 'import numpy as np\n'), ((2473, 2499), 'numpy.exp', 'np.exp', (['(-0.5 * Distance_sq)'], {}), '(-0.5 * Distance_sq)\n', (2479, 2499), True, 'import numpy as np\n'), ((2570, 2585), 'numpy.mean', 'np.mean', (['Ytrain'], {}), '(Ytrain)\n', (2577, 2585), True, 'import numpy as np\n'), ((3244, 3296), 'scipy.spatial.distance.cdist', 'distance.cdist', (['Xtrain', 'Xtrain'], {'metric': '"""sqeuclidean"""'}), "(Xtrain, Xtrain, metric='sqeuclidean')\n", (3258, 3296), False, 'from scipy.spatial import distance\n'), ((3351, 3375), 'numpy.exp', 'np.exp', (['(-0.5 * distSself)'], {}), '(-0.5 * distSself)\n', (3357, 3375), True, 'import numpy as np\n'), ((3405, 3414), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (3411, 3414), True, 'import numpy as np\n'), ((9863, 9869), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (9867, 9869), False, 'from multiprocessing import Pool\n'), ((10248, 10254), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (10252, 10254), False, 'from multiprocessing import Pool\n'), ((13124, 13137), 'numpy.argmin', 'np.argmin', (['CV'], {}), '(CV)\n', (13133, 13137), True, 'import numpy as np\n'), ((14164, 14199), 
'numpy.save', 'np.save', (['(Path + Kernel_Path)', 'Kernel'], {}), '(Path + Kernel_Path, Kernel)\n', (14171, 14199), True, 'import numpy as np\n'), ((14448, 14483), 'numpy.save', 'np.save', (['(Path + Kernel_Path)', 'Kernel'], {}), '(Path + Kernel_Path, Kernel)\n', (14455, 14483), True, 'import numpy as np\n'), ((1122, 1149), 'sklearn.gaussian_process.kernels.ConstantKernel', 'C', (['(10.0)', '(1e-06, 1000000.0)'], {}), '(10.0, (1e-06, 1000000.0))\n', (1123, 1149), True, 'from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel as C\n'), ((2408, 2436), 'numpy.dot', 'np.dot', (['Distance', 'Lambda_Inv'], {}), '(Distance, Lambda_Inv)\n', (2414, 2436), True, 'import numpy as np\n'), ((2554, 2568), 'numpy.std', 'np.std', (['Ytrain'], {}), '(Ytrain)\n', (2560, 2568), True, 'import numpy as np\n'), ((3393, 3403), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (3400, 3403), True, 'import numpy as np\n'), ((12613, 12670), 'numpy.load', 'np.load', (["(Path + 'Training_Sets/{}.npy')"], {'allow_pickle': '(True)'}), "(Path + 'Training_Sets/{}.npy', allow_pickle=True)\n", (12620, 12670), True, 'import numpy as np\n'), ((13839, 13854), 'numpy.array', 'np.array', (['Ypred'], {}), '(Ypred)\n', (13847, 13854), True, 'import numpy as np\n'), ((13912, 13925), 'numpy.max', 'np.max', (['Error'], {}), '(Error)\n', (13918, 13925), True, 'import numpy as np\n'), ((13926, 13943), 'numpy.max', 'np.max', (['rel_Error'], {}), '(rel_Error)\n', (13932, 13943), True, 'import numpy as np\n'), ((5311, 5340), 'numpy.arange', 'np.arange', (['(N * i)', '(N * (i + 1))'], {}), '(N * i, N * (i + 1))\n', (5320, 5340), True, 'import numpy as np\n'), ((7263, 7311), 'numpy.random.choice', 'np.random.choice', (['Indexes'], {'size': 'N', 'replace': '(False)'}), '(Indexes, size=N, replace=False)\n', (7279, 7311), True, 'import numpy as np\n'), ((6262, 6310), 'numpy.random.choice', 'np.random.choice', (['Indexes'], {'size': 'N', 'replace': '(False)'}), '(Indexes, size=N, replace=False)\n', 
(6278, 6310), True, 'import numpy as np\n')] |
# Implementation based on tf.keras.callbacks.py and tf.keras.utils.generic_utils.py
# https://github.com/tensorflow/tensorflow/blob/2b96f3662bd776e277f86997659e61046b56c315/tensorflow/python/keras/callbacks.py
# https://github.com/tensorflow/tensorflow/blob/2b96f3662bd776e277f86997659e61046b56c315/tensorflow/python/keras/utils/generic_utils.py
import copy
import os
import sys
import time
import numpy as np
from .callback import Callback
class ProgbarLogger(Callback):
    """Callback that prints metrics to stdout.

    Arguments:
        count_mode: One of "steps" or "samples". Whether the progress bar
            should count samples seen or steps (batches) seen.
        stateful_metrics: Iterable of string names of metrics that should
            *not* be averaged over an epoch. Metrics in this list will be
            logged as-is. All others will be averaged over time (e.g. loss,
            etc). If not provided, defaults to the `Model`'s metrics.

    Raises:
        ValueError: In case of invalid `count_mode`.
    """

    def __init__(self, count_mode="samples", stateful_metrics=None):
        super(ProgbarLogger, self).__init__()
        if count_mode not in ("samples", "steps"):
            raise ValueError("Unknown `count_mode`: " + str(count_mode))
        self.use_steps = count_mode == "steps"
        # None means "derive the set from the first batch's logs" later on.
        self.stateful_metrics = set(stateful_metrics) if stateful_metrics else None
        self.seen = 0
        self.progbar = None
        self.target = None
        self.verbose = 1
        self.epochs = 1
        self._called_in_fit = False

    def set_params(self, params):
        self.verbose = params["verbose"]
        self.epochs = params["epochs"]
        size_key = "steps" if self.use_steps else "samples"
        # Target stays None when unknown; it is then inferred at the end of
        # the first epoch.
        self.target = params[size_key] if size_key in params else None

    def on_train_begin(self, logs=None):
        # Remember that we are inside `fit` so validation stays silent.
        self._called_in_fit = True

    def on_test_begin(self, logs=None):
        if not self._called_in_fit:
            self._reset_progbar()

    def on_predict_begin(self, logs=None):
        self._reset_progbar()

    def on_epoch_begin(self, epoch, logs=None):
        self._reset_progbar(epoch=epoch)
        if self.epochs > 1 and self.verbose in (1, 2):
            print("Epoch %d/%d" % (epoch + 1, self.epochs))

    def on_train_batch_end(self, batch, logs=None):
        self._batch_update_progbar(logs)

    def on_test_batch_end(self, batch, logs=None):
        if not self._called_in_fit:
            self._batch_update_progbar(logs)

    def on_predict_batch_end(self, batch, logs=None):
        # Prediction results are deliberately not forwarded to the bar.
        self._batch_update_progbar(None)

    def on_epoch_end(self, epoch, logs=None):
        self._finalize_progbar(logs)

    def on_test_end(self, logs=None):
        if not self._called_in_fit:
            self._finalize_progbar(logs)

    def on_predict_end(self, logs=None):
        self._finalize_progbar(logs)

    def _reset_progbar(self, epoch=None):
        """Discard the current bar and start a fresh one."""
        self.seen = 0
        previous_bar = self.progbar
        self.progbar = None
        self.progbar = Progbar(
            target=self.target,
            verbose=self.verbose,
            stateful_metrics=self.stateful_metrics,
            unit_name="step" if self.use_steps else "sample",
            epoch=epoch,
        )
        if previous_bar is not None:
            # Inherit the column widths from the previous bar so the compact
            # table layout stays aligned between epochs.
            self.progbar._compact_table_column_width = (
                previous_bar._compact_table_column_width
            )

    def _batch_update_progbar(self, logs=None):
        """Advance the bar by the size of the batch that just finished."""
        if self.stateful_metrics is None:
            # Lazily derive the stateful metric names from the first logs seen.
            self.stateful_metrics = set(logs.keys()) if logs else set()
        logs = copy.copy(logs) if logs else {}
        batch_size = logs.pop("size", 0)
        num_steps = logs.pop("num_steps", 1)  # DistStrat can run >1 steps.
        logs.pop("batch", None)
        self.seen += num_steps if self.use_steps else num_steps * batch_size
        self.progbar.update(self.seen, list(logs.items()), finalize=False)

    def _finalize_progbar(self, logs):
        """Emit the final state of the bar for this epoch/run."""
        if self.target is None:
            # Target was unknown; lock it to what was actually seen.
            self.target = self.seen
            self.progbar.target = self.seen
        logs = logs or {}
        # Size entries are bookkeeping only, not metrics worth displaying.
        logs.pop("size", None)
        logs.pop("val_size", None)
        self.progbar.update(self.seen, list(logs.items()), finalize=True)
class Progbar(object):
    """Displays a progress bar.
    Arguments:
        target: Total number of steps expected, None if unknown.
        width: Progress bar width on screen.
        verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose), 3 (compact table)
        stateful_metrics: Iterable of string names of metrics that should *not* be
            averaged over time. Metrics in this list will be displayed as-is. All
            others will be averaged by the progbar before display.
        interval: Minimum visual progress update interval (in seconds).
        unit_name: Display name for step counts (usually "step" or "sample").
    """
    def __init__(
        self,
        target,
        width=30,
        verbose=1,
        interval=0.05,
        stateful_metrics=None,
        unit_name="step",
        epoch=None,
    ):
        self.target = target
        self.width = width
        self.verbose = verbose
        self.interval = interval
        self.unit_name = unit_name
        self.epoch = epoch
        if stateful_metrics:
            self.stateful_metrics = set(stateful_metrics)
        else:
            self.stateful_metrics = set()
        # In-place line rewriting (via "\r"/"\b") is only attempted on real
        # terminals, notebook kernels, POSIX systems or inside PyCharm;
        # otherwise each update is printed on a new line.
        self._dynamic_display = (
            (hasattr(sys.stdout, "isatty") and sys.stdout.isatty())
            or "ipykernel" in sys.modules
            or "posix" in sys.modules
            or "PYCHARM_HOSTED" in os.environ
        )
        self._total_width = 0
        self._seen_so_far = 0
        # We use a dict + list to avoid garbage collection
        # issues found in OrderedDict
        self._values = {}
        self._values_order = []
        self._start = time.time()
        self._last_update = 0
        # State for the verbose==3 compact-table renderer: header names
        # already printed, and the width of each column so rows stay aligned.
        self._compact_header_already_printed_names = []
        self._compact_table_column_width = dict()
    def update(self, current, values=None, finalize=None):
        """Updates the progress bar.
        Arguments:
            current: Index of current step.
            values: List of tuples: `(name, value_for_last_step)`. If `name` is in
                `stateful_metrics`, `value_for_last_step` will be displayed as-is.
                Else, an average of the metric over time will be displayed.
            finalize: Whether this is the last update for the progress bar. If
                `None`, defaults to `current >= self.target`.
        """
        if finalize is None:
            if self.target is None:
                finalize = False
            else:
                finalize = current >= self.target
        values = values or []
        # Fold the incoming values into running (sum, count) accumulators,
        # except for stateful metrics which overwrite their slot each call.
        for k, v in values:
            if k not in self._values_order:
                self._values_order.append(k)
            if k not in self.stateful_metrics:
                # In the case that progress bar doesn't have a target value in the first
                # epoch, both on_batch_end and on_epoch_end will be called, which will
                # cause 'current' and 'self._seen_so_far' to have the same value. Force
                # the minimal value to 1 here, otherwise stateful_metric will be 0s.
                value_base = max(current - self._seen_so_far, 1)
                if k not in self._values:
                    self._values[k] = [v * value_base, value_base]
                else:
                    self._values[k][0] += v * value_base
                    self._values[k][1] += value_base
            else:
                # Stateful metrics output a numeric value. This representation
                # means "take an average from a single value" but keeps the
                # numeric formatting.
                self._values[k] = [v, 1]
        self._seen_so_far = current
        now = time.time()
        info = " - %.0fs" % (now - self._start)
        if self.verbose == 1:
            # Throttle redraws: skip unless enough time passed or finalizing.
            if now - self._last_update < self.interval and not finalize:
                return
            prev_total_width = self._total_width
            if self._dynamic_display:
                # Rewind the cursor so the bar is redrawn in place.
                sys.stdout.write("\b" * prev_total_width)
                sys.stdout.write("\r")
            else:
                sys.stdout.write("\n")
            if self.target is not None:
                numdigits = int(np.log10(self.target)) + 1
                bar = ("%" + str(numdigits) + "d/%d [") % (current, self.target)
                prog = float(current) / self.target
                prog_width = int(self.width * prog)
                if prog_width > 0:
                    bar += "=" * (prog_width - 1)
                    if current < self.target:
                        bar += ">"
                    else:
                        bar += "="
                bar += "." * (self.width - prog_width)
                bar += "]"
            else:
                bar = "%7d/Unknown" % current
            self._total_width = len(bar)
            sys.stdout.write(bar)
            if current:
                time_per_unit = (now - self._start) / current
            else:
                time_per_unit = 0
            if self.target is None or finalize:
                # When done (or target unknown) show throughput per unit...
                if time_per_unit >= 1 or time_per_unit == 0:
                    info += " %.0fs/%s" % (time_per_unit, self.unit_name)
                elif time_per_unit >= 1e-3:
                    info += " %.0fms/%s" % (time_per_unit * 1e3, self.unit_name)
                else:
                    info += " %.0fus/%s" % (time_per_unit * 1e6, self.unit_name)
            else:
                # ...otherwise show an ETA extrapolated from the current rate.
                eta = time_per_unit * (self.target - current)
                if eta > 3600:
                    eta_format = "%d:%02d:%02d" % (
                        eta // 3600,
                        (eta % 3600) // 60,
                        eta % 60,
                    )
                elif eta > 60:
                    eta_format = "%d:%02d" % (eta // 60, eta % 60)
                else:
                    eta_format = "%ds" % eta
                info = " - ETA: %s" % eta_format
            for k in self._values_order:
                info += " - %s:" % k
                if isinstance(self._values[k], list):
                    avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
                    if abs(avg) > 1e-3:
                        info += " %.4f" % avg
                    else:
                        info += " %.4e" % avg
                else:
                    info += " %s" % self._values[k]
            self._total_width += len(info)
            # Pad with spaces so leftovers from a longer previous line vanish.
            if prev_total_width > self._total_width:
                info += " " * (prev_total_width - self._total_width)
            if finalize:
                info += "\n"
            sys.stdout.write(info)
            sys.stdout.flush()
        elif self.verbose == 2:
            # Semi-verbose: print one summary line only when finalizing.
            # NOTE(review): this assumes self.target is not None here;
            # np.log10(None) would raise — confirm callers guarantee it.
            if finalize:
                numdigits = int(np.log10(self.target)) + 1
                count = ("%" + str(numdigits) + "d/%d") % (current, self.target)
                info = count + info
                for k in self._values_order:
                    info += " - %s:" % k
                    avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
                    # NOTE(review): uses `avg > 1e-3` (not abs()) unlike the
                    # verbose==1 branch — negative averages print as %.4e.
                    if avg > 1e-3:
                        info += " %.4f" % avg
                    else:
                        info += " %.4e" % avg
                info += "\n"
                sys.stdout.write(info)
                sys.stdout.flush()
        elif self.verbose == 3:
            self.compact_table_progress(current, finalize)
        self._last_update = now
    def add(self, n, values=None):
        # Convenience wrapper: advance the bar by n units.
        self.update(self._seen_so_far + n, values)
    def compact_table_progress(self, current, finalize=False):
        """Render one row of the verbose==3 compact table (one row per
        update, header/separator printed during the first epoch)."""
        now = time.time()
        if now - self._last_update < self.interval and not finalize:
            return
        def to_column_name(i):
            # 0,1,2,3,4,5,6,7,8,9,A,B,C,D, etc
            return str(i) if i < 10 else chr(i + 55)
        if self.epoch == 0 and (self._last_update == 0 or finalize):
            # first epoch, first or last update: show the metric names
            info = ""
            for i, k in enumerate(["Step/Epoch", "Time"] + self._values_order):
                if k not in self._compact_header_already_printed_names:
                    i = to_column_name(i)
                    info += f"[{i}]{k} | "
                    self._compact_header_already_printed_names += [k]
            # remove the last |
            info = info[:-3]
            if len(info):
                print(info, end="\n", file=sys.stdout, flush=True)
        info = " "
        # first column: show step if running, epoch if finished
        if finalize and self.epoch is not None:
            # epoch
            numdigits = int(np.log10(self.target)) + 1
            colstr = f"%{numdigits*2+1}d" % self.epoch
        else:
            # step
            if self.target is not None:
                numdigits = int(np.log10(self.target)) + 1
                colstr = ("%" + str(numdigits) + "d/%d") % (current, self.target)
                # NOTE(review): `progress` is computed but never used.
                progress = float(current) / self.target
            else:
                colstr = "%7d/Unknown" % current
        self._compact_table_column_width[0] = len(colstr)
        info += colstr
        # second column: elapsed time
        elapsed = now - self._start
        timestr = f"{elapsed:6.1f}s"
        self._compact_table_column_width[1] = len(timestr)
        info += " | " + timestr
        for i, k in enumerate(self._values_order):
            avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
            if 1e4 >= avg > 1e-3:
                valstr = "%.3f" % avg
            else:
                valstr = "%.3e" % avg
            # prepend spaces if necessary to keep column width the same as before
            valstr = (
                " " * (self._compact_table_column_width.get(i + 2, 0) - len(valstr))
                + valstr
            )
            self._compact_table_column_width[i + 2] = len(valstr)
            info += " | " + valstr
        if self.epoch == 0 and finalize:
            # first epoch, last update: show table separator with column numbers
            if self.target is None:
                self.target = current
            sep = " " * len(info) + "\n"
            for i in range(len(self._values_order) + 2):
                colwidth = self._compact_table_column_width[i]
                colname = to_column_name(i)
                colwidth = colwidth - len(f"[{colname}]")
                sep += (
                    "#" * (int(np.ceil(colwidth / 2)) + 1)
                    + f"[{colname}]"
                    + "#" * (int(np.floor(colwidth / 2)) + 1)
                    + "|"
                )
            # remove the last |
            sep = sep[:-1]
            print(sep, end="\n", file=sys.stdout, flush=True)
        print(
            info,
            end="\r" if self._dynamic_display and not finalize else "\n",
            file=sys.stdout,
            flush=True,
        )
| [
"numpy.ceil",
"numpy.log10",
"numpy.floor",
"copy.copy",
"sys.stdout.isatty",
"sys.stdout.flush",
"time.time",
"sys.stdout.write"
] | [((6671, 6682), 'time.time', 'time.time', ([], {}), '()\n', (6680, 6682), False, 'import time\n'), ((8688, 8699), 'time.time', 'time.time', ([], {}), '()\n', (8697, 8699), False, 'import time\n'), ((12602, 12613), 'time.time', 'time.time', ([], {}), '()\n', (12611, 12613), False, 'import time\n'), ((4286, 4301), 'copy.copy', 'copy.copy', (['logs'], {}), '(logs)\n', (4295, 4301), False, 'import copy\n'), ((9828, 9849), 'sys.stdout.write', 'sys.stdout.write', (['bar'], {}), '(bar)\n', (9844, 9849), False, 'import sys\n'), ((11598, 11620), 'sys.stdout.write', 'sys.stdout.write', (['info'], {}), '(info)\n', (11614, 11620), False, 'import sys\n'), ((11633, 11651), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11649, 11651), False, 'import sys\n'), ((6277, 6296), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (6294, 6296), False, 'import sys\n'), ((8978, 9021), 'sys.stdout.write', 'sys.stdout.write', (["('\\x08' * prev_total_width)"], {}), "('\\x08' * prev_total_width)\n", (8994, 9021), False, 'import sys\n'), ((9036, 9058), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (9052, 9058), False, 'import sys\n'), ((9093, 9115), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (9109, 9115), False, 'import sys\n'), ((12254, 12276), 'sys.stdout.write', 'sys.stdout.write', (['info'], {}), '(info)\n', (12270, 12276), False, 'import sys\n'), ((12293, 12311), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (12309, 12311), False, 'import sys\n'), ((13638, 13659), 'numpy.log10', 'np.log10', (['self.target'], {}), '(self.target)\n', (13646, 13659), True, 'import numpy as np\n'), ((9189, 9210), 'numpy.log10', 'np.log10', (['self.target'], {}), '(self.target)\n', (9197, 9210), True, 'import numpy as np\n'), ((13825, 13846), 'numpy.log10', 'np.log10', (['self.target'], {}), '(self.target)\n', (13833, 13846), True, 'import numpy as np\n'), ((11742, 11763), 'numpy.log10', 'np.log10', (['self.target'], 
{}), '(self.target)\n', (11750, 11763), True, 'import numpy as np\n'), ((15542, 15564), 'numpy.floor', 'np.floor', (['(colwidth / 2)'], {}), '(colwidth / 2)\n', (15550, 15564), True, 'import numpy as np\n'), ((15444, 15465), 'numpy.ceil', 'np.ceil', (['(colwidth / 2)'], {}), '(colwidth / 2)\n', (15451, 15465), True, 'import numpy as np\n')] |
#!/usr/bin/python
'''
Distance calculation substitute for cosmolopy
All formula used are from
https://arxiv.org/pdf/astro-ph/9905116.pdf
'''
import numpy as np
from scipy.integrate import quad
class cosmo_distance(object):
    '''
    Cosmological distance calculator (drop-in substitute for cosmolopy).
    All formulae are from Hogg (1999), https://arxiv.org/pdf/astro-ph/9905116.pdf
    Distances are in Mpc and volumes in Mpc^3 throughout.
    '''
    def __init__(self, **cosmology):
        '''
        To initiate, cosmological parameters must be supplied
        in the form of a dictionary with the following keys
        {'omega_M_0', 'omega_lambda_0', 'h'}
        Input:
            **cosmology = {'omega_M_0', 'omega_lambda_0', 'h'}
        Raises:
            Exception: if any required parameter is missing.
        '''
        self.c = 299792458 # speed of light in ms^-1
        # Fail fast when a required parameter is absent.
        for k in ['omega_M_0', 'omega_lambda_0', 'h']:
            if k not in cosmology.keys():
                raise Exception('Cosmological parameter {} must be supplied'.format(k))
        self.load_param(**cosmology)
    def load_param(self, **cosmology):
        '''
        Set up the cosmological parameters.
        The curvature density omega_k_0 defaults to 1 - omega_M_0 -
        omega_lambda_0 unless supplied explicitly.
        '''
        self.om0 = cosmology.get('omega_M_0')
        self.ode = cosmology.get('omega_lambda_0')
        self.ok0 = cosmology.get('omega_k_0', 1-self.om0-self.ode)
        self.h = cosmology.get('h')
        # Hubble distance D_h = c/H0 (unit=Mpc); c converted from m/s to km/s.
        self.D_h = self.c/1000./100./self.h
    def E(self, z):
        '''
        Dimensionless Hubble parameter (Hogg eq. 14):
        E(z) = sqrt(om0*(1+z)^3 + ok0*(1+z)^2 + ode)
        Input:
            z : float or array of float, redshift
        output:
            E : float or array of float
        '''
        return np.sqrt(self.om0*(1.+z)**3.+self.ok0*(1.+z)**2.+self.ode)
    def comoving_distance(self, z, z_0=0):
        '''
        Line-of-sight comoving distance (Hogg eq. 15):
        D_c = D_h * \int^z_{z_0} dz'/E(z')
        unit = Mpc
        Input:
            z : float or array of float, redshift
            z_0 : float, default is 0, where integration begins
        output:
            D_c : float or array of float (float when z is scalar)
        '''
        z = np.atleast_1d(z)
        # epsabs can be tweaked to achieve higher precision
        D_c = np.array([self.D_h*quad(lambda x: 1./self.E(x),
                                       z_0, lim, epsabs=1.e-5)[0] for lim in z])
        # Return a float when only one redshift was supplied.
        if np.size(D_c) > 1:
            return D_c
        else:
            return D_c[0]
    def transverse_comoving_distance(self, z):
        '''
        Transverse comoving distance (Hogg eq. 16): the comoving distance
        between two events at the same redshift separated on the sky by an
        angle d\theta is D_m * d\theta.
            ok0 > 0: D_m = D_h/sqrt(ok0) * sinh(sqrt(ok0)*D_c/D_h)
            ok0 = 0: D_m = D_c
            ok0 < 0: D_m = D_h/sqrt(|ok0|) * sin(sqrt(|ok0|)*D_c/D_h)
        unit = Mpc
        Input:
            z : float or array of float, redshift
        output:
            D_m : float or array of float
        '''
        if self.ok0 == 0:
            return self.comoving_distance(z)
        elif self.ok0 > 0:
            return self.D_h/np.sqrt(self.ok0)*np.sinh(np.sqrt(self.ok0)
                                 *self.comoving_distance(z)/self.D_h)
        else:
            return self.D_h/np.sqrt(np.abs(self.ok0))*np.sin(np.sqrt(np.abs(self.ok0))
                                 *self.comoving_distance(z)/self.D_h)
    def angular_diameter_distance(self, z):
        '''
        Angular diameter distance (Hogg eq. 18): ratio of an object's
        physical transverse size to its angular size.
        D_A = D_m / (1+z)
        Input:
            z : float or array of float, redshift
        output:
            D_A : float or array of float
        '''
        return self.transverse_comoving_distance(z)/(1.+z)
    def luminosity_distance(self, z):
        '''
        Luminosity distance (Hogg eq. 21):
        D_L = (1+z) * D_m
        unit = Mpc
        Input:
            z : float or array of float, redshift
        output:
            D_L : float or array of float
        '''
        return (1.+z)*self.transverse_comoving_distance(z)
    def comoving_volume(self, z):
        '''
        Comoving volume out to redshift z (Hogg eq. 29).
        Flat universe: V_c = (4*pi/3) * D_m^3 = (4*pi/3) * D_c^3
        unit = Mpc^3
        Input:
            z : float or array of float, redshift
        output:
            V_c : float or array of float
        '''
        if self.ok0 == 0:
            return 4*np.pi/3 * self.comoving_distance(z)**3.
        # Curved universes: both branches share D_m, D_ratio and prefactor.
        # (BUG FIX: the original referenced bare `ok0`, called
        # `transverse_comoving_distance`/`D_h` without `self`, and never
        # defined `prefactor` in the closed-universe branch.)
        D_m = self.transverse_comoving_distance(z)
        D_ratio = D_m/self.D_h
        prefactor = 4.*np.pi*self.D_h**3./2./self.ok0
        if self.ok0 > 0:
            return prefactor*(D_ratio*np.sqrt(1+self.ok0*D_ratio**2.)
                              - np.arcsinh(np.sqrt(np.abs(self.ok0))*D_ratio)
                              / np.sqrt(np.abs(self.ok0)))
        else:
            return prefactor*(D_ratio*np.sqrt(1+self.ok0*D_ratio**2.)
                              - np.arcsin(np.sqrt(np.abs(self.ok0))*D_ratio)
                              / np.sqrt(np.abs(self.ok0)))
    def diff_comoving_volume(self, z):
        '''
        Differential comoving volume at redshift z (Hogg eq. 28):
        dV_c = D_h * (1+z)^2 * D_A^2 / E(z) dOmega dz
        unit = Mpc^3 sr^{-1}
        Input:
            z : float or array of float, redshift
        output:
            dV_c : float or array of float
        '''
        return self.D_h*(1.+z)**2.*self.angular_diameter_distance(z)**2.\
               /self.E(z)
| [
"numpy.abs",
"numpy.size",
"numpy.sqrt",
"numpy.atleast_1d"
] | [((1724, 1801), 'numpy.sqrt', 'np.sqrt', (['(self.om0 * (1.0 + z) ** 3.0 + self.ok0 * (1.0 + z) ** 2.0 + self.ode)'], {}), '(self.om0 * (1.0 + z) ** 3.0 + self.ok0 * (1.0 + z) ** 2.0 + self.ode)\n', (1731, 1801), True, 'import numpy as np\n'), ((2304, 2320), 'numpy.atleast_1d', 'np.atleast_1d', (['z'], {}), '(z)\n', (2317, 2320), True, 'import numpy as np\n'), ((2575, 2587), 'numpy.size', 'np.size', (['D_c'], {}), '(D_c)\n', (2582, 2587), True, 'import numpy as np\n'), ((3564, 3581), 'numpy.sqrt', 'np.sqrt', (['self.ok0'], {}), '(self.ok0)\n', (3571, 3581), True, 'import numpy as np\n'), ((3715, 3731), 'numpy.abs', 'np.abs', (['self.ok0'], {}), '(self.ok0)\n', (3721, 3731), True, 'import numpy as np\n'), ((5574, 5612), 'numpy.sqrt', 'np.sqrt', (['(1 + self.ok0 * D_ratio ** 2.0)'], {}), '(1 + self.ok0 * D_ratio ** 2.0)\n', (5581, 5612), True, 'import numpy as np\n'), ((5825, 5863), 'numpy.sqrt', 'np.sqrt', (['(1 + self.ok0 * D_ratio ** 2.0)'], {}), '(1 + self.ok0 * D_ratio ** 2.0)\n', (5832, 5863), True, 'import numpy as np\n'), ((3590, 3607), 'numpy.sqrt', 'np.sqrt', (['self.ok0'], {}), '(self.ok0)\n', (3597, 3607), True, 'import numpy as np\n'), ((5674, 5690), 'numpy.abs', 'np.abs', (['self.ok0'], {}), '(self.ok0)\n', (5680, 5690), True, 'import numpy as np\n'), ((5925, 5941), 'numpy.abs', 'np.abs', (['self.ok0'], {}), '(self.ok0)\n', (5931, 5941), True, 'import numpy as np\n'), ((3748, 3764), 'numpy.abs', 'np.abs', (['self.ok0'], {}), '(self.ok0)\n', (3754, 3764), True, 'import numpy as np\n'), ((5639, 5655), 'numpy.abs', 'np.abs', (['self.ok0'], {}), '(self.ok0)\n', (5645, 5655), True, 'import numpy as np\n'), ((5890, 5906), 'numpy.abs', 'np.abs', (['self.ok0'], {}), '(self.ok0)\n', (5896, 5906), True, 'import numpy as np\n')] |
import numpy as np
import argparse
import logging
import time
SUFFIX = '_shuffle.txt'
def load_input(filename):
    """Read all lines from *filename*, shuffle them in place, return them."""
    t0 = time.time()
    with open(filename) as handle:
        lines = handle.readlines()
    logging.info('load %d lines in %.4f s', len(lines), time.time() - t0)
    t1 = time.time()
    np.random.shuffle(lines)
    logging.info('shuffle in %.4f s', time.time() - t1)
    return lines
return data
def get_output(filename):
    """Derive the shuffled-output filename from the input filename."""
    return ''.join((filename, SUFFIX))
# Command-line entry point: shuffle the lines of the given file and write
# them to the derived output path, printing that path at the end.
if __name__ == '__main__':
    cli = argparse.ArgumentParser()
    cli.add_argument('input')
    cli.add_argument('-v', action='store_true')
    cli.add_argument('-d', action='store_true', help='dry run')
    options = cli.parse_args()
    if options.v:
        logging.basicConfig(level=logging.INFO)
    target = get_output(options.input)
    if not options.d:
        shuffled = load_input(options.input)
        started = time.time()
        with open(target, 'w') as out:
            out.writelines(shuffled)
        logging.info('write to %s in %.4f s', target, time.time() - started)
    print(target)
| [
"logging.basicConfig",
"time.time",
"argparse.ArgumentParser",
"numpy.random.shuffle"
] | [((123, 134), 'time.time', 'time.time', ([], {}), '()\n', (132, 134), False, 'import time\n'), ((274, 285), 'time.time', 'time.time', ([], {}), '()\n', (283, 285), False, 'import time\n'), ((290, 313), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (307, 313), True, 'import numpy as np\n'), ((484, 509), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (507, 509), False, 'import argparse\n'), ((716, 755), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (735, 755), False, 'import logging\n'), ((863, 874), 'time.time', 'time.time', ([], {}), '()\n', (872, 874), False, 'import time\n'), ((249, 260), 'time.time', 'time.time', ([], {}), '()\n', (258, 260), False, 'import time\n'), ((352, 363), 'time.time', 'time.time', ([], {}), '()\n', (361, 363), False, 'import time\n'), ((997, 1008), 'time.time', 'time.time', ([], {}), '()\n', (1006, 1008), False, 'import time\n')] |
from __future__ import division
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
import nupic
from nupic.encoders import RandomDistributedScalarEncoder
from nupic.encoders.date import DateEncoder
from nupic.algorithms.spatial_pooler import SpatialPooler
from nupic.algorithms.temporal_memory import TemporalMemory
from scipy.stats import norm
class Encoder:
    """Binds a data column name to the encoders that transform it.

    `variable` may be the special value '_index', in which case the
    frame's index is encoded instead of a named column (see multiencode).
    """

    def __init__(self, variable, encoders):
        """Store the column name and its list of encoder references."""
        self.variable, self.encoders = variable, encoders
def multiencode(encoders, Data, iter):
    """Concatenate the encodings of one row of `Data`.

    Parameters:
        encoders: iterable of objects with `variable` (a column name, or
            '_index' to encode the frame's index) and `encoders` attributes.
            Each entry of an object's `encoders` list is either an encoder
            object exposing `encode`, or — for backward compatibility with
            the original string-based format — the name of such an object
            resolvable from this module's namespace.
        Data: mapping/DataFrame indexed as Data[column][row] (Data.index[row]
            for '_index').
        iter: row position to encode. (Parameter name kept for backward
            compatibility even though it shadows the builtin.)

    Returns:
        1-D numpy array with all encoder outputs concatenated in order
        (an empty list when no encoders are supplied, as before).
    """
    parts = []
    for spec in encoders:
        for enc_ref in spec.encoders:
            # BUG FIX: the original used exec("enc = ...") which does not
            # create a function-local variable under Python 3 (the assigned
            # name is invisible after exec returns). Resolve string
            # references with eval instead, and accept real encoder objects
            # directly as a backward-compatible generalization.
            encoder = eval(enc_ref) if isinstance(enc_ref, str) else enc_ref
            if spec.variable != '_index':
                value = Data[spec.variable][iter]
            else:
                value = Data.index[iter]
            parts.append(encoder.encode(value))
    if not parts:
        # No encoders supplied: mirror the original's empty-list result.
        return []
    # Keep the original's leading-[0]-then-slice trick so the result dtype
    # matches the original incremental np.concatenate promotion exactly.
    return np.concatenate([[0]] + parts)[1:]
"numpy.concatenate"
] | [((835, 861), 'numpy.concatenate', 'np.concatenate', (['[res, enc]'], {}), '([res, enc])\n', (849, 861), True, 'import numpy as np\n')] |
import os
import argparse
import torch
import random
import numpy as np
from shutil import copyfile
from src.config import Config
from src.grad_match import GradientMatch, GradientMatch2
from src.create_data_list import create_data_list
def load_config(mode = None):
    """Parse CLI arguments, prepare the checkpoint folder/config file and
    build a Config object.

    mode: 1 = train, 2 = test, 3 = eval; recorded on the returned Config
    so downstream code can dispatch on it (left untouched when None).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", "--checkpoints", type = str, default = "./checkpoints",
                        help = "model checkpoint path, default = ./checkpoints")
    for flag, default in (("--train_img_path", "./train_images"),
                          ("--test_img_path", "./test_images"),
                          ("--eval_img_path", "./eval_images")):
        parser.add_argument(flag, type = str, default = default)
    if mode == 2:
        # Test mode additionally needs a destination folder for results.
        parser.add_argument("--output", type = str, help = "path to a output folder")
    args = parser.parse_args()
    # Build the train/test/eval file lists consumed by the data loaders.
    create_data_list(args.train_img_path, args.test_img_path, args.eval_img_path, "./list_folder")
    # Make sure the checkpoint folder exists and contains a config file.
    config_path = os.path.join(args.path, "config.yaml")
    if not os.path.exists(args.path):
        os.makedirs(args.path)
    if not os.path.exists(config_path):
        copyfile('./config.yaml', config_path)
    config = Config(config_path)
    # Record the requested run mode (1 = train, 2 = test, 3 = eval).
    if mode in (1, 2, 3):
        config.MODE = mode
    return config
def main(mode = None):
    """Entry point: configure devices and RNG seeds, build the model and
    dispatch to train/test/eval according to config.MODE.

    mode: forwarded to load_config (1 = train, 2 = test, 3 = eval).
    """
    config = load_config(mode)
    # Restrict CUDA to the GPUs listed in the config before torch uses them.
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(gpu) for gpu in config.GPU)
    if torch.cuda.is_available():
        config.DEVICE = torch.device("cuda")
        # BUG FIX: was `torch.backends.cudnn.benchmarck = True` — the typo
        # silently created a new attribute instead of enabling cuDNN
        # autotuning.
        torch.backends.cudnn.benchmark = True
    else:
        config.DEVICE = torch.device("cpu")
    # Fix every RNG (torch CPU/GPU, numpy, stdlib) for reproducibility.
    torch.manual_seed(config.SEED)
    torch.cuda.manual_seed_all(config.SEED)
    np.random.seed(config.SEED)
    random.seed(config.SEED)
    model = GradientMatch(config)
    model.load()
    # model training
    if config.MODE == 1:
        config.print()
        print("Start training...")
        model.train()
    # model test
    elif config.MODE == 2:
        print("Start testing...")
        model.test()
    # eval mode
    else:
        print("Start eval...")
        model.eval()
# Script entry point; runs with mode=None, so load_config leaves
# config.MODE as whatever the config file provides.
if __name__ == "__main__":
    main()
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"os.path.exists",
"src.grad_match.GradientMatch",
"src.create_data_list.create_data_list",
"argparse.ArgumentParser",
"os.makedirs",
"src.config.Config",
"os.path.join",
"random.seed",
"shutil.copyfile",
"torch.cuda.is_available",
"numpy.ran... | [((281, 306), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (304, 306), False, 'import argparse\n'), ((927, 1026), 'src.create_data_list.create_data_list', 'create_data_list', (['args.train_img_path', 'args.test_img_path', 'args.eval_img_path', '"""./list_folder"""'], {}), "(args.train_img_path, args.test_img_path, args.\n eval_img_path, './list_folder')\n", (943, 1026), False, 'from src.create_data_list import create_data_list\n'), ((1041, 1079), 'os.path.join', 'os.path.join', (['args.path', '"""config.yaml"""'], {}), "(args.path, 'config.yaml')\n", (1053, 1079), False, 'import os\n'), ((1252, 1271), 'src.config.Config', 'Config', (['config_path'], {}), '(config_path)\n', (1258, 1271), False, 'from src.config import Config\n'), ((1626, 1651), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1649, 1651), False, 'import torch\n'), ((1825, 1855), 'torch.manual_seed', 'torch.manual_seed', (['config.SEED'], {}), '(config.SEED)\n', (1842, 1855), False, 'import torch\n'), ((1860, 1899), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['config.SEED'], {}), '(config.SEED)\n', (1886, 1899), False, 'import torch\n'), ((1904, 1931), 'numpy.random.seed', 'np.random.seed', (['config.SEED'], {}), '(config.SEED)\n', (1918, 1931), True, 'import numpy as np\n'), ((1936, 1960), 'random.seed', 'random.seed', (['config.SEED'], {}), '(config.SEED)\n', (1947, 1960), False, 'import random\n'), ((1974, 1995), 'src.grad_match.GradientMatch', 'GradientMatch', (['config'], {}), '(config)\n', (1987, 1995), False, 'from src.grad_match import GradientMatch, GradientMatch2\n'), ((1092, 1117), 'os.path.exists', 'os.path.exists', (['args.path'], {}), '(args.path)\n', (1106, 1117), False, 'import os\n'), ((1127, 1149), 'os.makedirs', 'os.makedirs', (['args.path'], {}), '(args.path)\n', (1138, 1149), False, 'import os\n'), ((1162, 1189), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (1176, 
1189), False, 'import os\n'), ((1199, 1237), 'shutil.copyfile', 'copyfile', (['"""./config.yaml"""', 'config_path'], {}), "('./config.yaml', config_path)\n", (1207, 1237), False, 'from shutil import copyfile\n'), ((1677, 1697), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1689, 1697), False, 'import torch\n'), ((1779, 1798), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1791, 1798), False, 'import torch\n')] |
"""
Copyright Declaration (C)
From: https://github.com/leeykang/
Use and modification of information, comment(s) or code provided in this document
is granted if and only if this copyright declaration, located between lines 1 to
9 of this document, is preserved at the top of any document where such
information, comment(s) or code is/are used.
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.stats import poisson
from functools import reduce
from operator import mul
from collections import defaultdict
from mpl_toolkits.mplot3d import Axes3D
from seaborn import heatmap
from matplotlib import cm
from itertools import product
from multiprocessing import Pool
from copy import deepcopy
from time import time
class CarRental:
"""
Provides the definition of the car rental problem.
Parameter(s):
name: Name of the CarRental problem.
max_cars_list: A list containing the maximum number of cars that can be in
each location at any point in time.
transfer_dict: A dictionary containing information about transferring cars
from one location to another. Should be stored in the form of key:
(source location index, destination location index) and value: (maximum
number of cars that can be transfered from the source location to the
destination location, maximum number of cars that can be transfered from the
source location to the destination location for free, cost of transferring a
car from the source location to the destination location.
rental_fee: The cost of renting a vehicle at any location, used as a revenue
for the car rental problem.
add_storage_threshold_list: A list containing the maximum number of cars
that can be stored at each location before additional storage costs are
incurred.
add_storage_fee: The cost of additional storage at any location.
rental_lambda_list: A list containing the expected number of rentals at
each location, based on a Poisson distribution.
return_lambda_list: A list containing the expected number of returns at
each location, based on a Poisson distribution.
discount: The discount rate when considering the subsequent state.
use_multiprocessing: Boolean variable for deciding whether to use
multiprocessing to solve the car rental problem.
num_max_processes (optional, default 8): The maximum number of processes to
use for multiprocessing.
"""
    def __init__(self,
                 name,
                 max_cars_list,
                 transfer_dict,
                 rental_fee,
                 add_storage_threshold_list,
                 add_storage_fee,
                 rental_lambda_list,
                 return_lambda_list,
                 discount,
                 use_multiprocessing,
                 num_max_processes=8):
        """Store the problem parameters (documented on the class docstring)
        and precompute the tables needed by the solvers."""
        # Initialises the car rental problem based on the given parameters.
        self.name = name
        self.max_cars_list = max_cars_list
        self.transfer_dict = transfer_dict
        self.rental_fee = rental_fee
        self.add_storage_threshold_list = add_storage_threshold_list
        self.add_storage_fee = add_storage_fee
        self.rental_lambda_list = rental_lambda_list
        self.return_lambda_list = return_lambda_list
        self.discount = discount
        self.use_multiprocessing = use_multiprocessing
        self.num_max_processes = num_max_processes
        # Computes the number of car rental locations.
        self.num_locations = len(max_cars_list)
        # Initialises the current available solving methods as a dictionary,
        # with key being the method name and value being the specific function
        # to call.
        self.implemented_solve_methods = {'policy_iteration': self.policy_iteration,
                                          'value_iteration': self.value_iteration}
        # Computes values required for solving the car rental problem.
        self.__compute_values()
def __compute_values(self):
    """
    Computes values required for solving the CarRental problem.

    Builds, from the raw constructor inputs:
      * per-pair transfer tables (max transfers, free transfers, cost),
      * the list of every possible movement matrix,
      * per-location rental/return probability tables (tail-truncated
        Poisson distributions).

    NOTE(review): `np`, `product` and `poisson` are presumably module-level
    imports (numpy, itertools.product, scipy.stats.poisson) — confirm at
    the top of the file, which is outside this chunk.
    """
    # Pairwise tables, all initialised to 0 and indexed by
    # (from_location, to_location): maximum number of transfers, number of
    # free transfers, and per-car transfer cost.
    self.max_transfers_arr = np.zeros((self.num_locations, self.num_locations), int)
    self.free_transfers_num_arr = np.zeros((self.num_locations, self.num_locations), int)
    self.transfer_cost_arr = np.zeros((self.num_locations, self.num_locations), int)
    # Fill the three tables from the transfer dictionary, whose values are
    # (max_transfers, free_transfers, cost) triples keyed by location pair.
    for key, value in self.transfer_dict.items():
        self.max_transfers_arr[key] = value[0]
        self.free_transfers_num_arr[key] = value[1]
        self.transfer_cost_arr[key] = value[2]
    # For each location pair (i, j) with i < j: the array of all possible
    # signed net transfers, from -(max transfers j -> i) to
    # +(max transfers i -> j), inclusive.
    transfer_range_dict = {}
    # Per unordered location pair, the highest index into its transfer
    # range (i.e. range size minus one).  There are
    # num_locations * (num_locations - 1) / 2 such pairs.
    transfer_num_list = [0] * (self.num_locations * (self.num_locations-1) // 2)
    # The (i, j) pair corresponding to each slot of transfer_num_list; the
    # kth tuple here matches the kth value there.
    self.transfer_index_list = [0] * len(transfer_num_list)
    # Slots are filled from the back of the two lists.
    transfer_index = len(transfer_num_list) - 1
    # Visit every unordered location pair (i, j) with i < j.
    for i in range(self.num_locations-1):
        for j in range(i+1, self.num_locations):
            # Signed transfer range for this pair.
            transfer_range_dict[(i,j)] = np.arange(-self.max_transfers_arr[j,i], self.max_transfers_arr[i,j]+1)
            # Highest valid index into that range.
            transfer_num_list[transfer_index] = transfer_range_dict[(i,j)].size - 1
            # Remember which pair this slot describes.
            self.transfer_index_list[transfer_index] = (i,j)
            # Move to the next slot (towards the front).
            transfer_index -= 1
    # Enumerate every possible movement matrix.  A movement matrix is
    # num_locations x num_locations with nonzero entries only where
    # row < column: entry (i, j) > 0 moves that many cars i -> j, while
    # entry (i, j) < 0 moves |entry| cars j -> i.
    self.full_movement_matrix_list = []
    # Each element of the cartesian product picks one index into the
    # transfer range of every location pair.
    for current_transfer_num in product(*[range(x+1) for x in transfer_num_list]):
        # Start from an all-zero matrix for this combination.
        current_movement_matrix = np.zeros((self.num_locations, self.num_locations),int)
        # Translate each selected range index into a signed transfer count
        # at the corresponding (i, j) position.
        for idx, val in enumerate(self.transfer_index_list):
            current_movement_matrix[val] = transfer_range_dict[val][current_transfer_num[idx]]
        # Record the completed movement matrix.
        self.full_movement_matrix_list.append(current_movement_matrix)
    # Per-location rental tables: rental_dict[location][cars_available] is
    # a dict with 'fee' (revenue per possible rental count) and 'prob'
    # (probability of each rental count, demand truncated at availability).
    self.rental_dict = {}
    # Per-location return tables: return_dict[location][cars_present] is
    # the probability of each possible return count, truncated at the
    # remaining storage capacity.
    self.return_dict = {}
    # Build both tables for every location.
    for idx in range(self.num_locations):
        self.rental_dict[idx] = {}
        self.return_dict[idx] = {}
        # Capacity and Poisson rental/return rates for this location.
        num_max_cars = self.max_cars_list[idx]
        rental_lambda = self.rental_lambda_list[idx]
        return_lambda = self.return_lambda_list[idx]
        # Poisson pmf for 0 .. num_max_cars-1 rentals, with the entire
        # upper tail (>= num_max_cars) lumped into the final entry so the
        # probabilities sum to 1.
        full_rental_prob_arr = np.array([poisson.pmf(rent_num, rental_lambda) for rent_num in range(num_max_cars)] + \
            [1-poisson.cdf(num_max_cars - 1, rental_lambda)])
        # Revenue for renting 0 .. num_max_cars cars.
        full_rental_fee_arr = self.rental_fee * np.arange(num_max_cars+1)
        # Same tail-lumped Poisson construction for returns.
        full_return_prob_arr = np.array([poisson.pmf(return_num, return_lambda) for return_num in range(num_max_cars)] + \
            [1-poisson.cdf(num_max_cars - 1, return_lambda)])
        # With 0 cars available, the only possible rental count is 0
        # (probability 1, fee 0).
        self.rental_dict[idx][0] = {'fee': [0.],
                                    'prob': [1.]}
        # With 0 cars present, every return count up to capacity is
        # possible — use the full tail-lumped distribution.
        self.return_dict[idx][0] = full_return_prob_arr
        # Intermediate availabilities, exclusive of both endpoints:
        # truncate both distributions so the final entry absorbs the tail
        # beyond what can actually be rented out / stored.
        for possibility_idx in range(1,num_max_cars):
            # Rentals are capped at the cars currently available.
            self.rental_dict[idx][possibility_idx] = {'fee': full_rental_fee_arr[:possibility_idx+1],
                'prob': np.concatenate([full_rental_prob_arr[:possibility_idx], [full_rental_prob_arr[possibility_idx:].sum()]], axis=0)}
            # Returns are capped at the remaining storage capacity.
            self.return_dict[idx][possibility_idx] = np.concatenate([full_return_prob_arr[:num_max_cars-possibility_idx], [full_return_prob_arr[num_max_cars-possibility_idx:].sum()]], axis=0)
        # At full availability the rental distribution is untruncated.
        self.rental_dict[idx][num_max_cars] = {'fee': full_rental_fee_arr,
                                               'prob': full_rental_prob_arr}
        # At full capacity the only possible return count is 0.
        self.return_dict[idx][num_max_cars] = [1.]
def step(self, state, movement_matrix):
    """
    Computes the expected value of applying `movement_matrix` in `state`.

    The step consists of: (1) overnight car movement, (2) rentals,
    (3) returns.  Returns that would exceed a location's capacity are
    excluded by construction of the truncated return tables.

    Parameter(s):
    state: A valid state — the number of cars at each location.
    movement_matrix: A valid movement matrix: entries only where
    row < column; entry (i, j) > 0 moves cars i -> j, entry (i, j) < 0
    moves |entry| cars j -> i.  Validity means no location exceeds its
    capacity after all movements.

    Return(s):
    The expected (discounted, one-step lookahead) value of performing
    this movement matrix in this state.

    NOTE(review): `reduce`, `mul` and `product` are presumably
    functools.reduce, operator.mul and itertools.product imported at
    module level — confirm at the top of the file, outside this chunk.
    """
    # Running expected value for this (state, movement) pair.
    final_val = 0.
    # Nonnegative "action" matrix: entry (i, j) is the number of cars
    # actually shipped i -> j.  Positive entries are kept in place; the
    # negative entries are negated and transposed so the shipment appears
    # under its true (source, destination) index.
    action = movement_matrix * (movement_matrix > 0) - (movement_matrix * (movement_matrix < 0)).transpose()
    # Apply the movement.  Row sums are cars leaving each location,
    # column sums are cars arriving.  Assumes the policy only supplies
    # valid movement matrices.
    post_movement = state - movement_matrix.sum(axis=1) + movement_matrix.sum(axis=0)
    # Charge for every transferred car beyond the free allowance of its
    # location pair.
    movement_cost = (np.maximum(0., action - self.free_transfers_num_arr) * self.transfer_cost_arr).sum()
    final_val -= movement_cost
    # Charge the flat additional-storage fee at each location whose car
    # count exceeds its storage threshold.
    storage_cost = ((post_movement > self.add_storage_threshold_list) * self.add_storage_fee).sum()
    final_val -= storage_cost
    # Enumerate every joint rental outcome: 0..available cars at each
    # location.
    for current_rent_num in product(*[range(x+1) for x in post_movement]):
        # Total revenue and joint probability of this rental outcome,
        # looked up in the precomputed per-location tables.
        rental_profit = sum(self.rental_dict[location_idx][current_post_movement]['fee'][current_rent_num[location_idx]] for location_idx, current_post_movement in enumerate(post_movement))
        rental_prob = reduce(mul, (self.rental_dict[location_idx][current_post_movement]['prob'][current_rent_num[location_idx]] for location_idx, current_post_movement in enumerate(post_movement)))
        # Remove the rented cars.
        post_rental = post_movement - current_rent_num
        # Enumerate every joint return outcome: 0..remaining capacity at
        # each location.
        for current_return_num in product(*[range(self.max_cars_list[idx] - post_rental[idx]+1) for idx in range(self.num_locations)]):
            # Joint probability of this return outcome.
            return_prob = reduce(mul, (self.return_dict[location_idx][current_post_rental][current_return_num[location_idx]] for location_idx, current_post_rental in enumerate(post_rental)))
            # Add the returned cars to reach the successor state.
            final_state = post_rental + current_return_num
            # Joint probability of the rental/return combination.
            final_prob = rental_prob * return_prob
            # Bellman backup: immediate profit plus discounted value of
            # the successor state.
            final_val += final_prob * (rental_profit + self.discount * self.v[tuple(final_state.tolist())])
    return final_val
def find_valid_moves(self, state):
    """
    Finds all valid movement matrices for the given state.

    A movement matrix is valid when, for every location pair, the source
    location holds enough cars to ship and the destination does not exceed
    its maximum capacity after receiving them.

    Parameter(s):
    state: A valid state of the CarRental problem — the number of cars at
    each location.

    Return(s):
    A list of all movement matrices from the precomputed full list that
    are feasible in the given state, in their original order.
    """
    def _pair_is_feasible(matrix, pair):
        # Signed number of cars moved between this pair; the sign picks
        # the direction of travel.
        moved = matrix[pair]
        if moved > 0:
            source, destination = pair
        elif moved < 0:
            destination, source = pair
            moved = -moved
        else:
            # No movement between this pair — trivially feasible.
            return True
        # Source must hold enough cars; destination must stay within its
        # capacity after receiving them.
        return (state[source] >= moved
                and state[destination] + moved <= self.max_cars_list[destination])

    # Keep only those candidate matrices whose every pair is feasible.
    return [candidate for candidate in self.full_movement_matrix_list
            if all(_pair_is_feasible(candidate, pair)
                   for pair in self.transfer_index_list)]
def visualise(self):
    """
    Saves a two-panel figure of the results: the optimal policy as a
    heatmap (left) and the value estimate as a 3D surface (right).

    Only meaningful for exactly two locations; callers guard on
    self.num_locations == 2 before invoking this.

    NOTE(review): `plt`, `heatmap`, `cm` and `os` are presumably
    matplotlib.pyplot, seaborn.heatmap, matplotlib.cm and os imported at
    module level — confirm at the top of the file, outside this chunk.
    """
    # Directory of this source file; the figure is saved next to it.
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    # One wide figure with a heatmap axis (left) and a 3D axis (right);
    # the title records the solve method and the iteration count.
    fig = plt.figure(figsize=(20, 10))
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122, projection='3d')
    graph_title = ' '.join([substr.title() for substr in self.solve_method.split('_')])
    fig.suptitle('Car Rental %s Results: %i Iterations' % (graph_title, self.current_iter), fontsize=30)
    # Flatten the policy dict into an array for plotting.  For two
    # locations the policy of interest is the (0, 1) transfer count of
    # the first stored movement matrix per state.
    final_policy = np.zeros_like(self.v, dtype=int)
    for key, val in self.pi.items():
        final_policy[key] = val[0][0,1]
    # Left panel: policy heatmap with a horizontal colorbar labelled in
    # cars to transfer, one tick per distinct policy value.
    fig = heatmap(final_policy,
                  cmap=cm.coolwarm,
                  ax=ax1,
                  cbar_kws={'label': 'Cars to Transfer',
                            'ticks': list(range(final_policy.min(), final_policy.max()+1)),
                            'orientation': 'horizontal'})
    # Axis labels, limits and tick marks for the policy panel.
    ax1.set_xlabel('Cars At Second Location')
    ax1.set_ylabel('Cars At First Location')
    ax1.set_ylim(0, self.max_cars_list[0] + 1)
    ax1.set_xlim(0, self.max_cars_list[1] + 1)
    ax1.set_yticklabels(ax1.get_yticklabels(), rotation=0)
    # Right panel: state indexes for the x/y grid of the value surface.
    first_arr, second_arr = np.meshgrid(range(self.max_cars_list[0]+1), range(self.max_cars_list[1]+1), indexing='ij')
    # Value surface, with the four corner values annotated (the small
    # offsets just nudge the labels clear of the surface).
    ax2.plot_surface(first_arr, second_arr, self.v)
    ax2.text(-0.25, -0.25, self.v[0,0]+10, self.v[0,0].round().astype(int))
    ax2.text(self.max_cars_list[0]-0.25, -0.25, self.v[self.max_cars_list[0], 0]-5, self.v[self.max_cars_list[0], 0].round().astype(int))
    ax2.text(-0.25, self.max_cars_list[1], self.v[0, self.max_cars_list[1]]+5, self.v[0, self.max_cars_list[1]].round().astype(int))
    ax2.text(self.max_cars_list[0]-0.25, self.max_cars_list[1], self.v[self.max_cars_list[0], self.max_cars_list[1]]+5, self.v[self.max_cars_list[0], self.max_cars_list[1]].round().astype(int))
    # Axis labels and tick marks for the value panel.
    ax2.set_xlabel('Cars At First Location')
    ax2.set_xticks(list(range(self.max_cars_list[0] + 1)))
    ax2.set_ylabel('Cars At Second Location')
    ax2.set_yticks(list(reversed(range(self.max_cars_list[1] + 1))))
    ax2.set_zlabel('Value Estimate ($)')
    # Subplot titles.
    title_size = 15
    ax1.set_title('Optimal Policy', size=title_size)
    ax2.set_title('Optimal Value', size=title_size)
    # Save under a name derived from the problem name (if any) and the
    # solve method.
    if self.name:
        plt.savefig(os.path.join(curr_dir, 'carrental_%s_%s_results.png' % (self.name, self.solve_method)))
    else:
        plt.savefig(os.path.join(curr_dir, 'carrental_%s_results.png' % self.solve_method))
    plt.close()
def policy_evaluation_state_func(self, *args):
    """
    Evaluates the current policy for a single state.  Only used when
    multiprocessing is enabled.

    Parameter(s):
    args: The components of a valid state (the number of cars at each
    location), received as separate positional arguments from
    starmap_async and reassembled into the state tuple.

    Return(s):
    A (state, value) tuple where value is the mean one-step value over
    all movement matrices in the current policy for that state.
    """
    policy_moves = self.pi[args]
    # Average the value estimate over every movement matrix the current
    # policy allows in this state.
    total_value = 0.
    for movement in policy_moves:
        total_value += self.step(args, movement)
    return args, total_value / len(policy_moves)
def policy_evaluation_log_result(self, results):
    """
    Callback that records policy-evaluation results for a batch of
    states.  Only used when multiprocessing is enabled.

    Parameter(s):
    results: A list of (state, value) tuples, as produced by
    policy_evaluation_state_func.
    """
    # Copy each averaged value estimate into the working value array.
    for evaluated_state, evaluated_value in results:
        self.new_v[evaluated_state] = evaluated_value
def policy_improvement_state_func(self, state, tolerance):
    """
    Performs the policy-improvement step for a single state.  Only used
    when multiprocessing is enabled.

    Parameter(s):
    state: A valid state of the CarRental problem — the number of cars at
    each location.
    tolerance: Slack used when selecting greedy movement matrices, to
    absorb floating-point noise in the value estimates.

    Return(s):
    A (state, policy) tuple where policy is the list of all valid
    movement matrices whose value is within tolerance of the best one.
    """
    # Candidate actions and their one-step value estimates.
    candidate_moves = self.find_valid_moves(state)
    candidate_values = [self.step(state, move) for move in candidate_moves]
    best_value = max(candidate_values)
    # Keep every move whose value is within tolerance of the best value.
    greedy_moves = [move
                    for move, value in zip(candidate_moves, candidate_values)
                    if best_value - value < tolerance]
    return state, greedy_moves
def policy_improvement_log_result(self, results):
    """
    Callback that records policy-improvement results for a batch of
    states.  Only used when multiprocessing is enabled.

    Parameter(s):
    results: A list of (state, policy) tuples, as produced by
    policy_improvement_state_func.
    """
    # Copy each improved per-state policy into the working policy dict.
    for improved_state, improved_policy in results:
        self.new_pi[improved_state] = improved_policy
def policy_iteration(self, tolerance):
    """
    Solves the CarRental problem by policy iteration (alternating full
    policy evaluation and greedy policy improvement) and, for the
    two-location case, saves the result as a diagram.

    Parameter(s):
    tolerance: Used both to decide when policy evaluation has converged
    and as slack when selecting greedy movement matrices (the latter
    absorbs floating-point noise in the value estimates).

    NOTE(review): `Pool`, `product`, `deepcopy` and `np` are presumably
    multiprocessing.Pool, itertools.product, copy.deepcopy and numpy
    imported at module level — confirm at the top of the file, outside
    this chunk.
    """
    # Outer loop runs until a full improvement pass leaves the policy
    # unchanged.
    policy_stable = False
    while not policy_stable:
        # --- Policy evaluation: iterate value backups to convergence. ---
        value_stable = False
        while not value_stable:
            # Fresh working copy of the value estimate, initialised to 0.
            self.new_v = np.zeros_like(self.v)
            if self.use_multiprocessing:
                # Evaluate every state in parallel; the callback writes
                # the per-state averages into self.new_v.
                with Pool(processes=self.num_max_processes) as p:
                    res = p.starmap_async(self.policy_evaluation_state_func,
                                          list(product(*[range(x+1) for x in self.max_cars_list])),
                                          callback=self.policy_evaluation_log_result)
                    res.get()
            else:
                # Sequential evaluation over every state.
                for state in product(*[range(x+1) for x in self.max_cars_list]):
                    # Average the one-step value over all movement
                    # matrices in the current policy for this state.
                    values_list = [self.step(state, movement_matrix) for movement_matrix in self.pi[state]]
                    self.new_v[state] = sum(values_list) / len(values_list)
            # Converged when the largest per-state change is below the
            # tolerance.
            if np.abs(self.new_v - self.v).max() < tolerance:
                self.v = self.new_v
                # Exit the evaluation loop.
                value_stable = True
            else:
                # Not converged yet — adopt the new estimate and repeat.
                self.v = self.new_v
        # --- Policy improvement: greedy update against the new values. ---
        # Assume stability until a state's policy actually changes.
        policy_stable = True
        if self.use_multiprocessing:
            # Working copy of the policy, so changes can be detected.
            self.new_pi = deepcopy(self.pi)
            # Improve every state in parallel; the callback writes the
            # per-state greedy policies into self.new_pi.
            with Pool(processes=self.num_max_processes) as p:
                res = p.starmap_async(self.policy_improvement_state_func,
                                      product(product(*[range(x+1) for x in self.max_cars_list]), [tolerance]),
                                      callback=self.policy_improvement_log_result)
                res.get()
            # The policy is stable only if no state's policy changed.
            for state in self.pi:
                if not np.array_equal(self.pi[state], self.new_pi[state]):
                    policy_stable = False
                    break
            # Adopt the improved policy for all states.
            self.pi = deepcopy(self.new_pi)
        else:
            # Sequential improvement over every state.
            for state in product(*[range(x+1) for x in self.max_cars_list]):
                # Prior policy, kept for the stability comparison.
                old_movement_matrix_list = self.pi[state]
                # Candidate actions and their one-step value estimates.
                filtered_movement_matrix_list = self.find_valid_moves(state)
                values_list = [self.step(state, movement_matrix) for movement_matrix in filtered_movement_matrix_list]
                max_value = max(values_list)
                # Keep every movement matrix whose value is within
                # tolerance of the best value.
                new_movement_matrix_list = [movement_matrix for movement_matrix_idx, movement_matrix in enumerate(filtered_movement_matrix_list) if max_value - values_list[movement_matrix_idx] < tolerance]
                # First detected change marks the policy as unstable,
                # forcing another outer iteration.
                if policy_stable and not np.array_equal(old_movement_matrix_list, new_movement_matrix_list):
                    policy_stable = False
                # Adopt the greedy policy for this state.
                self.pi[state] = new_movement_matrix_list
        # One full evaluate+improve pass completed.
        self.current_iter += 1
    # Visualise only the two-location case; higher dimensions are not
    # easy to plot and are skipped.
    if self.num_locations == 2:
        self.visualise()
def value_iteration_state_func(self, state, tolerance):
    """
    Performs the combined value and policy update for a single state.
    Only used when multiprocessing is enabled.

    Parameter(s):
    state: A valid state of the CarRental problem — the number of cars at
    each location.
    tolerance: Slack used when selecting greedy movement matrices, to
    absorb floating-point noise in the value estimates.

    Return(s):
    A (state, policy, value) tuple: the greedy movement matrices within
    tolerance of the best value, and that best value.
    """
    # Candidate actions and their one-step value estimates.
    candidate_moves = self.find_valid_moves(state)
    candidate_values = [self.step(state, move) for move in candidate_moves]
    best_value = max(candidate_values)
    # Keep every move whose value is within tolerance of the best value.
    greedy_moves = [move
                    for move, value in zip(candidate_moves, candidate_values)
                    if best_value - value < tolerance]
    return state, greedy_moves, best_value
def value_iteration_log_result(self, results):
    """
    Callback that records the value and policy updates for a batch of
    states.  Only used when multiprocessing is enabled.

    Parameter(s):
    results: A list of (state, policy, value) tuples, as produced by
    value_iteration_state_func.
    """
    for updated_state, updated_policy, updated_value in results:
        # Adopt the greedy policy for this state immediately.
        self.pi[updated_state] = updated_policy
        # Record the new best value in the working value copy.
        self.new_v[updated_state] = updated_value
def value_iteration(self, tolerance):
    """
    Solves the CarRental problem by value iteration (greedy value and
    policy updates repeated until the value estimate converges) and, for
    the two-location case, saves the result as a diagram.

    Parameter(s):
    tolerance: Used both to decide when the value estimate has converged
    and as slack when selecting greedy movement matrices (the latter
    absorbs floating-point noise in the value estimates).

    NOTE(review): `Pool`, `product` and `np` are presumably
    multiprocessing.Pool, itertools.product and numpy imported at module
    level — confirm at the top of the file, outside this chunk.
    """
    # Loop until the value estimate stops changing by more than the
    # tolerance.
    value_stable = False
    while not value_stable:
        # Fresh working copy of the value estimate, initialised to 0.
        self.new_v = np.zeros_like(self.v)
        if self.use_multiprocessing:
            # Update every state in parallel; the callback writes the
            # greedy policy into self.pi and the best value into
            # self.new_v.
            with Pool(processes=self.num_max_processes) as p:
                res = p.starmap_async(self.value_iteration_state_func,
                                      product(product(*[range(x+1) for x in self.max_cars_list]), [tolerance]),
                                      callback=self.value_iteration_log_result)
                res.get()
        else:
            # Sequential update over every state.
            for state in product(*[range(x+1) for x in self.max_cars_list]):
                # Candidate actions and their one-step value estimates.
                filtered_movement_matrix_list = self.find_valid_moves(state)
                values_list = [self.step(state, movement_matrix) for movement_matrix in filtered_movement_matrix_list]
                max_value = max(values_list)
                # Greedy policy: every movement matrix whose value is
                # within tolerance of the best value.
                self.pi[state] = [movement_matrix for movement_matrix_idx, movement_matrix in enumerate(filtered_movement_matrix_list) if max_value - values_list[movement_matrix_idx] < tolerance]
                # Record the best value in the working copy.
                self.new_v[state] = max_value
        # Converged when the largest per-state change is below the
        # tolerance.
        if np.abs(self.new_v - self.v).max() < tolerance:
            # Visualise only the two-location case; higher dimensions
            # are not easy to plot and are skipped.
            if self.num_locations == 2:
                self.visualise()
            # Exit the value iteration loop.
            value_stable = True
        else:
            # Not converged yet — adopt the new estimate and repeat.
            self.v = self.new_v
        # One full sweep over the state space completed.
        self.current_iter += 1
def query(self, state_list=None):
    """
    Provides the policy of queried state(s).  This function is used if a
    subset of the policy is desired or if the number of locations exceeds
    two, in which case a visualisation would be difficult to create.

    Parameter(s):
    state_list (optional, default None): A list of valid states to query.
    If omitted (None or an empty list), the entire policy is returned.
    (The default was previously a mutable ``[]``; ``None`` is equivalent
    here since the list is never mutated and both are falsy.)

    Return(s):
    A dictionary containing the policy of all queried states, or the
    full policy dictionary when no states are given.

    Raises:
    AssertionError: If a queried state is not a valid state of the
    problem (i.e. absent from the policy dictionary).
    """
    # A non-empty query list selects a subset of the policy.
    if state_list:
        # BUGFIX: this dictionary was initialised as `subset_dict` but
        # was filled and returned via the undefined name `final_dict`,
        # so every subset query raised NameError.
        subset_dict = {}
        for state in state_list:
            # Ensure the queried state is a valid state.
            assert state in self.pi
            # Add its policy to the subset dictionary.
            subset_dict[state] = self.pi[state]
        return subset_dict
    # No query list: return the entire policy.
    return self.pi
def obtain_optimum(self, solve_method='value_iteration', tolerance=1e-16):
    """
    Obtains the optimum value estimate of the CarRental problem and its
    corresponding policy by dispatching to the selected solve method.
    The solver saves the results into a diagram for the two-location case.

    Parameter(s):
    solve_method (optional, default value_iteration): Name of the solver
    to run; must be a key of self.implemented_solve_methods
    (currently policy_iteration and value_iteration).
    tolerance (optional, default 1e-16): Convergence threshold for the
    value estimate, also used as slack when picking greedy movement
    matrices against floating-point noise.
    """
    # Refuse any solve method that has not been implemented.
    assert solve_method in self.implemented_solve_methods, "%s SOLVE METHOD NOT IMPLEMENTED" % solve_method
    # Remember which solver is being run (used when titling/saving plots).
    self.solve_method = solve_method
    # Value estimates start at zero over the whole state space, one axis
    # per location with (capacity + 1) entries.
    state_space_shape = [car_count + 1 for car_count in self.max_cars_list]
    self.v = np.zeros(state_space_shape)
    # Every state starts with a single all-ones movement matrix as its
    # policy; states are enumerated lazily via a generator.
    all_states = product(*[range(car_count + 1) for car_count in self.max_cars_list])
    self.pi = {state: [np.ones_like(self.max_transfers_arr)] for state in all_states}
    # No iterations performed yet.
    self.current_iter = 0
    # Dispatch to the selected solver.
    self.implemented_solve_methods[solve_method](tolerance)
| [
"numpy.ones_like",
"scipy.stats.poisson.pmf",
"numpy.abs",
"os.path.join",
"matplotlib.pyplot.close",
"scipy.stats.poisson.cdf",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.array_equal",
"multiprocessing.Pool",
"copy.deepcopy",
"os.path.abspath",
"numpy.maximum",
"numpy.zeros_like",
... | [((4138, 4193), 'numpy.zeros', 'np.zeros', (['(self.num_locations, self.num_locations)', 'int'], {}), '((self.num_locations, self.num_locations), int)\n', (4146, 4193), True, 'import numpy as np\n'), ((4227, 4282), 'numpy.zeros', 'np.zeros', (['(self.num_locations, self.num_locations)', 'int'], {}), '((self.num_locations, self.num_locations), int)\n', (4235, 4282), True, 'import numpy as np\n'), ((4311, 4366), 'numpy.zeros', 'np.zeros', (['(self.num_locations, self.num_locations)', 'int'], {}), '((self.num_locations, self.num_locations), int)\n', (4319, 4366), True, 'import numpy as np\n'), ((19361, 19389), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (19371, 19389), True, 'import matplotlib.pyplot as plt\n'), ((19763, 19795), 'numpy.zeros_like', 'np.zeros_like', (['self.v'], {'dtype': 'int'}), '(self.v, dtype=int)\n', (19776, 19795), True, 'import numpy as np\n'), ((22362, 22373), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22371, 22373), True, 'import matplotlib.pyplot as plt\n'), ((37841, 37888), 'numpy.zeros', 'np.zeros', (['[(x + 1) for x in self.max_cars_list]'], {}), '([(x + 1) for x in self.max_cars_list])\n', (37849, 37888), True, 'import numpy as np\n'), ((7439, 7494), 'numpy.zeros', 'np.zeros', (['(self.num_locations, self.num_locations)', 'int'], {}), '((self.num_locations, self.num_locations), int)\n', (7447, 7494), True, 'import numpy as np\n'), ((19174, 19199), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (19189, 19199), False, 'import os\n'), ((33309, 33330), 'numpy.zeros_like', 'np.zeros_like', (['self.v'], {}), '(self.v)\n', (33322, 33330), True, 'import numpy as np\n'), ((6226, 6300), 'numpy.arange', 'np.arange', (['(-self.max_transfers_arr[j, i])', '(self.max_transfers_arr[i, j] + 1)'], {}), '(-self.max_transfers_arr[j, i], self.max_transfers_arr[i, j] + 1)\n', (6235, 6300), True, 'import numpy as np\n'), ((9963, 9990), 'numpy.arange', 
'np.arange', (['(num_max_cars + 1)'], {}), '(num_max_cars + 1)\n', (9972, 9990), True, 'import numpy as np\n'), ((22174, 22265), 'os.path.join', 'os.path.join', (['curr_dir', "('carrental_%s_%s_results.png' % (self.name, self.solve_method))"], {}), "(curr_dir, 'carrental_%s_%s_results.png' % (self.name, self.\n solve_method))\n", (22186, 22265), False, 'import os\n'), ((22287, 22357), 'os.path.join', 'os.path.join', (['curr_dir', "('carrental_%s_results.png' % self.solve_method)"], {}), "(curr_dir, 'carrental_%s_results.png' % self.solve_method)\n", (22299, 22357), False, 'import os\n'), ((26160, 26181), 'numpy.zeros_like', 'np.zeros_like', (['self.v'], {}), '(self.v)\n', (26173, 26181), True, 'import numpy as np\n'), ((27959, 27976), 'copy.deepcopy', 'deepcopy', (['self.pi'], {}), '(self.pi)\n', (27967, 27976), False, 'from copy import deepcopy\n'), ((28676, 28697), 'copy.deepcopy', 'deepcopy', (['self.new_pi'], {}), '(self.new_pi)\n', (28684, 28697), False, 'from copy import deepcopy\n'), ((38101, 38137), 'numpy.ones_like', 'np.ones_like', (['self.max_transfers_arr'], {}), '(self.max_transfers_arr)\n', (38113, 38137), True, 'import numpy as np\n'), ((14853, 14906), 'numpy.maximum', 'np.maximum', (['(0.0)', '(action - self.free_transfers_num_arr)'], {}), '(0.0, action - self.free_transfers_num_arr)\n', (14863, 14906), True, 'import numpy as np\n'), ((28141, 28179), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.num_max_processes'}), '(processes=self.num_max_processes)\n', (28145, 28179), False, 'from multiprocessing import Pool\n'), ((33534, 33572), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.num_max_processes'}), '(processes=self.num_max_processes)\n', (33538, 33572), False, 'from multiprocessing import Pool\n'), ((9711, 9747), 'scipy.stats.poisson.pmf', 'poisson.pmf', (['rent_num', 'rental_lambda'], {}), '(rent_num, rental_lambda)\n', (9722, 9747), False, 'from scipy.stats import poisson\n'), ((10461, 10499), 'scipy.stats.poisson.pmf', 
'poisson.pmf', (['return_num', 'return_lambda'], {}), '(return_num, return_lambda)\n', (10472, 10499), False, 'from scipy.stats import poisson\n'), ((26383, 26421), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.num_max_processes'}), '(processes=self.num_max_processes)\n', (26387, 26421), False, 'from multiprocessing import Pool\n'), ((28497, 28547), 'numpy.array_equal', 'np.array_equal', (['self.pi[state]', 'self.new_pi[state]'], {}), '(self.pi[state], self.new_pi[state])\n', (28511, 28547), True, 'import numpy as np\n'), ((35220, 35247), 'numpy.abs', 'np.abs', (['(self.new_v - self.v)'], {}), '(self.new_v - self.v)\n', (35226, 35247), True, 'import numpy as np\n'), ((9804, 9848), 'scipy.stats.poisson.cdf', 'poisson.cdf', (['(num_max_cars - 1)', 'rental_lambda'], {}), '(num_max_cars - 1, rental_lambda)\n', (9815, 9848), False, 'from scipy.stats import poisson\n'), ((10558, 10602), 'scipy.stats.poisson.cdf', 'poisson.cdf', (['(num_max_cars - 1)', 'return_lambda'], {}), '(num_max_cars - 1, return_lambda)\n', (10569, 10602), False, 'from scipy.stats import poisson\n'), ((27364, 27391), 'numpy.abs', 'np.abs', (['(self.new_v - self.v)'], {}), '(self.new_v - self.v)\n', (27370, 27391), True, 'import numpy as np\n'), ((29900, 29966), 'numpy.array_equal', 'np.array_equal', (['old_movement_matrix_list', 'new_movement_matrix_list'], {}), '(old_movement_matrix_list, new_movement_matrix_list)\n', (29914, 29966), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
import pickle
import os
import pytest
import numpy as np
from renormalizer.model import MolList, MolList2, ModelTranslator, Mol, Phonon
from renormalizer.mps import Mpo, Mps
from renormalizer.tests.parameter import mol_list, ph_phys_dim, omega_quantities
from renormalizer.mps.tests import cur_dir
from renormalizer.utils import Quantity, Op
@pytest.mark.parametrize("dt, space, shift", ([30, "GS", 0.0], [30, "EX", 0.0]))
def test_exact_propagator(dt, space, shift):
    # Build the exact imaginary-time propagator MPO for the requested space.
    propagator = Mpo.exact_propagator(mol_list, -1.0j * dt, space, shift)
    # Load the stored reference operators and compare against the right one.
    pickle_path = os.path.join(cur_dir, "test_exact_propagator.pickle")
    with open(pickle_path, "rb") as handle:
        reference = pickle.load(handle)
    assert propagator == reference[space]
@pytest.mark.parametrize("scheme", (1, 2, 3, 4))
def test_offset(scheme):
    # Two identical molecules, each carrying two simple phonon modes.
    phonon = Phonon.simple_phonon(Quantity(3.33), Quantity(1), 2)
    molecule = Mol(Quantity(0), [phonon] * 2)
    mlist = MolList([molecule] * 2, Quantity(17), scheme=scheme)
    # Diagonalise the plain Hamiltonian MPO ...
    plain_mpo = Mpo(mlist)
    assert plain_mpo.is_hermitian()
    spectrum_plain, _ = np.linalg.eigh(plain_mpo.full_operator())
    # ... and the same Hamiltonian shifted by a constant offset.
    offset = Quantity(0.123)
    shifted_mpo = Mpo(mlist, offset=offset)
    spectrum_shifted, _ = np.linalg.eigh(shifted_mpo.full_operator())
    # The offset must rigidly shift every eigenvalue.
    assert np.allclose(spectrum_plain - offset.as_au(), spectrum_shifted)
def test_identity():
    # A normalised random state must have unit expectation of the identity.
    mps = Mps.random(mol_list, nexciton=1, m_max=5)
    identity = Mpo.identity(mol_list)
    assert mps.expectation(identity) == pytest.approx(mps.dmrg_norm) == pytest.approx(1)
def test_scheme4():
    # Two molecules with different phonon counts so that scheme 4 and
    # scheme 3 produce structurally different MPS/MPO layouts.
    ph = Phonon.simple_phonon(Quantity(3.33), Quantity(1), 2)
    m1 = Mol(Quantity(0), [ph])
    m2 = Mol(Quantity(0), [ph] * 2)
    mlist1 = MolList([m1, m2], Quantity(17), 4)
    mlist2 = MolList([m1, m2], Quantity(17), 3)
    mpo4 = Mpo(mlist1)
    assert mpo4.is_hermitian()
    # for debugging
    f = mpo4.full_operator()
    mpo3 = Mpo(mlist2)
    assert mpo3.is_hermitian()
    # Hand-build the same physical state in the scheme 4 layout ...
    mps4 = Mps()
    mps4.mol_list = mlist1
    mps4.use_dummy_qn = True
    for amplitudes in ([1, 0], [0, 0, 1], [0.707, 0.707], [1, 0]):
        mps4.append(np.array(amplitudes).reshape((1, -1, 1)))
    mps4.build_empty_qn()
    e4 = mps4.expectation(mpo4)
    # ... and again in the scheme 3 layout.
    mps3 = Mps()
    mps3.mol_list = mlist2
    for amplitudes in ([1, 0], [1, 0], [0, 1], [0.707, 0.707], [1, 0]):
        mps3.append(np.array(amplitudes).reshape((1, -1, 1)))
    e3 = mps3.expectation(mpo3)
    # Both layouts must yield the same energy expectation.
    assert pytest.approx(e4) == e3
@pytest.mark.parametrize("scheme", (2, 3, 4))
def test_intersite(scheme):
    local_mlist = mol_list.switch_scheme(scheme)
    # A single-site intersite term must match the equivalent onsite operator.
    creation_inter = Mpo.intersite(local_mlist, {0: r"a^\dagger"}, {}, Quantity(1.0))
    creation_onsite = Mpo.onsite(local_mlist, r"a^\dagger", mol_idx_set=[0])
    assert creation_inter.distance(creation_onsite) == pytest.approx(0, abs=1e-5)
    number_inter = Mpo.intersite(local_mlist, {2: r"a^\dagger a"}, {}, Quantity(1.0))
    number_onsite = Mpo.onsite(local_mlist, r"a^\dagger a", mol_idx_set=[2])
    assert number_inter.distance(number_onsite) == pytest.approx(0, abs=1e-5)
    # Halving the prefactor and adding the operator to itself restores it.
    half_number = Mpo.intersite(local_mlist, {2: r"a^\dagger a"}, {}, Quantity(0.5))
    assert half_number.add(half_number).distance(number_onsite) == pytest.approx(0, abs=1e-5)
    # A two-site term equals the product of its two onsite factors.
    hopping = Mpo.intersite(local_mlist, {0: r"a^\dagger", 2: "a"}, {}, Quantity(1.0))
    annihilation_onsite = Mpo.onsite(local_mlist, "a", mol_idx_set=[2])
    assert creation_onsite.apply(annihilation_onsite).distance(hopping) == pytest.approx(0, abs=1e-5)
    # the tests are based on the similarity between scheme 2 and scheme 3
    # so scheme 3 and scheme 4 will be skipped
    if scheme == 2:
        full_mpo = Mpo(local_mlist)
        # a dirty hack to switch from scheme 2 to scheme 3
        test_mlist = local_mlist.switch_scheme(2)
        test_mlist.scheme = 3
        diag_mpo = Mpo(test_mlist)
        forward_hop = Mpo.intersite(test_mlist, {0: r"a^\dagger", 2: "a"}, {},
                                    Quantity(local_mlist.j_matrix[0, 2]))
        backward_hop = Mpo.intersite(test_mlist, {2: r"a^\dagger", 0: "a"}, {},
                                     Quantity(local_mlist.j_matrix[0, 2]))
        assert backward_hop.conj_trans().distance(forward_hop) == pytest.approx(0, abs=1e-6)
        assert full_mpo.distance(diag_mpo + forward_hop + backward_hop) == pytest.approx(0, abs=1e-6)
        test_mlist.periodic = True
        periodic_mpo = Mpo(test_mlist)
        assert periodic_mpo.distance(diag_mpo + forward_hop + backward_hop) == pytest.approx(0, abs=1e-6)
    # Phonon onsite operators can also be expressed through intersite.
    b_onsite = Mpo.ph_onsite(local_mlist, "b", 1, 1)
    b_inter = Mpo.intersite(local_mlist, {}, {(1, 1): "b"})
    assert b_onsite.distance(b_inter) == pytest.approx(0, abs=1e-6)
def test_phonon_onsite():
    gs = Mps.gs(mol_list, max_entangled=False)
    assert not gs.ph_occupations.any()
    # Applying b^dagger on phonon 0 of molecule 0 raises its occupation by
    # one level per application.
    raising = Mpo.ph_onsite(mol_list, r"b^\dagger", 0, 0)
    first = raising.apply(gs).normalize()
    assert np.allclose(first.ph_occupations, [1, 0, 0, 0, 0, 0])
    second = raising.apply(first).normalize()
    assert np.allclose(second.ph_occupations, [2, 0, 0, 0, 0, 0])
    # The conjugate operator is the lowering operator b.
    lowering = raising.conj_trans()
    assert lowering.distance(Mpo.ph_onsite(mol_list, r"b", 0, 0)) == 0
    assert lowering.apply(second).normalize().distance(first) == pytest.approx(0, abs=1e-5)
from renormalizer.tests.parameter_PBI import construct_mol
@pytest.mark.parametrize("mol_list", (mol_list, construct_mol(10,10,0)))
@pytest.mark.parametrize("scheme", (123, 4))
def test_general_mpo_MolList(mol_list, scheme):
    # Scheme 123 keeps the mol_list as-is; scheme 4 needs an explicit switch.
    mol_list1 = mol_list.switch_scheme(4) if scheme == 4 else mol_list
    mol_list1.mol_list2_para()
    # Subtract the ground-state zero-point energy as a constant shift.
    zpe_shift = Quantity(-mol_list1[0].gs_zpe * mol_list1.mol_num)
    generic = Mpo.general_mpo(mol_list1, const=zpe_shift)
    reference = Mpo(mol_list1)
    check_result(generic, reference)
@pytest.mark.parametrize("mol_list", (mol_list, construct_mol(10,10,0)))
@pytest.mark.parametrize("scheme", (123, 4))
@pytest.mark.parametrize("formula", ("vibronic", "general"))
def test_general_mpo_MolList2(mol_list, scheme, formula):
    mol_list1 = mol_list.switch_scheme(4) if scheme == 4 else mol_list
    # scheme123
    translated = MolList2.MolList_to_MolList2(mol_list1, formula=formula)
    reference = Mpo(mol_list1)
    # classmethod method
    generic = Mpo.general_mpo(translated, const=Quantity(-mol_list[0].gs_zpe*mol_list.mol_num))
    check_result(generic, reference)
    # __init__ method, same api
    generic = Mpo(translated, offset=Quantity(mol_list[0].gs_zpe*mol_list.mol_num))
    check_result(generic, reference)
def test_general_mpo_others():
    """Check ``Mpo.general_mpo`` against the dedicated constructors.

    For onsite, intersite and phonon-onsite operators, the generic
    model-based construction (via ``MolList2`` and
    ``ModelTranslator.general_model``) must reproduce the operators built
    by the specialised class methods on the original ``MolList``.
    """
    mol_list2 = MolList2.MolList_to_MolList2(mol_list)
    # onsite
    mpo_std = Mpo.onsite(mol_list, r"a^\dagger", mol_idx_set=[0])
    mpo = Mpo.onsite(mol_list2, r"a^\dagger", mol_idx_set=[0])
    check_result(mpo, mpo_std)
    # general method
    mpo = Mpo.general_mpo(mol_list2, model={("e_0",):[(Op(r"a^\dagger",0),1.0)]},
            model_translator=ModelTranslator.general_model)
    check_result(mpo, mpo_std)
    mpo_std = Mpo.onsite(mol_list, r"a^\dagger a", dipole=True)
    mpo = Mpo.onsite(mol_list2, r"a^\dagger a", dipole=True)
    check_result(mpo, mpo_std)
    mpo = Mpo.general_mpo(mol_list2,
            model={("e_0",):[(Op(r"a^\dagger a",0),mol_list2.dipole[("e_0",)])],
                ("e_1",):[(Op(r"a^\dagger a",0),mol_list2.dipole[("e_1",)])],
                ("e_2",):[(Op(r"a^\dagger a",0),mol_list2.dipole[("e_2",)])]},
            model_translator=ModelTranslator.general_model)
    check_result(mpo, mpo_std)
    # intersite
    # FIX: the phonon operator string below was previously the non-raw
    # literal "b^\dagger", which only worked because "\d" is not a
    # recognised escape; that emits a DeprecationWarning (a SyntaxWarning
    # on newer CPython).  Raw string has the identical value.
    mpo_std = Mpo.intersite(mol_list, {0:r"a^\dagger",2:"a"},
                        {(0,1):r"b^\dagger"}, Quantity(2.0))
    mpo = Mpo.intersite(mol_list2, {0:r"a^\dagger",2:"a"},
                        {(0,1):r"b^\dagger"}, Quantity(2.0))
    check_result(mpo, mpo_std)
    mpo = Mpo.general_mpo(mol_list2,
            model={("e_0","e_2","v_1"):[(Op(r"a^\dagger",1), Op(r"a",-1),
                Op(r"b^\dagger", 0), 2.0)]},
            model_translator=ModelTranslator.general_model)
    check_result(mpo, mpo_std)
    # phsite
    mpo_std = Mpo.ph_onsite(mol_list, r"b^\dagger", 0, 0)
    mpo = Mpo.ph_onsite(mol_list2, r"b^\dagger", 0, 0)
    check_result(mpo, mpo_std)
    mpo = Mpo.general_mpo(mol_list2,
            model={(mol_list2.map[(0,0)],):[(Op(r"b^\dagger",0), 1.0)]},
            model_translator=ModelTranslator.general_model)
    check_result(mpo, mpo_std)
def check_result(mpo, mpo_std):
    """Assert that *mpo* coincides with the reference *mpo_std*.

    The operators are compared through the relative distance
    ``|mpo - mpo_std| / |mpo_std|``.  Bond dimensions and quantum numbers
    are printed so a failing assertion is easier to debug.
    """
    print("std mpo bond dims:", mpo_std.bond_dims)
    print("new mpo bond dims:", mpo.bond_dims)
    print("std mpo qn:", mpo_std.qn, mpo_std.qntot)
    # FIX: this line previously printed mpo_std.qntot again instead of the
    # new operator's total quantum number.
    print("new mpo qn:", mpo.qn, mpo.qntot)
    assert mpo_std.distance(mpo)/np.sqrt(mpo_std.dot(mpo_std)) == pytest.approx(0, abs=1e-5)
| [
"renormalizer.tests.parameter_PBI.construct_mol",
"renormalizer.mps.Mps.random",
"renormalizer.tests.parameter.mol_list.switch_scheme",
"renormalizer.mps.Mpo",
"numpy.array",
"renormalizer.mps.Mpo.exact_propagator",
"renormalizer.mps.Mps.gs",
"renormalizer.mps.Mpo.onsite",
"numpy.linalg.eigh",
"nu... | [((398, 477), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dt, space, shift"""', "([30, 'GS', 0.0], [30, 'EX', 0.0])"], {}), "('dt, space, shift', ([30, 'GS', 0.0], [30, 'EX', 0.0]))\n", (421, 477), False, 'import pytest\n'), ((777, 824), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""scheme"""', '(1, 2, 3, 4)'], {}), "('scheme', (1, 2, 3, 4))\n", (800, 824), False, 'import pytest\n'), ((2663, 2707), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""scheme"""', '(2, 3, 4)'], {}), "('scheme', (2, 3, 4))\n", (2686, 2707), False, 'import pytest\n'), ((5298, 5341), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""scheme"""', '(123, 4)'], {}), "('scheme', (123, 4))\n", (5321, 5341), False, 'import pytest\n'), ((5795, 5838), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""scheme"""', '(123, 4)'], {}), "('scheme', (123, 4))\n", (5818, 5838), False, 'import pytest\n'), ((5840, 5899), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""formula"""', "('vibronic', 'general')"], {}), "('formula', ('vibronic', 'general'))\n", (5863, 5899), False, 'import pytest\n'), ((538, 594), 'renormalizer.mps.Mpo.exact_propagator', 'Mpo.exact_propagator', (['mol_list', '(-1.0j * dt)', 'space', 'shift'], {}), '(mol_list, -1.0j * dt, space, shift)\n', (558, 594), False, 'from renormalizer.mps import Mpo, Mps\n'), ((1016, 1026), 'renormalizer.mps.Mpo', 'Mpo', (['mlist'], {}), '(mlist)\n', (1019, 1026), False, 'from renormalizer.mps import Mpo, Mps\n'), ((1104, 1122), 'numpy.linalg.eigh', 'np.linalg.eigh', (['f1'], {}), '(f1)\n', (1118, 1122), True, 'import numpy as np\n'), ((1136, 1151), 'renormalizer.utils.Quantity', 'Quantity', (['(0.123)'], {}), '(0.123)\n', (1144, 1151), False, 'from renormalizer.utils import Quantity, Op\n'), ((1163, 1188), 'renormalizer.mps.Mpo', 'Mpo', (['mlist'], {'offset': 'offset'}), '(mlist, offset=offset)\n', (1166, 1188), False, 'from renormalizer.mps import Mpo, Mps\n'), ((1235, 1253), 
'numpy.linalg.eigh', 'np.linalg.eigh', (['f2'], {}), '(f2)\n', (1249, 1253), True, 'import numpy as np\n'), ((1348, 1370), 'renormalizer.mps.Mpo.identity', 'Mpo.identity', (['mol_list'], {}), '(mol_list)\n', (1360, 1370), False, 'from renormalizer.mps import Mpo, Mps\n'), ((1381, 1422), 'renormalizer.mps.Mps.random', 'Mps.random', (['mol_list'], {'nexciton': '(1)', 'm_max': '(5)'}), '(mol_list, nexciton=1, m_max=5)\n', (1391, 1422), False, 'from renormalizer.mps import Mpo, Mps\n'), ((1769, 1780), 'renormalizer.mps.Mpo', 'Mpo', (['mlist1'], {}), '(mlist1)\n', (1772, 1780), False, 'from renormalizer.mps import Mpo, Mps\n'), ((1872, 1883), 'renormalizer.mps.Mpo', 'Mpo', (['mlist2'], {}), '(mlist2)\n', (1875, 1883), False, 'from renormalizer.mps import Mpo, Mps\n'), ((1950, 1955), 'renormalizer.mps.Mps', 'Mps', ([], {}), '()\n', (1953, 1955), False, 'from renormalizer.mps import Mpo, Mps\n'), ((2297, 2302), 'renormalizer.mps.Mps', 'Mps', ([], {}), '()\n', (2300, 2302), False, 'from renormalizer.mps import Mpo, Mps\n'), ((2755, 2785), 'renormalizer.tests.parameter.mol_list.switch_scheme', 'mol_list.switch_scheme', (['scheme'], {}), '(scheme)\n', (2777, 2785), False, 'from renormalizer.tests.parameter import mol_list, ph_phys_dim, omega_quantities\n'), ((2873, 2927), 'renormalizer.mps.Mpo.onsite', 'Mpo.onsite', (['local_mlist', '"""a^\\\\dagger"""'], {'mol_idx_set': '[0]'}), "(local_mlist, 'a^\\\\dagger', mol_idx_set=[0])\n", (2883, 2927), False, 'from renormalizer.mps import Mpo, Mps\n'), ((3078, 3134), 'renormalizer.mps.Mpo.onsite', 'Mpo.onsite', (['local_mlist', '"""a^\\\\dagger a"""'], {'mol_idx_set': '[2]'}), "(local_mlist, 'a^\\\\dagger a', mol_idx_set=[2])\n", (3088, 3134), False, 'from renormalizer.mps import Mpo, Mps\n'), ((3438, 3483), 'renormalizer.mps.Mpo.onsite', 'Mpo.onsite', (['local_mlist', '"""a"""'], {'mol_idx_set': '[2]'}), "(local_mlist, 'a', mol_idx_set=[2])\n", (3448, 3483), False, 'from renormalizer.mps import Mpo, Mps\n'), ((4474, 4511), 
'renormalizer.mps.Mpo.ph_onsite', 'Mpo.ph_onsite', (['local_mlist', '"""b"""', '(1)', '(1)'], {}), "(local_mlist, 'b', 1, 1)\n", (4487, 4511), False, 'from renormalizer.mps import Mpo, Mps\n'), ((4526, 4571), 'renormalizer.mps.Mpo.intersite', 'Mpo.intersite', (['local_mlist', '{}', "{(1, 1): 'b'}"], {}), "(local_mlist, {}, {(1, 1): 'b'})\n", (4539, 4571), False, 'from renormalizer.mps import Mpo, Mps\n'), ((4674, 4711), 'renormalizer.mps.Mps.gs', 'Mps.gs', (['mol_list'], {'max_entangled': '(False)'}), '(mol_list, max_entangled=False)\n', (4680, 4711), False, 'from renormalizer.mps import Mpo, Mps\n'), ((4760, 4803), 'renormalizer.mps.Mpo.ph_onsite', 'Mpo.ph_onsite', (['mol_list', '"""b^\\\\dagger"""', '(0)', '(0)'], {}), "(mol_list, 'b^\\\\dagger', 0, 0)\n", (4773, 4803), False, 'from renormalizer.mps import Mpo, Mps\n'), ((4849, 4899), 'numpy.allclose', 'np.allclose', (['p1.ph_occupations', '[1, 0, 0, 0, 0, 0]'], {}), '(p1.ph_occupations, [1, 0, 0, 0, 0, 0])\n', (4860, 4899), True, 'import numpy as np\n'), ((4945, 4995), 'numpy.allclose', 'np.allclose', (['p2.ph_occupations', '[2, 0, 0, 0, 0, 0]'], {}), '(p2.ph_occupations, [2, 0, 0, 0, 0, 0])\n', (4956, 4995), True, 'import numpy as np\n'), ((5670, 5684), 'renormalizer.mps.Mpo', 'Mpo', (['mol_list1'], {}), '(mol_list1)\n', (5673, 5684), False, 'from renormalizer.mps import Mpo, Mps\n'), ((6096, 6152), 'renormalizer.model.MolList2.MolList_to_MolList2', 'MolList2.MolList_to_MolList2', (['mol_list1'], {'formula': 'formula'}), '(mol_list1, formula=formula)\n', (6124, 6152), False, 'from renormalizer.model import MolList, MolList2, ModelTranslator, Mol, Phonon\n'), ((6167, 6181), 'renormalizer.mps.Mpo', 'Mpo', (['mol_list1'], {}), '(mol_list1)\n', (6170, 6181), False, 'from renormalizer.mps import Mpo, Mps\n'), ((6529, 6567), 'renormalizer.model.MolList2.MolList_to_MolList2', 'MolList2.MolList_to_MolList2', (['mol_list'], {}), '(mol_list)\n', (6557, 6567), False, 'from renormalizer.model import MolList, MolList2, 
ModelTranslator, Mol, Phonon\n'), ((6601, 6652), 'renormalizer.mps.Mpo.onsite', 'Mpo.onsite', (['mol_list', '"""a^\\\\dagger"""'], {'mol_idx_set': '[0]'}), "(mol_list, 'a^\\\\dagger', mol_idx_set=[0])\n", (6611, 6652), False, 'from renormalizer.mps import Mpo, Mps\n'), ((6663, 6715), 'renormalizer.mps.Mpo.onsite', 'Mpo.onsite', (['mol_list2', '"""a^\\\\dagger"""'], {'mol_idx_set': '[0]'}), "(mol_list2, 'a^\\\\dagger', mol_idx_set=[0])\n", (6673, 6715), False, 'from renormalizer.mps import Mpo, Mps\n'), ((6957, 7006), 'renormalizer.mps.Mpo.onsite', 'Mpo.onsite', (['mol_list', '"""a^\\\\dagger a"""'], {'dipole': '(True)'}), "(mol_list, 'a^\\\\dagger a', dipole=True)\n", (6967, 7006), False, 'from renormalizer.mps import Mpo, Mps\n'), ((7017, 7067), 'renormalizer.mps.Mpo.onsite', 'Mpo.onsite', (['mol_list2', '"""a^\\\\dagger a"""'], {'dipole': '(True)'}), "(mol_list2, 'a^\\\\dagger a', dipole=True)\n", (7027, 7067), False, 'from renormalizer.mps import Mpo, Mps\n'), ((8014, 8057), 'renormalizer.mps.Mpo.ph_onsite', 'Mpo.ph_onsite', (['mol_list', '"""b^\\\\dagger"""', '(0)', '(0)'], {}), "(mol_list, 'b^\\\\dagger', 0, 0)\n", (8027, 8057), False, 'from renormalizer.mps import Mpo, Mps\n'), ((8068, 8112), 'renormalizer.mps.Mpo.ph_onsite', 'Mpo.ph_onsite', (['mol_list2', '"""b^\\\\dagger"""', '(0)', '(0)'], {}), "(mol_list2, 'b^\\\\dagger', 0, 0)\n", (8081, 8112), False, 'from renormalizer.mps import Mpo, Mps\n'), ((697, 713), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (708, 713), False, 'import pickle\n'), ((880, 894), 'renormalizer.utils.Quantity', 'Quantity', (['(3.33)'], {}), '(3.33)\n', (888, 894), False, 'from renormalizer.utils import Quantity, Op\n'), ((896, 907), 'renormalizer.utils.Quantity', 'Quantity', (['(1)'], {}), '(1)\n', (904, 907), False, 'from renormalizer.utils import Quantity, Op\n'), ((924, 935), 'renormalizer.utils.Quantity', 'Quantity', (['(0)'], {}), '(0)\n', (932, 935), False, 'from renormalizer.utils import Quantity, Op\n'), ((976, 
988), 'renormalizer.utils.Quantity', 'Quantity', (['(17)'], {}), '(17)\n', (984, 988), False, 'from renormalizer.utils import Quantity, Op\n'), ((1463, 1491), 'pytest.approx', 'pytest.approx', (['mps.dmrg_norm'], {}), '(mps.dmrg_norm)\n', (1476, 1491), False, 'import pytest\n'), ((1495, 1511), 'pytest.approx', 'pytest.approx', (['(1)'], {}), '(1)\n', (1508, 1511), False, 'import pytest\n'), ((1564, 1578), 'renormalizer.utils.Quantity', 'Quantity', (['(3.33)'], {}), '(3.33)\n', (1572, 1578), False, 'from renormalizer.utils import Quantity, Op\n'), ((1580, 1591), 'renormalizer.utils.Quantity', 'Quantity', (['(1)'], {}), '(1)\n', (1588, 1591), False, 'from renormalizer.utils import Quantity, Op\n'), ((1609, 1620), 'renormalizer.utils.Quantity', 'Quantity', (['(0)'], {}), '(0)\n', (1617, 1620), False, 'from renormalizer.utils import Quantity, Op\n'), ((1641, 1652), 'renormalizer.utils.Quantity', 'Quantity', (['(0)'], {}), '(0)\n', (1649, 1652), False, 'from renormalizer.utils import Quantity, Op\n'), ((1693, 1705), 'renormalizer.utils.Quantity', 'Quantity', (['(17)'], {}), '(17)\n', (1701, 1705), False, 'from renormalizer.utils import Quantity, Op\n'), ((1741, 1753), 'renormalizer.utils.Quantity', 'Quantity', (['(17)'], {}), '(17)\n', (1749, 1753), False, 'from renormalizer.utils import Quantity, Op\n'), ((2636, 2653), 'pytest.approx', 'pytest.approx', (['e4'], {}), '(e4)\n', (2649, 2653), False, 'import pytest\n'), ((2847, 2860), 'renormalizer.utils.Quantity', 'Quantity', (['(1.0)'], {}), '(1.0)\n', (2855, 2860), False, 'from renormalizer.utils import Quantity, Op\n'), ((2962, 2989), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-05)'}), '(0, abs=1e-05)\n', (2975, 2989), False, 'import pytest\n'), ((3052, 3065), 'renormalizer.utils.Quantity', 'Quantity', (['(1.0)'], {}), '(1.0)\n', (3060, 3065), False, 'from renormalizer.utils import Quantity, Op\n'), ((3169, 3196), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-05)'}), '(0, abs=1e-05)\n', (3182, 
3196), False, 'import pytest\n'), ((3259, 3272), 'renormalizer.utils.Quantity', 'Quantity', (['(0.5)'], {}), '(0.5)\n', (3267, 3272), False, 'from renormalizer.utils import Quantity, Op\n'), ((3318, 3345), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-05)'}), '(0, abs=1e-05)\n', (3331, 3345), False, 'import pytest\n'), ((3412, 3425), 'renormalizer.utils.Quantity', 'Quantity', (['(1.0)'], {}), '(1.0)\n', (3420, 3425), False, 'from renormalizer.utils import Quantity, Op\n'), ((3530, 3557), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-05)'}), '(0, abs=1e-05)\n', (3543, 3557), False, 'import pytest\n'), ((3714, 3730), 'renormalizer.mps.Mpo', 'Mpo', (['local_mlist'], {}), '(local_mlist)\n', (3717, 3730), False, 'from renormalizer.mps import Mpo, Mps\n'), ((3885, 3900), 'renormalizer.mps.Mpo', 'Mpo', (['test_mlist'], {}), '(test_mlist)\n', (3888, 3900), False, 'from renormalizer.mps import Mpo, Mps\n'), ((4361, 4376), 'renormalizer.mps.Mpo', 'Mpo', (['test_mlist'], {}), '(test_mlist)\n', (4364, 4376), False, 'from renormalizer.mps import Mpo, Mps\n'), ((4610, 4637), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (4623, 4637), False, 'import pytest\n'), ((5135, 5162), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-05)'}), '(0, abs=1e-05)\n', (5148, 5162), False, 'import pytest\n'), ((5454, 5479), 'renormalizer.tests.parameter.mol_list.switch_scheme', 'mol_list.switch_scheme', (['(4)'], {}), '(4)\n', (5476, 5479), False, 'from renormalizer.tests.parameter import mol_list, ph_phys_dim, omega_quantities\n'), ((5272, 5296), 'renormalizer.tests.parameter_PBI.construct_mol', 'construct_mol', (['(10)', '(10)', '(0)'], {}), '(10, 10, 0)\n', (5285, 5296), False, 'from renormalizer.tests.parameter_PBI import construct_mol\n'), ((5998, 6023), 'renormalizer.tests.parameter.mol_list.switch_scheme', 'mol_list.switch_scheme', (['(4)'], {}), '(4)\n', (6020, 6023), False, 'from renormalizer.tests.parameter import 
mol_list, ph_phys_dim, omega_quantities\n'), ((5769, 5793), 'renormalizer.tests.parameter_PBI.construct_mol', 'construct_mol', (['(10)', '(10)', '(0)'], {}), '(10, 10, 0)\n', (5782, 5793), False, 'from renormalizer.tests.parameter_PBI import construct_mol\n'), ((7584, 7597), 'renormalizer.utils.Quantity', 'Quantity', (['(2.0)'], {}), '(2.0)\n', (7592, 7597), False, 'from renormalizer.utils import Quantity, Op\n'), ((7692, 7705), 'renormalizer.utils.Quantity', 'Quantity', (['(2.0)'], {}), '(2.0)\n', (7700, 7705), False, 'from renormalizer.utils import Quantity, Op\n'), ((8643, 8670), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-05)'}), '(0, abs=1e-05)\n', (8656, 8670), False, 'import pytest\n'), ((609, 662), 'os.path.join', 'os.path.join', (['cur_dir', '"""test_exact_propagator.pickle"""'], {}), "(cur_dir, 'test_exact_propagator.pickle')\n", (621, 662), False, 'import os\n'), ((3987, 4023), 'renormalizer.utils.Quantity', 'Quantity', (['local_mlist.j_matrix[0, 2]'], {}), '(local_mlist.j_matrix[0, 2])\n', (3995, 4023), False, 'from renormalizer.utils import Quantity, Op\n'), ((4110, 4146), 'renormalizer.utils.Quantity', 'Quantity', (['local_mlist.j_matrix[0, 2]'], {}), '(local_mlist.j_matrix[0, 2])\n', (4118, 4146), False, 'from renormalizer.utils import Quantity, Op\n'), ((4201, 4228), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (4214, 4228), False, 'import pytest\n'), ((4282, 4309), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (4295, 4309), False, 'import pytest\n'), ((4432, 4459), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (4445, 4459), False, 'import pytest\n'), ((5042, 5076), 'renormalizer.mps.Mpo.ph_onsite', 'Mpo.ph_onsite', (['mol_list', '"""b"""', '(0)', '(0)'], {}), "(mol_list, 'b', 0, 0)\n", (5055, 5076), False, 'from renormalizer.mps import Mpo, Mps\n'), ((5606, 5656), 'renormalizer.utils.Quantity', 'Quantity', 
(['(-mol_list1[0].gs_zpe * mol_list1.mol_num)'], {}), '(-mol_list1[0].gs_zpe * mol_list1.mol_num)\n', (5614, 5656), False, 'from renormalizer.utils import Quantity, Op\n'), ((6250, 6298), 'renormalizer.utils.Quantity', 'Quantity', (['(-mol_list[0].gs_zpe * mol_list.mol_num)'], {}), '(-mol_list[0].gs_zpe * mol_list.mol_num)\n', (6258, 6298), False, 'from renormalizer.utils import Quantity, Op\n'), ((6393, 6440), 'renormalizer.utils.Quantity', 'Quantity', (['(mol_list[0].gs_zpe * mol_list.mol_num)'], {}), '(mol_list[0].gs_zpe * mol_list.mol_num)\n', (6401, 6440), False, 'from renormalizer.utils import Quantity, Op\n'), ((2028, 2044), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2036, 2044), True, 'import numpy as np\n'), ((2079, 2098), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (2087, 2098), True, 'import numpy as np\n'), ((2134, 2158), 'numpy.array', 'np.array', (['[0.707, 0.707]'], {}), '([0.707, 0.707])\n', (2142, 2158), True, 'import numpy as np\n'), ((2193, 2209), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2201, 2209), True, 'import numpy as np\n'), ((2346, 2362), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2354, 2362), True, 'import numpy as np\n'), ((2397, 2413), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2405, 2413), True, 'import numpy as np\n'), ((2448, 2464), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2456, 2464), True, 'import numpy as np\n'), ((2499, 2523), 'numpy.array', 'np.array', (['[0.707, 0.707]'], {}), '([0.707, 0.707])\n', (2507, 2523), True, 'import numpy as np\n'), ((2558, 2574), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (2566, 2574), True, 'import numpy as np\n'), ((6824, 6843), 'renormalizer.utils.Op', 'Op', (['"""a^\\\\dagger"""', '(0)'], {}), "('a^\\\\dagger', 0)\n", (6826, 6843), False, 'from renormalizer.utils import Quantity, Op\n'), ((7168, 7189), 'renormalizer.utils.Op', 'Op', (['"""a^\\\\dagger a"""', '(0)'], {}), 
"('a^\\\\dagger a', 0)\n", (7170, 7189), False, 'from renormalizer.utils import Quantity, Op\n'), ((7246, 7267), 'renormalizer.utils.Op', 'Op', (['"""a^\\\\dagger a"""', '(0)'], {}), "('a^\\\\dagger a', 0)\n", (7248, 7267), False, 'from renormalizer.utils import Quantity, Op\n'), ((7325, 7346), 'renormalizer.utils.Op', 'Op', (['"""a^\\\\dagger a"""', '(0)'], {}), "('a^\\\\dagger a', 0)\n", (7327, 7346), False, 'from renormalizer.utils import Quantity, Op\n'), ((7817, 7836), 'renormalizer.utils.Op', 'Op', (['"""a^\\\\dagger"""', '(1)'], {}), "('a^\\\\dagger', 1)\n", (7819, 7836), False, 'from renormalizer.utils import Quantity, Op\n'), ((7837, 7848), 'renormalizer.utils.Op', 'Op', (['"""a"""', '(-1)'], {}), "('a', -1)\n", (7839, 7848), False, 'from renormalizer.utils import Quantity, Op\n'), ((7862, 7881), 'renormalizer.utils.Op', 'Op', (['"""b^\\\\dagger"""', '(0)'], {}), "('b^\\\\dagger', 0)\n", (7864, 7881), False, 'from renormalizer.utils import Quantity, Op\n'), ((8227, 8246), 'renormalizer.utils.Op', 'Op', (['"""b^\\\\dagger"""', '(0)'], {}), "('b^\\\\dagger', 0)\n", (8229, 8246), False, 'from renormalizer.utils import Quantity, Op\n')] |
# @<NAME>, SRA 2019
# This Foolbox module has been modified to reflect our "augmented"
# projection algorithm
# Please replace the "iterative_projected_gradient.py" file under the
# foolbox Python library directory with this module
from __future__ import division
import numpy as np
from abc import abstractmethod
import logging
import warnings
import os
from bs4 import BeautifulSoup
# for computing distance after each projection
from scipy.spatial import distance
from .base import Attack
from .base import call_decorator
from .. import distances
from ..utils import crossentropy
from .. import nprng
from ..distances import Distance
from ..distances import MSE
from ..map_back import compute_x_after_mapping_back
from ..perturb_html import featureMapbacks
from ..classify import predict
from ..classify import setup_clf, setup_one_hot
from ..classify import read_one_hot_feature_list
import matplotlib.pyplot as plt
import math
import json
from urllib.parse import urlsplit
from urllib.parse import urlparse
# Binary label encoding for the classifier: advertisement vs. non-ad.
# NOTE(review): "LABLE" looks like a typo of "LABEL", but the name is kept
# as-is because other modules may reference it under this spelling.
LABLE = {"AD": 1, "NONAD": 0}
# Index remapping table (each value is key - 242; key 308 is absent).
# NOTE(review): the semantics of these indices are not visible in this
# file — presumably feature-column positions; confirm against the feature
# extraction code before relying on this.
NORM_MAP = {
    301: 59,
    302: 60,
    303: 61,
    304: 62,
    305: 63,
    306: 64,
    307: 65,
    309: 67,
    310: 68,
    311: 69,
}
# Current user's home directory from the environment (None if $HOME unset).
HOME_DIR = os.getenv("HOME")
class IterativeProjectedGradientBaseAttack(Attack):
"""Base class for iterative (projected) gradient attacks.
Concrete subclasses should implement __call__, _gradient
and _clip_perturbation.
TODO: add support for other loss-functions, e.g. the CW loss function,
see https://github.com/MadryLab/mnist_challenge/blob/master/pgd_attack.py
"""
    def __init__(self, *args, **kwargs):
        """Set up the AdGraph directory layout, one-hot encoders and the
        final-domain <-> original-domain mappings.

        Construction has file-system side effects: it lists the crawled HTML
        directory and reads the dataset / domain-mapping CSVs under $HOME.
        """
        # NOTE(review): this helper appears to be dead code — it is never
        # called. Also, str.strip('.source') strips any of the characters
        # '.soucer' from both ends, not the literal suffix — confirm intent
        # before reviving it.
        def _reduce_url_to_domain(url):
            html_filename = url.split('/')[-1]
            html_filename = html_filename.split('_')[-1]
            html_filename = html_filename.strip('.source')
            html_filename = html_filename.strip('.html')
            return html_filename
        super(IterativeProjectedGradientBaseAttack, self).__init__(*args, **kwargs)
        # Pipeline directory layout (all rooted at $HOME).
        self.BASE_CRAWLED_DIR = HOME_DIR + "/rendering_stream/html"
        self.BASE_HTML_DIR = HOME_DIR + "/rendering_stream/html"
        self.BASE_EVAL_HTML_DIR = HOME_DIR + "/rendering_stream/eval_html"
        self.all_html_filepaths = os.listdir(self.BASE_CRAWLED_DIR)
        self.BASE_MAPPING_DIR = HOME_DIR + "/rendering_stream/mappings"
        self.BASE_TIMELINE_DIR = HOME_DIR + "/rendering_stream/timeline"
        self.BASE_DATA_DIR = HOME_DIR + "/attack-adgraph-pipeline/data"
        self.BASE_MODEL_DIR = HOME_DIR + "/attack-adgraph-pipeline/model"
        self.original_dataset_fpath = self.BASE_DATA_DIR + '/dataset_1203.csv'
        self.final_domain_to_original_domain_mapping_fpath = HOME_DIR + "/map_local_list_unmod_new.csv"
        # Configure the module-global one-hot encoder with the category
        # vocabularies observed in the original dataset.
        one_hot_feature_list = read_one_hot_feature_list(self.original_dataset_fpath)
        events = sorted(list(one_hot_feature_list["FEATURE_NODE_CATEGORY"]))
        tag_1 = sorted(list(one_hot_feature_list["FEATURE_FIRST_PARENT_TAG_NAME"]))
        tag_2 = sorted(
            list(one_hot_feature_list["FEATURE_FIRST_PARENT_SIBLING_TAG_NAME"]))
        tag_3 = sorted(list(one_hot_feature_list["FEATURE_SECOND_PARENT_TAG_NAME"]))
        tag_4 = sorted(
            list(one_hot_feature_list["FEATURE_SECOND_PARENT_SIBLING_TAG_NAME"]))
        setup_one_hot(events, tag_1, tag_2, tag_3, tag_4)
        # Bidirectional mapping between the (post-redirect) final domain and
        # the originally crawled domain, loaded from a 2-column CSV.
        self.final_domain_to_original_domain_mapping = {}
        self.original_domain_to_final_domain_mapping = {}
        with open(self.final_domain_to_original_domain_mapping_fpath, 'r') as fin:
            data = fin.readlines()
            for row in data:
                row = row.strip()
                original_domain, final_url = row.split(',', 1)
                final_domain = urlparse(final_url)[1]  # netloc component
                self.final_domain_to_original_domain_mapping[final_domain] = original_domain
                self.original_domain_to_final_domain_mapping[original_domain] = final_domain
    @abstractmethod
    def _gradient(self, a, x, class_, strict=True):
        # Implemented by the *GradientMixin classes: returns the scaled
        # attack step direction for input x w.r.t. class_.
        raise NotImplementedError
    @abstractmethod
    def _clip_perturbation(self, a, noise, epsilon):
        # Implemented by the *ClippingMixin classes: projects the perturbation
        # back into the epsilon-ball of the respective norm.
        raise NotImplementedError
    @abstractmethod
    def _check_distance(self, a):
        # Implemented by the *DistanceCheckMixin classes: warns when the
        # adversarial's distance metric does not match the attack norm.
        raise NotImplementedError
def _compute_l2_distance(self, x1, x2):
return distance.cdist(x1, x2, 'euclidean')
def _compute_lp_distance(self, x1, x2):
diff = np.array(x1) - np.array(x2)
value = np.max(np.abs(diff)).astype(np.float64)
return value
def _generate_constrained_perturbation(self, perturbation,
perturbable_idx_set,
only_increase_idx_set,
debug=False):
for i in range(len(perturbation)):
if i not in perturbable_idx_set:
perturbation[i] = 0.0
if i in only_increase_idx_set and perturbation[i] < 0.0:
perturbation[i] = -perturbation[i]
return perturbation
def _unscale_feature(self, val, stats, is_float=False):
[maxn, minn] = stats
maxn, minn = float(maxn), float(minn)
if is_float:
return val * (maxn + minn) + minn
else:
return int(round(val * (maxn + minn) + minn))
def _rescale_feature(self, val, stats):
[maxn, minn] = stats
maxn, minn = float(maxn), float(minn)
if maxn == minn:
return val
return (val - minn) / (maxn - minn)
def _calculate_diff(self, ori, per, stats):
return self._unscale_feature(per, stats) - self._unscale_feature(ori, stats)
def _reject_imperturbable_features(self, candidate,
original, perturbable_idx_set,
debug=False):
assert len(candidate) == len(original), "[ERROR] Lengths of two input arrays not equal!"
rejected_candidate = []
for i in range(len(candidate)):
if i in perturbable_idx_set:
rejected_candidate.append(candidate[i])
else:
rejected_candidate.append(original[i])
return np.array(rejected_candidate)
def _reject_only_increase_features(self, candidate,
original, only_increase_idx_set,
debug=False):
assert len(candidate) == len(original), "[ERROR] Lengths of two input arrays not equal!"
rejected_candidate = []
for i in range(len(candidate)):
if i in only_increase_idx_set and candidate[i] < original[i]:
rejected_candidate.append(original[i])
else:
rejected_candidate.append(candidate[i])
return np.array(rejected_candidate)
    def _legalize_candidate(self, candidate,
                            original,
                            feature_types,
                            debug=False):
        """Project a continuous candidate onto legal (discrete) feature values.

        Two passes:
          1. binary ('b') features are snapped to 0/1;
          2. each run of consecutive categorical ('c') features is forced back
             into one-hot form (1 at the run's largest entry, 0 elsewhere).

        `original` is accepted for interface symmetry but is unused here.
        """
        if debug:
            print("[INFO] Entered candidate legalization method!")
            print(feature_types)
            input("Press Enter to continue...")
        adv_x = np.copy(candidate)
        processed_adv_x = np.copy(candidate)
        # Pass 1: snap binary features.
        # NOTE(review): the comparison picks the *farther* endpoint (e.g. a
        # value of 0.1 is snapped to 1, not 0). Possibly an intentional
        # "aggressive flip", but it reads inverted — confirm.
        for i in range(len(adv_x)):
            adv_val = adv_x[i]
            ori_val = original[i]
            processed_adv_val = None
            if feature_types[i] == 'b':
                if abs(adv_val - 1.0) > abs(adv_val - 0.0):
                    processed_adv_val = 1
                else:
                    processed_adv_val = 0
            if processed_adv_val is not None:
                processed_adv_x[i] = processed_adv_val
        # Pass 2: re-one-hot every categorical run via argmax.
        # NOTE(review): lookahead_cnt is incremented at most once per index,
        # so `lookahead_cnt - 1 > 0` never triggers and the one-hot pass is
        # re-run for every index inside a run (each time over the remaining
        # suffix), which can leave more than one 1 per run — looks
        # unintended; confirm.
        lookahead_cnt = 0
        for i in range(len(adv_x)):
            if lookahead_cnt - 1 > 0:
                lookahead_cnt -= 1
                continue
            if feature_types[i] == 'c':
                j = i
                while feature_types[j] == 'c' and j + 1 < len(adv_x):
                    j += 1
                categorical_interval_end = j
                maxn = -10000
                maxn_idx = i
                for j in range(i, categorical_interval_end):
                    if adv_x[j] > maxn:
                        maxn = adv_x[j]
                        maxn_idx = j
                for j in range(i, categorical_interval_end):
                    if j == maxn_idx:
                        processed_adv_val = 1
                    else:
                        processed_adv_val = 0
                    processed_adv_x[j] = processed_adv_val
                lookahead_cnt += 1
        legalized_candidate = processed_adv_x
        return legalized_candidate
def _get_mode_and_class(self, a):
# determine if the attack is targeted or not
target_class = a.target_class()
targeted = target_class is not None
if targeted:
class_ = target_class
else:
class_ = a.original_class
return targeted, class_
    def _run(self, a, binary_search,
             epsilon, stepsize, iterations,
             random_start, return_early,
             perturbable_idx_set, only_increase_idx_set,
             feature_defs, normalization_ratios,
             enforce_interval, request_id, model, browser_id,
             map_back_mode, feature_idx_map, logger):
        """Shared entry point: validates the setup, then dispatches either to
        a single PGD run (`_run_one`) or to a binary search over epsilon.

        `binary_search` may be a bool (True -> 20 search steps) or an int
        giving the number of steps. All other parameters mirror `_run_one`.
        """
        if not a.has_gradient():
            warnings.warn('applied gradient-based attack to model that'
                          ' does not provide gradients')
            return
        self._check_distance(a)
        targeted, class_ = self._get_mode_and_class(a)
        if binary_search:
            if isinstance(binary_search, bool):
                k = 20  # default number of binary-search steps
            else:
                k = int(binary_search)
            # NOTE(review): _run_binary_search forwards these keyword args
            # positionally to _run_one, where they bind to the model /
            # browser_id parameters of the modified signature — this path
            # looks broken; confirm before using binary_search=True.
            return self._run_binary_search(
                a, epsilon, stepsize, iterations,
                random_start, targeted, class_, return_early, k=k,
                perturbable_idx_set=perturbable_idx_set,
                only_increase_idx_set=only_increase_idx_set,
                feature_defs=feature_defs,
                normalization_ratios=normalization_ratios,
                enforce_interval=enforce_interval,
                request_id=request_id)
        else:
            return self._run_one(
                a, epsilon, stepsize, iterations,
                random_start, targeted, class_, return_early,
                perturbable_idx_set=perturbable_idx_set,
                only_increase_idx_set=only_increase_idx_set,
                feature_defs=feature_defs,
                normalization_ratios=normalization_ratios,
                enforce_interval=enforce_interval,
                request_id=request_id, model=model, browser_id=browser_id,
                map_back_mode=map_back_mode,
                feature_idx_map=feature_idx_map, logger=logger)
def _get_geometric_enforce_interval(self, base, iteations, i):
progress = float(i + 1) / iteations
if progress < 0.1:
growth_ratio = 1
elif progress >= 0.1 and progress < 0.5:
growth_ratio = 3
else:
growth_ratio = 10
grown = base * growth_ratio
return grown
    def _run_binary_search(self, a, epsilon, stepsize, iterations,
                           random_start, targeted, class_, return_early, k,
                           perturbable_idx_set, only_increase_idx_set, feature_defs,
                           normalization_ratios, enforce_interval, request_id):
        """Search for the smallest epsilon that still succeeds, keeping the
        stepsize/epsilon ratio constant.

        Phase 1 grows epsilon geometrically (x1.5) until an attack succeeds;
        phase 2 bisects k times between the last failing and first
        succeeding epsilon.

        NOTE(review): try_epsilon passes perturbable_idx_set and the
        following values positionally to _run_one, where they bind to the
        `model` and `browser_id` parameters of the modified signature — this
        helper looks out of sync with _run_one; confirm before relying on it.
        """
        factor = stepsize / epsilon  # keep stepsize proportional to epsilon
        def try_epsilon(epsilon):
            stepsize = factor * epsilon
            return self._run_one(
                a, epsilon, stepsize, iterations,
                random_start, targeted, class_, return_early,
                perturbable_idx_set, only_increase_idx_set, feature_defs,
                normalization_ratios, enforce_interval, request_id)
        # Phase 1: exponential search for a working epsilon.
        for i in range(k):
            if try_epsilon(epsilon):
                logging.info('successful for eps = {}'.format(epsilon))
                break
            logging.info('not successful for eps = {}'.format(epsilon))
            epsilon = epsilon * 1.5
        else:
            logging.warning('exponential search failed')
            return
        # Phase 2: bisection between the bracketing values.
        bad = 0
        good = epsilon
        for i in range(k):
            epsilon = (good + bad) / 2
            if try_epsilon(epsilon):
                good = epsilon
                logging.info('successful for eps = {}'.format(epsilon))
            else:
                bad = epsilon
                logging.info('not successful for eps = {}'.format(epsilon))
def _is_diff_zero(self, diff):
for feature in diff:
if abs(feature) > 0.00001:
return False
return True
def _get_diff(self, x, original, perturbable_idx_set, normalization_ratios,
debug=False):
if debug:
print(original[0], x[0])
print(original[1], x[1])
delta = {}
for idx in list(perturbable_idx_set):
if normalization_ratios[idx]['type'] == 'B':
diff = float(x[idx]) - float(original[idx])
else:
diff = self._calculate_diff(original[idx], x[idx], normalization_ratios[idx]['val'])
delta[idx] = diff
return delta
def _reverse_a_map(self, map):
reversed = {}
for feature_name, feature_id in map.items():
reversed[feature_id] = feature_name
return reversed
    def _read_curr_html(self, domain, read_modified):
        """Parse the crawled (or previously modified) HTML page for `domain`.

        When `read_modified` is True, the "modified_" copy written by an
        earlier map-back step is loaded instead of the original crawl.

        Returns (BeautifulSoup document, html file name).
        """
        print("Reading HTML: %s" % domain)
        if read_modified:
            with open(self.BASE_CRAWLED_DIR + "/modified_" + domain + '.html', "r") as fin:
                curr_html = BeautifulSoup(fin, features="html.parser")
        else:
            with open(self.BASE_CRAWLED_DIR + "/" + domain + '.html', "r") as fin:
                curr_html = BeautifulSoup(fin, features="html.parser")
        return curr_html, domain + '.html'
def _get_url_from_url_id(
self,
final_domain,
target_url_id
):
with open(self.BASE_MAPPING_DIR + '/' + self.final_domain_to_original_domain_mapping[final_domain] + '.csv') as fin:
data = fin.readlines()
for row in data:
row = row.strip()
url_id, url = row.split(',', 1)
if url_id == target_url_id:
return url
return None
def _read_url_id_to_url_mapping(
self,
final_domain
):
url_id_to_url_mapping = {}
with open(self.BASE_MAPPING_DIR + '/' + self.final_domain_to_original_domain_mapping[final_domain] + '.csv') as fin:
data = fin.readlines()
for row in data:
row = row.strip()
url_id, url = row.split(',', 1)
url_id_to_url_mapping[url_id] = url
return url_id_to_url_mapping
    def _get_x_after_mapping_back(
        self,
        domain,
        final_domain,
        url_id,
        diff,
        browser_id,
        working_dir="~/AdGraphAPI/scripts",
        feature_idx_map=None,
        first_time=False
    ):
        """Apply feature-space deltas to the real HTML page, then re-extract
        the feature vector the concrete page actually produces.

        Each (feature_id, delta) in `diff` is mapped onto the page via
        featureMapbacks(); the modified page is then re-processed by the
        AdGraph toolchain (compute_x_after_mapping_back).

        Returns (mapped_x, mapped_unnormalized_x, original_url, modified_url).
        Raises a bare Exception when *none* of the deltas could be applied.

        Side effects: shells out to the AdGraph pipeline via os.system and
        rewrites HTML files on disk.
        """
        reversed_feature_idx_map = self._reverse_a_map(feature_idx_map)
        html, html_fname = self._read_curr_html(domain, read_modified=False)
        if first_time:
            # Build the AdGraph timeline/features for this page once per run.
            if not os.path.isfile(self.BASE_TIMELINE_DIR + '/' + domain + '.json'):
                cmd = "python3 ~/AdGraphAPI/scripts/load_page_adgraph.py --domain %s --id %s --final-domain %s --mode proxy" % (domain, browser_id, final_domain)
                os.system(cmd)
            cmd = "python ~/AdGraphAPI/scripts/rules_parser.py --target-dir ~/rendering_stream/timeline --domain %s" % domain
            os.system(cmd)
            cmd = "~/AdGraphAPI/adgraph ~/rendering_stream/ features/ mappings/ %s parsed_%s" % (domain, domain)
            os.system(cmd)
            self._url_id_to_url_mapping = self._read_url_id_to_url_mapping(final_domain)
        url = self._url_id_to_url_mapping[url_id]
        original_url = url
        at_least_one_diff_success = False
        new_html = None
        # Apply every delta; individual features that cannot be mapped back
        # are skipped rather than aborting the whole step.
        for feature_id, delta in diff.items():
            new_html, modified_url = featureMapbacks(
                name=reversed_feature_idx_map[feature_id],
                html=html,
                url=url,
                delta=delta,
                domain=final_domain
            )
            if new_html is None:
                continue
            else:
                html = new_html
                url = modified_url
                at_least_one_diff_success = True
        if not at_least_one_diff_success:
            print("[ERROR] No diff was successfully mapped back!")
            raise Exception
        # Write back to HTML file after circulating all outstanding perturbations in this iteration
        mapped_x, mapped_unnormalized_x = compute_x_after_mapping_back(
            domain,
            url_id,
            html,
            html_fname,
            working_dir=working_dir,
            browser_id=browser_id,
            final_domain=final_domain
        )
        return mapped_x, mapped_unnormalized_x, original_url, url
def _deprocess_x(self, x, feature_types, verbal=False):
FEATURE_TYPES = {'F', 'B', 'C', 'S', 'D', 'L', 'FF'}
def deprocess_float_feature(val, ratio, ffloat=True):
val = float(val)
maxn = float(ratio[0])
minn = float(ratio[1])
if ffloat:
return float(val * (maxn - minn) + minn)
else:
return round(val * (maxn - minn) + minn)
def deprocess_nominal_feature(val, category_name):
val = float(val)
if val == 1.0:
return category_name
elif val == 0.0:
return None
else:
print("[ERROR] WTF? val: %s %s" % (str(val), category_name))
raise Exception
def deprocess_shift_feature(val, offset):
val = float(val)
offset = float(offset)
return int(math.ceil(val + offset))
features = x
deprocessed_features = []
for j in range(len(feature_types)):
assert feature_types[j]['type'] in FEATURE_TYPES, "[ERROR] Feature type not supported!"
if feature_types[j]['type'] == 'F':
ratio = feature_types[j]['val']
deprocessed_features.append(
deprocess_float_feature(features[j], ratio, ffloat=False))
elif feature_types[j]['type'] == 'FF':
ratio = feature_types[j]['val']
deprocessed_features.append(
deprocess_float_feature(features[j], ratio, ffloat=True))
elif feature_types[j]['type'] == 'C':
category_name = feature_types[j]['val']
new_val = deprocess_nominal_feature(features[j], category_name)
if new_val is not None:
deprocessed_features.append(new_val)
elif feature_types[j]['type'] == 'S':
offset = feature_types[j]['val']
deprocessed_features.append(
deprocess_shift_feature(features[j], offset))
elif feature_types[j]['type'] == 'B':
val = features[j]
deprocessed_features.append(int(float(val)))
elif feature_types[j]['type'] == 'D':
val = features[j]
deprocessed_features.append(val)
# label column
elif feature_types[j]['type'] == 'L':
label = features[j]
deprocessed_features.append(label)
else:
print("???")
return deprocessed_features
def _compare_x(self, x1, x2, tags=["X1", "X2"], it=None, X3=None):
assert len(x1) == len(x2), "[ERROR] Two inputs have different sizes!"
if it is not None:
print("Iter #%d" % it)
for i in range(len(x1)):
if x1[i] != x2[i]:
if X3 is not None:
print("i:", i, tags[0], ":", x1[i], tags[1], ":", x2[i], "original:", X3[i])
else:
print("i:", i, tags[0], ":", x1[i], tags[1], ":", x2[i])
def _retrain_local_model(self, model, x, y):
def __generate_mini_batch(x, y, whole_train_set):
# import random
train_set = whole_train_set.sample(n=50)
train_y = train_set.pop('CLASS').to_numpy().tolist()
train_x = train_set.to_numpy().tolist()
for _ in range(50):
train_x.append(x)
train_y.append(y)
return np.array(train_x), np.array(train_y)
train_x, train_y = __generate_mini_batch(x, y, self._train_data_set)
model.fit(
x=train_x,
y=train_y,
batch_size=100,
epochs=1
)
def _get_label(self, logits):
if logits[1] > logits[0]:
return "AD"
else:
return "NONAD"
    def _run_one(self, a, epsilon, stepsize, iterations,
                 random_start, targeted, class_, return_early, model, browser_id,
                 perturbable_idx_set=None, only_increase_idx_set=None,
                 feature_defs=None, normalization_ratios=None,
                 enforce_interval=1, request_id="URL_dummy", dynamic_interval=False,
                 debug=False, draw=False, map_back_mode=True, remote_model=True,
                 check_correctness=False, check_init_correctness=True, feature_idx_map=None, logger=None):
        """Single projected-gradient run with "augmented" policy enforcement.

        Alternates plain gradient steps with enforcement iterations that
        (1) project x onto the feasible set (perturbable / only-increase /
        legal discrete values), (2) optionally map the perturbation back onto
        the real HTML page (map_back_mode) and re-extract the realizable
        feature vector, and (3) compare the local surrogate against the
        remote classifier, retraining the surrogate on disagreement.

        Returns True when an adversarial page flips the remote model from
        "AD" to "NONAD", False otherwise.

        NOTE(review), to confirm:
        * `_clip_perturbation` is called with (a, x - original, original,
          epsilon, feature_defs) — only LinfinityClippingMixin has this
          arity; the L1/L2 clipping mixins would raise TypeError.
        * In the map-back retrain loop the condition tests
          `prediction_local_cent` but the loop body updates
          `prediction_local`, so the condition never changes.
        * The final FAIL message uses mapping_diff/original_url/url, which
          are only bound after at least one successful map-back iteration.
        * `return_early` and `check_correctness` are currently unused.
        """
        if draw:
            x_axis, y_l2, y_lf = [], [], []
        # request_id is "domain,url_id"; final_domain is the netloc.
        domain, url_id = request_id.split(',')
        final_domain = urlparse(domain)[1]
        original_domain = self.final_domain_to_original_domain_mapping[final_domain]
        min_, max_ = a.bounds()
        s = max_ - min_
        original = a.unperturbed.copy()
        if random_start:
            # using uniform noise even if the perturbation clipping uses
            # a different norm because cleverhans does it the same way
            noise = nprng.uniform(
                -epsilon * s, epsilon * s, original.shape).astype(
                original.dtype)
            x = original + self._clip_perturbation(a, noise, epsilon)
            strict = False  # because we don't enforce the bounds here
        else:
            x = original
            strict = True  # we don't care about the bounds because we are not attacking image clf
        success_cent = False
        if enforce_interval == iterations - 1:
            only_post_process = True
        else:
            only_post_process = False
        is_first_iter = True
        for i in range(iterations):
            if dynamic_interval:
                curr_interval = self._get_geometric_enforce_interval(enforce_interval, iterations, i)
            else:
                curr_interval = enforce_interval
            # Enforce policy on every curr_interval-th step and on the last.
            if i != 0 and (i % curr_interval == 0 or i == iterations - 1):
                should_enforce_policy = True
            else:
                should_enforce_policy = False
            gradient = self._gradient(a, x, class_, strict=strict)
            # non-strict only for the first call and
            # only if random_start is True
            if targeted:
                gradient = -gradient
            # untargeted: gradient ascent on cross-entropy to original class
            # targeted: gradient descent on cross-entropy to target class
            # (this is the actual attack step)
            x = x + stepsize * gradient
            if should_enforce_policy:
                x_before_projection = x.copy()
            # phase 1: reject disallowed perturbations by changing feature values
            # back to original
            if only_post_process:
                if should_enforce_policy and perturbable_idx_set is not None:
                    x = self._reject_imperturbable_features(
                        x,
                        original,
                        perturbable_idx_set)
            else:
                if perturbable_idx_set is not None:
                    x = self._reject_imperturbable_features(
                        x,
                        original,
                        perturbable_idx_set)
            # phase 1: reject decreasing perturbations by changing only-increase
            # feature values back to original
            if should_enforce_policy and only_increase_idx_set is not None:
                x = self._reject_only_increase_features(
                    x,
                    original,
                    only_increase_idx_set)
            # phase 2: change values back to allowed ranges
            if should_enforce_policy and feature_defs is not None:
                x = self._legalize_candidate(
                    x,
                    original,
                    feature_defs)
            if should_enforce_policy:
                # Track how far the projection moved the candidate.
                l2_dist = self._compute_l2_distance([x_before_projection], [x])
                lf_dist = self._compute_lp_distance([x_before_projection], [x])
                if draw:
                    x_axis.append(i)
                    y_l2.append(l2_dist[0])
                    y_lf.append(lf_dist)
                if debug:
                    print("Step #%d, L2 distance: %f | LP distance: %f" % (i, l2_dist, lf_dist))
            # NOTE(review): 5-arg call — see docstring; only the Linfinity
            # clipping mixin accepts this signature.
            x = original + self._clip_perturbation(a, x - original, original, epsilon, feature_defs)
            x = np.clip(x, min_, max_)
            if should_enforce_policy and map_back_mode:
                diff = self._get_diff(
                    x,
                    original,
                    perturbable_idx_set,
                    normalization_ratios,
                )
                print("Delta at iter #%d: %s" % (i, str(diff)))
                print("Domain: %s" % original_domain)
                print("URL ID: %s" % url_id)
                x_before_mapping_back = x.copy()
                try:
                    # Realize the deltas on the actual page and re-extract
                    # the feature vector the page now produces ("cent").
                    x_cent, unnorm_x_cent, original_url, url = self._get_x_after_mapping_back(
                        original_domain,
                        final_domain,
                        url_id,
                        diff,
                        browser_id=browser_id,
                        feature_idx_map=feature_idx_map,
                        first_time=is_first_iter
                    )
                except Exception as err:
                    print("Error occured mapping: %s" % err)
                    return False
                is_first_iter = False
                del unnorm_x_cent[-1]  # remove label
                # Force the binary one-hot columns covered by NORM_MAP to
                # reflect the requested flips exactly.
                for j in range(len(x_cent)):
                    if j in diff:
                        if j not in NORM_MAP:
                            continue
                        if diff[j] == 1.0:
                            x_cent[j] = 1.0
                            unnorm_x_cent[NORM_MAP[j]] = "1"
                        if diff[j] == -1.0:
                            x_cent[j] = 0.0
                            unnorm_x_cent[NORM_MAP[j]] = "0"
                print("unnorm_x_cent:", unnorm_x_cent)
                mapping_diff = self._get_diff(
                    x_cent,
                    original,
                    perturbable_idx_set,
                    normalization_ratios,
                )
                print("Delta between before and after mapping-back: %s" % mapping_diff)
                if self._is_diff_zero(mapping_diff):
                    print("[ERROR] Xs before and after mapping back did not change!")
                    return False
            if should_enforce_policy and remote_model and not map_back_mode:
                unnorm_x = self._deprocess_x(x, normalization_ratios)
                unnorm_unperturbed = self._deprocess_x(original, normalization_ratios)
            if i == 0 and check_init_correctness:
                # Sanity check: local and remote must both classify the
                # unperturbed input as AD before attacking is meaningful.
                unnorm_unperturbed = self._deprocess_x(original, normalization_ratios)
                prediction_unperturbed_local = self._get_label(model.predict(np.array([original]))[0])
                prediction_unperturbed_remote = predict(unnorm_unperturbed, self._remote_model)[0]
                if prediction_unperturbed_local == prediction_unperturbed_remote == "AD":
                    print("Sanity check passed!")
                else:
                    print("Local:", prediction_unperturbed_local)
                    print("Remote:", prediction_unperturbed_remote)
                    print("Sanity check failed!")
                    return False
            if should_enforce_policy:
                logits, is_adversarial = a.forward_one(x)
                if logging.getLogger().isEnabledFor(logging.DEBUG):
                    if targeted:
                        ce = crossentropy(a.original_class, logits)
                        logging.debug('crossentropy to {} is {}'.format(
                            a.original_class, ce))
                    ce = crossentropy(class_, logits)
                    logging.debug('crossentropy to {} is {}'.format(class_, ce))
                # Use X before mapping back as the starting point for next iteration
                # as there might be feature perturbations that we cannot incoperate in
                # feature space. That is, we should only "descend" by changing "perturbable"
                # features in a "legitimate" fashion
                if remote_model:
                    if map_back_mode:
                        prediction_remote_cent = predict(unnorm_x_cent, self._remote_model)[0]
                    else:
                        prediction_remote = predict(unnorm_x, self._remote_model)[0]
                    prediction_original = predict(unnorm_unperturbed, self._remote_model)[0]
                    if map_back_mode:
                        prediction_local_cent = self._get_label(model.predict(np.array([x_cent]))[0])
                    else:
                        prediction_local = self._get_label(model.predict(np.array([x]))[0])
                    if not only_post_process:
                        # On local/remote disagreement, fine-tune the local
                        # surrogate toward the remote verdict (max 10 steps).
                        if map_back_mode:
                            retrain_cnt_cent = 0
                            print("Remote (cent): %s / local (cent): %s" %
                                  (prediction_remote_cent, prediction_local_cent))
                            # NOTE(review): loop condition never updated —
                            # see docstring.
                            while prediction_remote_cent != prediction_local_cent and retrain_cnt_cent < 10:
                                retrain_cnt_cent += 1
                                self._retrain_local_model(model, x_cent, LABLE[prediction_remote_cent])
                                prediction_local = self._get_label(model.predict(np.array([x_cent]))[0])
                            print("(cent) iter #%d, has retrained %d time(s)" % (i, retrain_cnt_cent))
                        else:
                            retrain_cnt = 0
                            print("Remote: %s / local: %s" % (prediction_remote, prediction_local))
                            while prediction_remote != prediction_local and retrain_cnt < 10:
                                retrain_cnt += 1
                                self._retrain_local_model(model, x, LABLE[prediction_remote])
                                prediction_local = self._get_label(model.predict(np.array([x]))[0])
                            print("iter #%d, has retrained %d time(s)" % (i, retrain_cnt))
                    if map_back_mode:
                        if prediction_original == "AD" and prediction_remote_cent == "NONAD":
                            success_cent = True
                        if success_cent:
                            msg = "SUCCESS, iter_%d, %s, %s, %s, %s, %s, %s" % (i, original_domain, final_domain, str(mapping_diff), url_id, original_url, url)
                            print(msg)
                            logger.info(msg)
                            # Preserve the original and adversarial pages for
                            # later evaluation.
                            cmd = "cp %s %s" % (
                                self.BASE_HTML_DIR + '/' + original_domain + '.html',
                                self.BASE_EVAL_HTML_DIR + '/original_' + original_domain + '.html'
                            )
                            os.system(cmd)
                            cmd = "cp %s %s" % (
                                self.BASE_HTML_DIR + '/modified_' + original_domain + '.html',
                                self.BASE_EVAL_HTML_DIR + '/' + original_domain + '_' + url_id + '.html'
                            )
                            os.system(cmd)
                            return True
                        x = x_before_mapping_back
                    else:
                        if prediction_original == "AD" and prediction_remote == "NONAD":
                            msg = "SUCCESS, iter_%d, %s, %s, %s" % (i, domain, url_id, str(diff))
                            print(msg)
                            logger.info(msg)
                            return True
        if draw:
            plt.plot(x_axis, y_l2, linewidth=3)
            plt.plot(x_axis, y_lf, linewidth=3)
            plt.show()
        msg = "FAIL, iter_%d, %s, %s, %s, %s, %s, %s" % (i, original_domain, final_domain, str(mapping_diff), url_id, original_url, url)
        print(msg)
        logger.info(msg)
        return False
class LinfinityGradientMixin(object):
    def _gradient(self, a, x, class_, strict=True):
        """Sign gradient scaled by the input range (FGSM-style direction)."""
        raw_grad = a.gradient_one(x, class_, strict=strict)
        lower, upper = a.bounds()
        return (upper - lower) * np.sign(raw_grad)
class L1GradientMixin(object):
    def _gradient(self, a, x, class_, strict=True):
        """Gradient normalized by its mean absolute value, scaled to the
        input range.

        Mean (not sum) normalization keeps epsilon ranges comparable to Linf.
        """
        raw_grad = a.gradient_one(x, class_, strict=strict)
        lower, upper = a.bounds()
        scale = np.mean(np.abs(raw_grad))
        return (upper - lower) * (raw_grad / scale)
class L2GradientMixin(object):
    def _gradient(self, a, x, class_, strict=True):
        """Gradient normalized by its RMS value, scaled to the input range.

        RMS (mean, not sum, of squares) keeps epsilon ranges comparable to
        Linf.
        """
        raw_grad = a.gradient_one(x, class_, strict=strict)
        lower, upper = a.bounds()
        rms = np.sqrt(np.mean(np.square(raw_grad)))
        return (upper - lower) * (raw_grad / rms)
class LinfinityClippingMixin(object):
    def _clip_perturbation(self, a, perturbation, original, epsilon, feature_defs):
        """Project a perturbation onto the "augmented" Linf ball.

        Standard Linf clipping to epsilon * (input range), with two
        augmentations:
          * binary ('b') and categorical ('c') features keep their raw,
            unclipped perturbation (they are snapped later by
            _legalize_candidate);
          * features in _LOCAL_FEATURE_IDX_SET are bounded by the smaller of
            the global limit and a fraction of their own original value.
        """
        _LOCAL_EPSILON = 0.8  # hard-coded for now
        _LOCAL_FEATURE_IDX_SET = {0, 1}  # hard-coded for now as well
        raw = perturbation.copy()
        lower, upper = a.bounds()
        limit = epsilon * (upper - lower)
        clipped = np.clip(perturbation, -limit, limit)
        for idx in range(len(clipped)):
            if feature_defs[idx] in {"b", "c"}:
                clipped[idx] = raw[idx]
            if idx in _LOCAL_FEATURE_IDX_SET:
                local_limit = min(limit, _LOCAL_EPSILON * original[idx])  # use the smaller offset
                clipped[idx] = np.clip([raw[idx]], -local_limit, local_limit)[0]
        return clipped
class L1ClippingMixin(object):
    def _clip_perturbation(self, a, perturbation, epsilon):
        """Shrink the perturbation so its mean-absolute norm fits the budget.

        Mean (not sum) keeps epsilon ranges comparable to Linf; the factor is
        capped at 1 so the norm is only ever decreased.
        """
        norm = max(1e-12, np.mean(np.abs(perturbation)))  # avoid division by zero
        lower, upper = a.bounds()
        shrink = min(1, epsilon * (upper - lower) / norm)
        return perturbation * shrink
class L2ClippingMixin(object):
    def _clip_perturbation(self, a, perturbation, epsilon):
        """Shrink the perturbation so its RMS norm fits the budget.

        RMS (mean, not sum, of squares) keeps epsilon ranges comparable to
        Linf; the factor is capped at 1 so the norm only decreases.
        """
        norm = max(1e-12, np.sqrt(np.mean(np.square(perturbation))))  # avoid division by zero
        lower, upper = a.bounds()
        shrink = min(1, epsilon * (upper - lower) / norm)
        return perturbation * shrink
class LinfinityDistanceCheckMixin(object):
    def _check_distance(self, a):
        """Warn when the adversarial's distance metric is not Linfinity."""
        if isinstance(a.distance, distances.Linfinity):
            return
        logging.warning('Running an attack that tries to minimize the'
                        ' Linfinity norm of the perturbation without'
                        ' specifying foolbox.distances.Linfinity as'
                        ' the distance metric might lead to suboptimal'
                        ' results.')
class L1DistanceCheckMixin(object):
    def _check_distance(self, a):
        """Warn when the adversarial's distance metric is not MAE (L1)."""
        if isinstance(a.distance, distances.MAE):
            return
        logging.warning('Running an attack that tries to minimize the'
                        ' L1 norm of the perturbation without'
                        ' specifying foolbox.distances.MAE as'
                        ' the distance metric might lead to suboptimal'
                        ' results.')
class L2DistanceCheckMixin(object):
    def _check_distance(self, a):
        """Warn when the adversarial's distance metric is not MSE (L2)."""
        if isinstance(a.distance, distances.MSE):
            return
        logging.warning('Running an attack that tries to minimize the'
                        ' L2 norm of the perturbation without'
                        ' specifying foolbox.distances.MSE as'
                        ' the distance metric might lead to suboptimal'
                        ' results.')
class LinfinityBasicIterativeAttack(
        LinfinityGradientMixin,
        LinfinityClippingMixin,
        LinfinityDistanceCheckMixin,
        IterativeProjectedGradientBaseAttack):
    """The Basic Iterative Method introduced in [1]_.
    This attack is also known as Projected Gradient
    Descent (PGD) (without random start) or FGMS^k.
    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>,
           "Adversarial examples in the physical world",
            https://arxiv.org/abs/1607.02533
    .. seealso:: :class:`ProjectedGradientDescentAttack`
    """
    @call_decorator
    def __call__(self, input_or_adv, label=None, unpack=True,
                 binary_search=True,
                 epsilon=0.3,
                 stepsize=0.05,
                 iterations=10,
                 random_start=False,
                 return_early=True):
        """Simple iterative gradient-based attack known as
        Basic Iterative Method, Projected Gradient Descent or FGSM^k.
        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            The original, unperturbed input as a `numpy.ndarray` or
            an :class:`Adversarial` instance.
        label : int
            The reference label of the original input. Must be passed
            if `a` is a `numpy.ndarray`, must not be passed if `a` is
            an :class:`Adversarial` instance.
        unpack : bool
            If true, returns the adversarial input, otherwise returns
            the Adversarial object.
        binary_search : bool or int
            Whether to perform a binary search over epsilon and stepsize,
            keeping their ratio constant and using their values to start
            the search. If False, hyperparameters are not optimized.
            Can also be an integer, specifying the number of binary
            search steps (default 20).
        epsilon : float
            Limit on the perturbation size; if binary_search is True,
            this value is only for initialization and automatically
            adapted.
        stepsize : float
            Step size for gradient descent; if binary_search is True,
            this value is only for initialization and automatically
            adapted.
        iterations : int
            Number of iterations for each gradient descent run.
        random_start : bool
            Start the attack from a random point rather than from the
            original input.
        return_early : bool
            Whether an individual gradient descent run should stop as
            soon as an adversarial is found.
        """
        a = input_or_adv
        del input_or_adv
        del label
        del unpack
        assert epsilon > 0
        # NOTE(review): the modified base-class _run now requires many
        # additional positional parameters (perturbable_idx_set ... logger);
        # this call passes only the original seven and would raise TypeError.
        # This attack appears unused with the modified base class — confirm.
        self._run(a, binary_search,
                  epsilon, stepsize, iterations,
                  random_start, return_early)
# Backwards-compatible aliases for the Linf Basic Iterative Method.
BasicIterativeMethod = LinfinityBasicIterativeAttack
BIM = BasicIterativeMethod
class L1BasicIterativeAttack(
        L1GradientMixin,
        L1ClippingMixin,
        L1DistanceCheckMixin,
        IterativeProjectedGradientBaseAttack):
    """Modified version of the Basic Iterative Method
    that minimizes the L1 distance.
    .. seealso:: :class:`LinfinityBasicIterativeAttack`
    """
    @call_decorator
    def __call__(self, input_or_adv, label=None, unpack=True,
                 binary_search=True,
                 epsilon=0.3,
                 stepsize=0.05,
                 iterations=10,
                 random_start=False,
                 return_early=True):
        """Simple iterative gradient-based attack known as
        Basic Iterative Method, Projected Gradient Descent or FGSM^k.
        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            The original, unperturbed input as a `numpy.ndarray` or
            an :class:`Adversarial` instance.
        label : int
            The reference label of the original input. Must be passed
            if `a` is a `numpy.ndarray`, must not be passed if `a` is
            an :class:`Adversarial` instance.
        unpack : bool
            If true, returns the adversarial input, otherwise returns
            the Adversarial object.
        binary_search : bool or int
            Whether to perform a binary search over epsilon and stepsize,
            keeping their ratio constant and using their values to start
            the search. If False, hyperparameters are not optimized.
            Can also be an integer, specifying the number of binary
            search steps (default 20).
        epsilon : float
            Limit on the perturbation size; if binary_search is True,
            this value is only for initialization and automatically
            adapted.
        stepsize : float
            Step size for gradient descent; if binary_search is True,
            this value is only for initialization and automatically
            adapted.
        iterations : int
            Number of iterations for each gradient descent run.
        random_start : bool
            Start the attack from a random point rather than from the
            original input.
        return_early : bool
            Whether an individual gradient descent run should stop as
            soon as an adversarial is found.
        """
        a = input_or_adv
        del input_or_adv
        del label
        del unpack
        assert epsilon > 0
        # NOTE(review): same issue as LinfinityBasicIterativeAttack — the
        # modified base-class _run requires additional positional parameters;
        # this seven-argument call would raise TypeError. Confirm whether
        # this attack is still meant to be usable.
        self._run(a, binary_search,
                  epsilon, stepsize, iterations,
                  random_start, return_early)
class L2BasicIterativeAttack(
        L2GradientMixin,
        L2ClippingMixin,
        L2DistanceCheckMixin,
        IterativeProjectedGradientBaseAttack):
    """Modified version of the Basic Iterative Method
    that minimizes the L2 distance.
    .. seealso:: :class:`LinfinityBasicIterativeAttack`
    """
    def _read_dataset(self, fname):
        """Load the training CSV and return it row-shuffled."""
        # Local imports: pandas/sklearn are only needed when this attack runs.
        import pandas as pd
        from sklearn.utils import shuffle
        train_dataframe = pd.read_csv(fname)
        dataframe = shuffle(train_dataframe)
        return dataframe
    # overriden init method in ABC
    def _initialize(self):
        """Load the remote surrogate classifier and the retraining dataset."""
        self._remote_model = setup_clf(
            self.BASE_MODEL_DIR + "/rf.pkl"
        )
        self._train_data_set = self._read_dataset(
            self.BASE_DATA_DIR + "/hand_preprocessed_trimmed_label_gt_augmented_train_set.csv"
        )
    @call_decorator
    def __call__(self, input_or_adv, label=None, unpack=True,
                 binary_search=True,
                 epsilon=0.3,
                 stepsize=0.01,
                 iterations=40,
                 random_start=False,
                 return_early=True,
                 perturbable_idx_set=None,
                 only_increase_idx_set=None,
                 feature_defs=None,
                 normalization_ratios=None,
                 enforce_interval=1,
                 request_id="URL_dummy",
                 model=None,
                 browser_id=None,
                 map_back_mode=False,
                 feature_idx_map=None,
                 logger=None):
        """Simple iterative gradient-based attack known as
        Basic Iterative Method, Projected Gradient Descent or FGSM^k.
        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            The original, unperturbed input as a `numpy.ndarray` or
            an :class:`Adversarial` instance.
        label : int
            The reference label of the original input. Must be passed
            if `a` is a `numpy.ndarray`, must not be passed if `a` is
            an :class:`Adversarial` instance.
        unpack : bool
            If true, returns the adversarial input, otherwise returns
            the Adversarial object.
        binary_search : bool or int
            Whether to perform a binary search over epsilon and stepsize,
            keeping their ratio constant and using their values to start
            the search. If False, hyperparameters are not optimized.
            Can also be an integer, specifying the number of binary
            search steps (default 20).
        epsilon : float
            Limit on the perturbation size; if binary_search is True,
            this value is only for initialization and automatically
            adapted.
        stepsize : float
            Step size for gradient descent; if binary_search is True,
            this value is only for initialization and automatically
            adapted.
        iterations : int
            Number of iterations for each gradient descent run.
        random_start : bool
            Start the attack from a random point rather than from the
            original input.
        return_early : bool
            Whether an individual gradient descent run should stop as
            soon as an adversarial is found.
        feature_idx_map : dict
            Optional {feature_name: feature_index} map used by the HTML
            map-back step.
        logger : logging.Logger
            Logger that receives the SUCCESS/FAIL result lines.
        """
        a = input_or_adv
        del input_or_adv
        del label
        del unpack
        assert epsilon > 0
        print("Parameters:", epsilon, stepsize, iterations, enforce_interval, request_id)
        # BUG FIX: the base-class _run signature requires feature_idx_map and
        # logger as well; the original call omitted them and raised
        # TypeError unconditionally. They are now accepted as
        # backward-compatible keyword arguments (default None) and forwarded.
        self._run(a, binary_search,
                  epsilon, stepsize, iterations,
                  random_start, return_early,
                  perturbable_idx_set, only_increase_idx_set,
                  feature_defs, normalization_ratios,
                  enforce_interval, request_id,
                  model, browser_id, map_back_mode,
                  feature_idx_map, logger)
class ProjectedGradientDescentAttack(
        LinfinityGradientMixin,
        LinfinityClippingMixin,
        LinfinityDistanceCheckMixin,
        IterativeProjectedGradientBaseAttack):
    """The Projected Gradient Descent Attack
    introduced in [1]_ without random start.

    When used without a random start, this attack
    is also known as Basic Iterative Method (BIM)
    or FGSM^k.

    References
    ----------
    .. [1] <NAME>, <NAME>, <NAME>,
           <NAME>, <NAME>, "Towards Deep Learning
           Models Resistant to Adversarial Attacks",
           https://arxiv.org/abs/1706.06083

    .. seealso::

       :class:`LinfinityBasicIterativeAttack` and
       :class:`RandomStartProjectedGradientDescentAttack`
    """

    def _read_dataset(self, fname):
        """Load the CSV dataset at `fname` and return it row-shuffled."""
        import pandas as pd
        from sklearn.utils import shuffle
        train_dataframe = pd.read_csv(fname)
        dataframe = shuffle(train_dataframe)
        return dataframe

    # Overridden initialization hook from the attack ABC.
    def _initialize(self):
        # Remote surrogate classifier plus the (shuffled) training set used
        # by the attack machinery.
        self._remote_model = setup_clf("../model/rf.pkl")
        self.BASE_DATA_DIR = HOME_DIR + "/attack-adgraph-pipeline/data"
        self._train_data_set = self._read_dataset(
            self.BASE_DATA_DIR + "/hand_preprocessed_trimmed_label_gt_augmented_train_set.csv")

    @call_decorator
    def __call__(self, input_or_adv, label=None, unpack=True,
                 binary_search=True,
                 epsilon=0.3,
                 stepsize=0.01,
                 iterations=40,
                 random_start=False,
                 return_early=True,
                 perturbable_idx_set=None,
                 only_increase_idx_set=None,
                 feature_defs=None,
                 normalization_ratios=None,
                 enforce_interval=1,
                 request_id="URL_dummy",
                 model=None,
                 browser_id=None,
                 map_back_mode=False,
                 feature_idx_map=None,
                 logger=None):
        """Simple iterative gradient-based attack known as
        Basic Iterative Method, Projected Gradient Descent or FGSM^k.

        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            The original, unperturbed input as a `numpy.ndarray` or
            an :class:`Adversarial` instance.
        label : int
            The reference label of the original input. Must be passed
            if `a` is a `numpy.ndarray`, must not be passed if `a` is
            an :class:`Adversarial` instance.
        unpack : bool
            If true, returns the adversarial input, otherwise returns
            the Adversarial object.
        binary_search : bool or int
            Whether to perform a binary search over epsilon and stepsize,
            keeping their ratio constant and using their values to start
            the search. If False, hyperparameters are not optimized.
            Can also be an integer, specifying the number of binary
            search steps (default 20).
        epsilon : float
            Limit on the perturbation size; if binary_search is True,
            this value is only for initialization and automatically
            adapted.
        stepsize : float
            Step size for gradient descent; if binary_search is True,
            this value is only for initialization and automatically
            adapted.
        iterations : int
            Number of iterations for each gradient descent run.
        random_start : bool
            Start the attack from a random point rather than from the
            original input.
        return_early : bool
            Whether an individual gradient descent run should stop as
            soon as an adversarial is found.
        """
        a = input_or_adv
        # Only the (possibly wrapped) adversarial object is used below.
        del input_or_adv
        del label
        del unpack

        assert epsilon > 0

        print("Parameters:", epsilon, stepsize, iterations, enforce_interval, request_id)
        self._run(a, binary_search,
                  epsilon, stepsize, iterations,
                  random_start, return_early,
                  perturbable_idx_set, only_increase_idx_set,
                  feature_defs, normalization_ratios,
                  enforce_interval, request_id,
                  model, browser_id, map_back_mode, feature_idx_map, logger)
# Convenience aliases for the no-random-start PGD attack.
ProjectedGradientDescent = ProjectedGradientDescentAttack
PGD = ProjectedGradientDescent
class RandomStartProjectedGradientDescentAttack(
        LinfinityGradientMixin,
        LinfinityClippingMixin,
        LinfinityDistanceCheckMixin,
        IterativeProjectedGradientBaseAttack):
    """Projected Gradient Descent attack with a random starting point.

    References
    ----------
    .. [1] "Towards Deep Learning Models Resistant to Adversarial
       Attacks", https://arxiv.org/abs/1706.06083

    .. seealso:: :class:`ProjectedGradientDescentAttack`
    """

    @call_decorator
    def __call__(self, input_or_adv, label=None, unpack=True,
                 binary_search=True,
                 epsilon=0.3,
                 stepsize=0.01,
                 iterations=40,
                 random_start=True,
                 return_early=True):
        """Iterative gradient-based attack (BIM / PGD / FGSM^k) started
        from a random point around the original input.

        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            Original, unperturbed input (or an Adversarial wrapper).
        label : int
            Reference label; required iff `input_or_adv` is a bare array.
        unpack : bool
            If True, return the adversarial input; otherwise return the
            Adversarial object.
        binary_search : bool or int
            Whether to binary-search epsilon/stepsize (an int gives the
            number of search steps, default 20).
        epsilon : float
            Perturbation-size limit (initial value when binary searching).
        stepsize : float
            Gradient-descent step size (initial value when binary searching).
        iterations : int
            Number of iterations per gradient-descent run.
        random_start : bool
            Start from a random point instead of the original input.
        return_early : bool
            Stop a run as soon as an adversarial is found.
        """
        a = input_or_adv
        # Drop the raw references; only the wrapper is used from here on.
        del input_or_adv, label, unpack

        assert epsilon > 0

        self._run(a, binary_search, epsilon, stepsize, iterations,
                  random_start, return_early)
# Convenience aliases for the random-start variant.
RandomProjectedGradientDescent = RandomStartProjectedGradientDescentAttack
RandomPGD = RandomProjectedGradientDescent
class MomentumIterativeAttack(
        LinfinityClippingMixin,
        LinfinityDistanceCheckMixin,
        IterativeProjectedGradientBaseAttack):
    """The Momentum Iterative Method attack introduced in [1]_.

    Behaves like the Basic Iterative Method / Projected Gradient Descent,
    but each step uses a decayed running sum (momentum) of the normalized
    gradients instead of the raw gradient.

    References
    ----------
    .. [1] "Boosting Adversarial Attacks with Momentum",
       https://arxiv.org/abs/1710.06081
    """

    def _gradient(self, a, x, class_, strict=True):
        """Return the momentum-smoothed, signed, range-scaled gradient."""
        raw = a.gradient_one(x, class_, strict=strict)
        # Normalize by the mean absolute gradient, guarding against
        # division by zero.
        raw = raw / max(1e-12, np.mean(np.abs(raw)))
        # Fold into the decayed history, then step along its sign.
        self._momentum_history = \
            self._decay_factor * self._momentum_history + raw
        min_, max_ = a.bounds()
        return (max_ - min_) * np.sign(self._momentum_history)

    def _run_one(self, *args, **kwargs):
        # Momentum must not leak across restarts of gradient descent,
        # so the history is cleared before every run.
        self._momentum_history = 0
        return super(MomentumIterativeAttack, self)._run_one(*args, **kwargs)

    @call_decorator
    def __call__(self, input_or_adv, label=None, unpack=True,
                 binary_search=True,
                 epsilon=0.3,
                 stepsize=0.06,
                 iterations=10,
                 decay_factor=1.0,
                 random_start=False,
                 return_early=True):
        """Momentum-based iterative gradient attack (Momentum Iterative
        Method).

        Parameters
        ----------
        input_or_adv : `numpy.ndarray` or :class:`Adversarial`
            Original, unperturbed input (or an Adversarial wrapper).
        label : int
            Reference label; required iff `input_or_adv` is a bare array.
        unpack : bool
            If True, return the adversarial input; otherwise return the
            Adversarial object.
        binary_search : bool
            Whether to binary-search epsilon/stepsize (an int gives the
            number of search steps, default 20).
        epsilon : float
            Perturbation-size limit (initial value when binary searching).
        stepsize : float
            Gradient-descent step size (initial value when binary searching).
        iterations : int
            Number of iterations per gradient-descent run.
        decay_factor : float
            Decay factor used by the momentum term.
        random_start : bool
            Start from a random point instead of the original input.
        return_early : bool
            Stop a run as soon as an adversarial is found.
        """
        a = input_or_adv
        # Drop the raw references; only the wrapper is used from here on.
        del input_or_adv, label, unpack

        assert epsilon > 0

        self._decay_factor = decay_factor
        self._run(a, binary_search, epsilon, stepsize, iterations,
                  random_start, return_early)
# Convenience alias.
MomentumIterativeMethod = MomentumIterativeAttack
| [
"numpy.clip",
"logging.getLogger",
"pandas.read_csv",
"numpy.array",
"os.listdir",
"matplotlib.pyplot.plot",
"warnings.warn",
"numpy.abs",
"logging.warning",
"numpy.square",
"os.path.isfile",
"numpy.sign",
"matplotlib.pyplot.show",
"numpy.copy",
"math.ceil",
"urllib.parse.urlparse",
... | [((1187, 1204), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (1196, 1204), False, 'import os\n'), ((2244, 2277), 'os.listdir', 'os.listdir', (['self.BASE_CRAWLED_DIR'], {}), '(self.BASE_CRAWLED_DIR)\n', (2254, 2277), False, 'import os\n'), ((4304, 4339), 'scipy.spatial.distance.cdist', 'distance.cdist', (['x1', 'x2', '"""euclidean"""'], {}), "(x1, x2, 'euclidean')\n", (4318, 4339), False, 'from scipy.spatial import distance\n'), ((6194, 6222), 'numpy.array', 'np.array', (['rejected_candidate'], {}), '(rejected_candidate)\n', (6202, 6222), True, 'import numpy as np\n'), ((6796, 6824), 'numpy.array', 'np.array', (['rejected_candidate'], {}), '(rejected_candidate)\n', (6804, 6824), True, 'import numpy as np\n'), ((7176, 7194), 'numpy.copy', 'np.copy', (['candidate'], {}), '(candidate)\n', (7183, 7194), True, 'import numpy as np\n'), ((7221, 7239), 'numpy.copy', 'np.copy', (['candidate'], {}), '(candidate)\n', (7228, 7239), True, 'import numpy as np\n'), ((33673, 33690), 'numpy.sign', 'np.sign', (['gradient'], {}), '(gradient)\n', (33680, 33690), True, 'import numpy as np\n'), ((34909, 34957), 'numpy.clip', 'np.clip', (['perturbation', '(-epsilon * s)', '(epsilon * s)'], {}), '(perturbation, -epsilon * s, epsilon * s)\n', (34916, 34957), True, 'import numpy as np\n'), ((43760, 43778), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (43771, 43778), True, 'import pandas as pd\n'), ((43799, 43823), 'sklearn.utils.shuffle', 'shuffle', (['train_dataframe'], {}), '(train_dataframe)\n', (43806, 43823), False, 'from sklearn.utils import shuffle\n'), ((48004, 48022), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (48015, 48022), True, 'import pandas as pd\n'), ((48043, 48067), 'sklearn.utils.shuffle', 'shuffle', (['train_dataframe'], {}), '(train_dataframe)\n', (48050, 48067), False, 'from sklearn.utils import shuffle\n'), ((55723, 55740), 'numpy.sign', 'np.sign', (['gradient'], {}), '(gradient)\n', (55730, 55740), True, 
'import numpy as np\n'), ((4400, 4412), 'numpy.array', 'np.array', (['x1'], {}), '(x1)\n', (4408, 4412), True, 'import numpy as np\n'), ((4415, 4427), 'numpy.array', 'np.array', (['x2'], {}), '(x2)\n', (4423, 4427), True, 'import numpy as np\n'), ((9393, 9485), 'warnings.warn', 'warnings.warn', (['"""applied gradient-based attack to model that does not provide gradients"""'], {}), "(\n 'applied gradient-based attack to model that does not provide gradients')\n", (9406, 9485), False, 'import warnings\n'), ((12208, 12252), 'logging.warning', 'logging.warning', (['"""exponential search failed"""'], {}), "('exponential search failed')\n", (12223, 12252), False, 'import logging\n'), ((15825, 15839), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (15834, 15839), False, 'import os\n'), ((15965, 15979), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (15974, 15979), False, 'import os\n'), ((21874, 21890), 'urllib.parse.urlparse', 'urlparse', (['domain'], {}), '(domain)\n', (21882, 21890), False, 'from urllib.parse import urlparse\n'), ((25683, 25705), 'numpy.clip', 'np.clip', (['x', 'min_', 'max_'], {}), '(x, min_, max_)\n', (25690, 25705), True, 'import numpy as np\n'), ((33193, 33228), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'y_l2'], {'linewidth': '(3)'}), '(x_axis, y_l2, linewidth=3)\n', (33201, 33228), True, 'import matplotlib.pyplot as plt\n'), ((33241, 33276), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'y_lf'], {'linewidth': '(3)'}), '(x_axis, y_lf, linewidth=3)\n', (33249, 33276), True, 'import matplotlib.pyplot as plt\n'), ((33289, 33299), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33297, 33299), True, 'import matplotlib.pyplot as plt\n'), ((35564, 35584), 'numpy.abs', 'np.abs', (['perturbation'], {}), '(perturbation)\n', (35570, 35584), True, 'import numpy as np\n'), ((36477, 36689), 'logging.warning', 'logging.warning', (['"""Running an attack that tries to minimize the Linfinity norm of the perturbation without specifying 
foolbox.distances.Linfinity as the distance metric might lead to suboptimal results."""'], {}), "(\n 'Running an attack that tries to minimize the Linfinity norm of the perturbation without specifying foolbox.distances.Linfinity as the distance metric might lead to suboptimal results.'\n )\n", (36492, 36689), False, 'import logging\n'), ((36942, 37141), 'logging.warning', 'logging.warning', (['"""Running an attack that tries to minimize the L1 norm of the perturbation without specifying foolbox.distances.MAE as the distance metric might lead to suboptimal results."""'], {}), "(\n 'Running an attack that tries to minimize the L1 norm of the perturbation without specifying foolbox.distances.MAE as the distance metric might lead to suboptimal results.'\n )\n", (36957, 37141), False, 'import logging\n'), ((37394, 37593), 'logging.warning', 'logging.warning', (['"""Running an attack that tries to minimize the L2 norm of the perturbation without specifying foolbox.distances.MSE as the distance metric might lead to suboptimal results."""'], {}), "(\n 'Running an attack that tries to minimize the L2 norm of the perturbation without specifying foolbox.distances.MSE as the distance metric might lead to suboptimal results.'\n )\n", (37409, 37593), False, 'import logging\n'), ((3739, 3758), 'urllib.parse.urlparse', 'urlparse', (['final_url'], {}), '(final_url)\n', (3747, 3758), False, 'from urllib.parse import urlparse\n'), ((13776, 13818), 'bs4.BeautifulSoup', 'BeautifulSoup', (['fin'], {'features': '"""html.parser"""'}), "(fin, features='html.parser')\n", (13789, 13818), False, 'from bs4 import BeautifulSoup\n'), ((13944, 13986), 'bs4.BeautifulSoup', 'BeautifulSoup', (['fin'], {'features': '"""html.parser"""'}), "(fin, features='html.parser')\n", (13957, 13986), False, 'from bs4 import BeautifulSoup\n'), ((15429, 15492), 'os.path.isfile', 'os.path.isfile', (["(self.BASE_TIMELINE_DIR + '/' + domain + '.json')"], {}), "(self.BASE_TIMELINE_DIR + '/' + domain + '.json')\n", 
(15443, 15492), False, 'import os\n'), ((15672, 15686), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (15681, 15686), False, 'import os\n'), ((18202, 18225), 'math.ceil', 'math.ceil', (['(val + offset)'], {}), '(val + offset)\n', (18211, 18225), False, 'import math\n'), ((20822, 20839), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (20830, 20839), True, 'import numpy as np\n'), ((20841, 20858), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (20849, 20858), True, 'import numpy as np\n'), ((34040, 34056), 'numpy.abs', 'np.abs', (['gradient'], {}), '(gradient)\n', (34046, 34056), True, 'import numpy as np\n'), ((36016, 36039), 'numpy.square', 'np.square', (['perturbation'], {}), '(perturbation)\n', (36025, 36039), True, 'import numpy as np\n'), ((4451, 4463), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (4457, 4463), True, 'import numpy as np\n'), ((34415, 34434), 'numpy.square', 'np.square', (['gradient'], {}), '(gradient)\n', (34424, 34434), True, 'import numpy as np\n'), ((35302, 35354), 'numpy.clip', 'np.clip', (['[original_perturbation[i]]', '(-offset)', 'offset'], {}), '([original_perturbation[i]], -offset, offset)\n', (35309, 35354), True, 'import numpy as np\n'), ((55459, 55475), 'numpy.abs', 'np.abs', (['gradient'], {}), '(gradient)\n', (55465, 55475), True, 'import numpy as np\n'), ((28901, 28920), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (28918, 28920), False, 'import logging\n'), ((28281, 28301), 'numpy.array', 'np.array', (['[original]'], {}), '([original])\n', (28289, 28301), True, 'import numpy as np\n'), ((32398, 32412), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (32407, 32412), False, 'import os\n'), ((32721, 32735), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (32730, 32735), False, 'import os\n'), ((30115, 30133), 'numpy.array', 'np.array', (['[x_cent]'], {}), '([x_cent])\n', (30123, 30133), True, 'import numpy as np\n'), ((30238, 30251), 'numpy.array', 'np.array', (['[x]'], 
{}), '([x])\n', (30246, 30251), True, 'import numpy as np\n'), ((30901, 30919), 'numpy.array', 'np.array', (['[x_cent]'], {}), '([x_cent])\n', (30909, 30919), True, 'import numpy as np\n'), ((31524, 31537), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (31532, 31537), True, 'import numpy as np\n')] |
'''
Tests for API Utilities. These are utilities shared with other
files in core.
'''
from scisheets.core import helpers_test as ht
import mysite.settings as settings
from CommonUtil.util import stripFileExtension
from scisheets.core.helpers_test import TEST_DIR
import api_util as api_util
from extended_array import ExtendedArray
import numpy as np
import os
import unittest
# Fixture arrays of differing lengths/dtypes for the compareIterables tests.
ARRAY_INT = np.array(range(4))
ARRAY_INT_LONG = np.array(range(5))
ARRAY_FLOAT = np.array([0.01*x for x in range(4)])
# Scratch file name using the configured SciSheets extension.
TESTFILE = "test_api_util.%s" % settings.SCISHEETS_EXT
#############################
# Tests
#############################
# pylint: disable=W0212,C0111,R0904
class TestAPIUtil(unittest.TestCase):
    """Unit tests for the ``api_util`` helpers: iterable comparison,
    table copying, and object serialization round-trips."""

    def setUp(self):
        # Standard table / test-directory fixture from helpers_test.
        ht.setupTableInitialization(self)

    def testCompareIterables(self):
        # Appending a trailing NaN to an otherwise identical array is
        # still expected to compare as equal.
        float_list = ARRAY_FLOAT.tolist()
        float_list.append(np.nan)
        new_array_float = np.array(float_list)
        self.assertTrue(api_util.compareIterables(ARRAY_FLOAT, new_array_float))
        # Arrays with different lengths or different values are unequal.
        self.assertFalse(api_util.compareIterables(ARRAY_INT, ARRAY_INT_LONG))
        self.assertFalse(api_util.compareIterables(ARRAY_INT,
                         np.array([0.1*n for n in range(4)])))
        # An array always compares equal to itself.
        self.assertTrue(api_util.compareIterables(ARRAY_INT, ARRAY_INT))
        self.assertTrue(api_util.compareIterables(ARRAY_FLOAT, ARRAY_FLOAT))

    def testCopyTableToFile(self):
        filename = ht.TEST_FILENAME[:-4]  # Exclude ".pcl"
        table = ht.createTable("test_table")
        new_filepath = api_util.copyTableToFile(table,
                                                 filename,
                                                 ht.TEST_DIR)
        path = os.path.join(ht.TEST_DIR, ht.TEST_FILENAME)
        # The copy should land at the expected path (ignoring extension).
        self.assertEqual(stripFileExtension(new_filepath),
                         stripFileExtension(path))
        self.assertTrue(os.path.exists(new_filepath))
        # Reading the copy back should yield an equivalent table.
        new_table = api_util.readObjectFromFile(new_filepath, verify=False)
        self.assertTrue(table.isEquivalent(new_table))
        os.remove(new_filepath)

    def testWriteObjectToFileAndReadObjectFromFile(self):
        path = os.path.join(TEST_DIR, TESTFILE)
        # Round-trip the fixture table through write/read.
        self.table.setFilepath(path)
        api_util.writeObjectToFile(self.table)
        new_table = api_util.readObjectFromFile(path)
        self.assertTrue(self.table.isEquivalent(new_table))
        # A second round-trip to the same path should also succeed.
        self.table.setFilepath(path)
        api_util.writeObjectToFile(self.table)
        new_table = api_util.readObjectFromFile(path)
        self.assertTrue(self.table.isEquivalent(new_table))
        # Round-trip a plain dict as well.
        a_dict = {"a": range(5), "b": range(10)}
        api_util.writeObjectToFile(a_dict, filepath=path)
        new_a_dict = api_util.readObjectFromFile(path)
        self.assertEqual(a_dict, new_a_dict)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| [
"api_util.copyTableToFile",
"os.path.exists",
"api_util.readObjectFromFile",
"os.path.join",
"CommonUtil.util.stripFileExtension",
"numpy.array",
"api_util.writeObjectToFile",
"scisheets.core.helpers_test.createTable",
"unittest.main",
"scisheets.core.helpers_test.setupTableInitialization",
"api... | [((2638, 2653), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2651, 2653), False, 'import unittest\n'), ((718, 751), 'scisheets.core.helpers_test.setupTableInitialization', 'ht.setupTableInitialization', (['self'], {}), '(self)\n', (745, 751), True, 'from scisheets.core import helpers_test as ht\n'), ((877, 897), 'numpy.array', 'np.array', (['float_list'], {}), '(float_list)\n', (885, 897), True, 'import numpy as np\n'), ((1397, 1425), 'scisheets.core.helpers_test.createTable', 'ht.createTable', (['"""test_table"""'], {}), "('test_table')\n", (1411, 1425), True, 'from scisheets.core import helpers_test as ht\n'), ((1445, 1499), 'api_util.copyTableToFile', 'api_util.copyTableToFile', (['table', 'filename', 'ht.TEST_DIR'], {}), '(table, filename, ht.TEST_DIR)\n', (1469, 1499), True, 'import api_util as api_util\n'), ((1603, 1646), 'os.path.join', 'os.path.join', (['ht.TEST_DIR', 'ht.TEST_FILENAME'], {}), '(ht.TEST_DIR, ht.TEST_FILENAME)\n', (1615, 1646), False, 'import os\n'), ((1803, 1858), 'api_util.readObjectFromFile', 'api_util.readObjectFromFile', (['new_filepath'], {'verify': '(False)'}), '(new_filepath, verify=False)\n', (1830, 1858), True, 'import api_util as api_util\n'), ((1914, 1937), 'os.remove', 'os.remove', (['new_filepath'], {}), '(new_filepath)\n', (1923, 1937), False, 'import os\n'), ((2006, 2038), 'os.path.join', 'os.path.join', (['TEST_DIR', 'TESTFILE'], {}), '(TEST_DIR, TESTFILE)\n', (2018, 2038), False, 'import os\n'), ((2076, 2114), 'api_util.writeObjectToFile', 'api_util.writeObjectToFile', (['self.table'], {}), '(self.table)\n', (2102, 2114), True, 'import api_util as api_util\n'), ((2131, 2164), 'api_util.readObjectFromFile', 'api_util.readObjectFromFile', (['path'], {}), '(path)\n', (2158, 2164), True, 'import api_util as api_util\n'), ((2264, 2302), 'api_util.writeObjectToFile', 'api_util.writeObjectToFile', (['self.table'], {}), '(self.table)\n', (2290, 2302), True, 'import api_util as api_util\n'), ((2319, 2352), 
'api_util.readObjectFromFile', 'api_util.readObjectFromFile', (['path'], {}), '(path)\n', (2346, 2352), True, 'import api_util as api_util\n'), ((2464, 2513), 'api_util.writeObjectToFile', 'api_util.writeObjectToFile', (['a_dict'], {'filepath': 'path'}), '(a_dict, filepath=path)\n', (2490, 2513), True, 'import api_util as api_util\n'), ((2531, 2564), 'api_util.readObjectFromFile', 'api_util.readObjectFromFile', (['path'], {}), '(path)\n', (2558, 2564), True, 'import api_util as api_util\n'), ((918, 973), 'api_util.compareIterables', 'api_util.compareIterables', (['ARRAY_FLOAT', 'new_array_float'], {}), '(ARRAY_FLOAT, new_array_float)\n', (943, 973), True, 'import api_util as api_util\n'), ((996, 1048), 'api_util.compareIterables', 'api_util.compareIterables', (['ARRAY_INT', 'ARRAY_INT_LONG'], {}), '(ARRAY_INT, ARRAY_INT_LONG)\n', (1021, 1048), True, 'import api_util as api_util\n'), ((1174, 1221), 'api_util.compareIterables', 'api_util.compareIterables', (['ARRAY_INT', 'ARRAY_INT'], {}), '(ARRAY_INT, ARRAY_INT)\n', (1199, 1221), True, 'import api_util as api_util\n'), ((1243, 1294), 'api_util.compareIterables', 'api_util.compareIterables', (['ARRAY_FLOAT', 'ARRAY_FLOAT'], {}), '(ARRAY_FLOAT, ARRAY_FLOAT)\n', (1268, 1294), True, 'import api_util as api_util\n'), ((1668, 1700), 'CommonUtil.util.stripFileExtension', 'stripFileExtension', (['new_filepath'], {}), '(new_filepath)\n', (1686, 1700), False, 'from CommonUtil.util import stripFileExtension\n'), ((1711, 1735), 'CommonUtil.util.stripFileExtension', 'stripFileExtension', (['path'], {}), '(path)\n', (1729, 1735), False, 'from CommonUtil.util import stripFileExtension\n'), ((1757, 1785), 'os.path.exists', 'os.path.exists', (['new_filepath'], {}), '(new_filepath)\n', (1771, 1785), False, 'import os\n')] |
import pandas as pd
import os
import numpy as np
from matplotlib import pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from model import LSTM
from prepare_data import Data, Dataset
# Default training configuration.
stock = "MC.PA"        # default ticker symbol
input_size = 4         # input size passed to the LSTM model
output_size = 1        # single predicted value per step
nb_neurons = 200       # hidden-layer size passed to the LSTM model
learning_rate = 0.001  # Adam learning rate
nb_epochs = 150        # number of training epochs
# Directory where model checkpoints are saved during training.
output_path = "/Users/baptiste/Desktop/training"
class StockPrediction():
    """Train and evaluate an LSTM stock-price predictor for one ticker.

    Builds train/test dataloaders on construction (``prepare_data``), then
    ``train`` runs gradient descent with periodic checkpoints and
    ``show_result`` plots each checkpoint's predictions against the target.
    """

    def __init__(self, stock, time_window, batch_size, learning_rate=0.001):
        self.stock = stock
        self.time_window = time_window
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.input_size = 4
        self.output_size = 1
        self.nb_neurons = 200
        self.prepare_data()
        self.output = "/Users/baptiste/Desktop/training"

    def validate(self):
        """Print the mean relative error of the model on the test split."""
        self.lstm_model.eval()
        error = []
        it = iter(self.real_data_dataloader)
        real_data = next(it)
        for i, (x, _) in enumerate(self.testing_dataloader):
            try:
                with torch.no_grad():
                    pred = self.lstm_model(x.float())
                pred = self.data.unnormalizeData(pred)
                real_data = real_data.view(-1, 1)
                error = self.compute_error(error, pred, real_data)
                real_data = next(it)
            except Exception:
                # Best effort: skip trailing partial batches and the
                # StopIteration raised when the real-data loader runs out.
                pass
        error_mean = np.mean(error) * 100
        print("Mean error percentage : ", error_mean)
        self.lstm_model.train()

    def compute_error(self, error, pred, target):
        """Append per-sample relative errors |pred - target| / target
        for one batch to ``error`` and return it."""
        for i in range(self.batch_size):
            error.append(abs(pred[i, 0] - target[i, 0]) / target[i, 0])
        return error

    def prepare_data(self):
        """Download the data, normalize it, and build the dataloaders."""
        validation_split = 0
        test_split = 0.1
        train_split = 1 - validation_split - test_split
        self.data = Data(self.stock)
        df = self.data.getData()
        df_normalized = self.data.normalizeData(df)
        df_normalized = torch.FloatTensor(df_normalized.to_numpy())
        # Convert the fractional splits into row counts.
        train_split = int(train_split * df.shape[0])
        validation_split = int(validation_split * df.shape[0])
        test_split = int(test_split * df.shape[0])
        training_split = df_normalized[:train_split, :]
        training_data = Dataset(training_split, self.time_window)
        self.training_dataloader = DataLoader(training_data, batch_size=self.batch_size)
        # Test data: keep the unnormalized series (column 3) for error
        # reporting against unnormalized predictions.
        real_data_tensor = torch.FloatTensor(df.to_numpy())
        self.real_data_test = torch.FloatTensor(real_data_tensor[-test_split:-self.time_window, 3])
        testing_dataset = Dataset(df_normalized[-test_split:, :], self.time_window)
        self.testing_dataloader = DataLoader(testing_dataset, batch_size=self.batch_size)
        self.real_data_dataloader = DataLoader(self.real_data_test, batch_size=self.batch_size)

    def train(self):
        """Run gradient descent for ``nb_epochs`` epochs, validating each
        epoch and checkpointing every 5 epochs."""
        # Model
        self.lstm_model = LSTM(self.input_size, self.output_size, self.nb_neurons)
        # NOTE(review): resumes from a hard-coded, machine-specific
        # checkpoint; consider making this path configurable.
        self.lstm_model.load_state_dict(torch.load("/Users/baptiste/Desktop/training/AAPL_36.pth"))
        loss_function = nn.MSELoss()
        optimizer = torch.optim.Adam(self.lstm_model.parameters(), lr=self.learning_rate)
        print("Start training")
        for epoch in range(nb_epochs):
            for (x, y) in self.training_dataloader:
                optimizer.zero_grad()
                # Re-initialize the LSTM hidden/cell state for each batch.
                self.lstm_model.hidden_cell = (torch.zeros(1, self.batch_size, self.lstm_model.nb_neurons),
                                               torch.zeros(1, self.batch_size, self.lstm_model.nb_neurons))
                pred = self.lstm_model(x.float())
                y = y.view(self.batch_size, 1)
                loss = loss_function(pred, y)
                loss.backward()
                optimizer.step()
            print("epoch n°%s : loss = %s" % (epoch, loss.item()))
            self.validate()
            if epoch % 5 == 1:
                model_name = "%s_%s.pth" % (self.stock, epoch)
                torch.save(self.lstm_model.state_dict(), os.path.join(output_path, model_name))

    def show_result(self):
        """Plot each saved checkpoint's predictions against the target."""
        files = os.listdir(self.output)
        for file in files:
            if ".pth" in file:
                path = os.path.join(self.output, file)
                lstm_model = LSTM(self.input_size, self.output_size, self.nb_neurons)
                lstm_model.load_state_dict(torch.load(path))
                lstm_model.eval()
                print("model : %s loaded" % path)
                predictions = []
                for (x, _) in self.testing_dataloader:
                    # Skip trailing partial batches the model cannot consume.
                    if x.shape[0] == self.batch_size:
                        with torch.no_grad():
                            lstm_model.hidden_cell = (torch.zeros(1, self.batch_size, lstm_model.nb_neurons),
                                                      torch.zeros(1, self.batch_size, lstm_model.nb_neurons))
                            output = lstm_model(x.float())
                            output = self.data.unnormalizeData(output).squeeze()
                            predictions += output.tolist()
                plt.plot(predictions, label="prediction")
                plt.plot(self.real_data_test, label="target")
                plt.title(file)
                plt.legend()
                plt.show()
def main():
    """Build a StockPrediction for AAPL; training/plotting are opt-in."""
    stockprediction = StockPrediction(stock="AAPL", time_window=5, batch_size=5)
    # Uncomment to actually train and visualize the results:
    #stockprediction.train()
    #stockprediction.show_result()
# Run only when executed as a script (the previous empty `else: pass`
# branch was dead code).
if __name__ == '__main__':
    main()
"numpy.mean",
"os.listdir",
"model.LSTM",
"torch.load",
"prepare_data.Data",
"os.path.join",
"matplotlib.pyplot.plot",
"torch.nn.MSELoss",
"torch.no_grad",
"torch.zeros",
"torch.utils.data.DataLoader",
"prepare_data.Dataset",
"matplotlib.pyplot.title",
"torch.FloatTensor",
"matplotlib.py... | [((905, 917), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (915, 917), True, 'import torch.nn as nn\n'), ((1919, 1935), 'prepare_data.Data', 'Data', (['self.stock'], {}), '(self.stock)\n', (1923, 1935), False, 'from prepare_data import Data, Dataset\n'), ((2332, 2373), 'prepare_data.Dataset', 'Dataset', (['training_split', 'self.time_window'], {}), '(training_split, self.time_window)\n', (2339, 2373), False, 'from prepare_data import Data, Dataset\n'), ((2409, 2462), 'torch.utils.data.DataLoader', 'DataLoader', (['training_data'], {'batch_size': 'self.batch_size'}), '(training_data, batch_size=self.batch_size)\n', (2419, 2462), False, 'from torch.utils.data import DataLoader\n'), ((2576, 2645), 'torch.FloatTensor', 'torch.FloatTensor', (['real_data_tensor[-test_split:-self.time_window, 3]'], {}), '(real_data_tensor[-test_split:-self.time_window, 3])\n', (2593, 2645), False, 'import torch\n'), ((2671, 2728), 'prepare_data.Dataset', 'Dataset', (['df_normalized[-test_split:, :]', 'self.time_window'], {}), '(df_normalized[-test_split:, :], self.time_window)\n', (2678, 2728), False, 'from prepare_data import Data, Dataset\n'), ((2762, 2817), 'torch.utils.data.DataLoader', 'DataLoader', (['testing_dataset'], {'batch_size': 'self.batch_size'}), '(testing_dataset, batch_size=self.batch_size)\n', (2772, 2817), False, 'from torch.utils.data import DataLoader\n'), ((2854, 2913), 'torch.utils.data.DataLoader', 'DataLoader', (['self.real_data_test'], {'batch_size': 'self.batch_size'}), '(self.real_data_test, batch_size=self.batch_size)\n', (2864, 2913), False, 'from torch.utils.data import DataLoader\n'), ((2986, 3042), 'model.LSTM', 'LSTM', (['self.input_size', 'self.output_size', 'self.nb_neurons'], {}), '(self.input_size, self.output_size, self.nb_neurons)\n', (2990, 3042), False, 'from model import LSTM\n'), ((3167, 3179), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3177, 3179), True, 'import torch.nn as nn\n'), ((4131, 4154), 
'os.listdir', 'os.listdir', (['self.output'], {}), '(self.output)\n', (4141, 4154), False, 'import os\n'), ((1461, 1475), 'numpy.mean', 'np.mean', (['error'], {}), '(error)\n', (1468, 1475), True, 'import numpy as np\n'), ((3083, 3141), 'torch.load', 'torch.load', (['"""/Users/baptiste/Desktop/training/AAPL_36.pth"""'], {}), "('/Users/baptiste/Desktop/training/AAPL_36.pth')\n", (3093, 3141), False, 'import torch\n'), ((4236, 4267), 'os.path.join', 'os.path.join', (['self.output', 'file'], {}), '(self.output, file)\n', (4248, 4267), False, 'import os\n'), ((4297, 4353), 'model.LSTM', 'LSTM', (['self.input_size', 'self.output_size', 'self.nb_neurons'], {}), '(self.input_size, self.output_size, self.nb_neurons)\n', (4301, 4353), False, 'from model import LSTM\n'), ((5077, 5118), 'matplotlib.pyplot.plot', 'plt.plot', (['predictions'], {'label': '"""prediction"""'}), "(predictions, label='prediction')\n", (5085, 5118), True, 'from matplotlib import pyplot as plt\n'), ((5135, 5180), 'matplotlib.pyplot.plot', 'plt.plot', (['self.real_data_test'], {'label': '"""target"""'}), "(self.real_data_test, label='target')\n", (5143, 5180), True, 'from matplotlib import pyplot as plt\n'), ((5197, 5212), 'matplotlib.pyplot.title', 'plt.title', (['file'], {}), '(file)\n', (5206, 5212), True, 'from matplotlib import pyplot as plt\n'), ((5229, 5241), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5239, 5241), True, 'from matplotlib import pyplot as plt\n'), ((5258, 5268), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5266, 5268), True, 'from matplotlib import pyplot as plt\n'), ((1108, 1123), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1121, 1123), False, 'import torch\n'), ((3483, 3542), 'torch.zeros', 'torch.zeros', (['(1)', 'self.batch_size', 'self.lstm_model.nb_neurons'], {}), '(1, self.batch_size, self.lstm_model.nb_neurons)\n', (3494, 3542), False, 'import torch\n'), ((3542, 3601), 'torch.zeros', 'torch.zeros', (['(1)', 'self.batch_size', 
'self.lstm_model.nb_neurons'], {}), '(1, self.batch_size, self.lstm_model.nb_neurons)\n', (3553, 3601), False, 'import torch\n'), ((4048, 4085), 'os.path.join', 'os.path.join', (['output_path', 'model_name'], {}), '(output_path, model_name)\n', (4060, 4085), False, 'import os\n'), ((4397, 4413), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (4407, 4413), False, 'import torch\n'), ((4668, 4683), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4681, 4683), False, 'import torch\n'), ((4739, 4793), 'torch.zeros', 'torch.zeros', (['(1)', 'self.batch_size', 'lstm_model.nb_neurons'], {}), '(1, self.batch_size, lstm_model.nb_neurons)\n', (4750, 4793), False, 'import torch\n'), ((4793, 4847), 'torch.zeros', 'torch.zeros', (['(1)', 'self.batch_size', 'lstm_model.nb_neurons'], {}), '(1, self.batch_size, lstm_model.nb_neurons)\n', (4804, 4847), False, 'import torch\n')] |
import os
import numpy as np
import holoviews as hv
import pandas as pd
import logging
from bokeh.models import HoverTool
import holoviews as hv
import datashader as ds
from holoviews.operation.datashader import aggregate, datashade, dynspread
import colorcet as cc
import param
import parambokeh
from lsst.pipe.tasks.functors import (Mag, CustomFunctor, DeconvolvedMoments,
StarGalaxyLabeller, RAColumn, DecColumn,
Column, SdssTraceSize, PsfSdssTraceSizeDiff,
HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor)
# Registry of x-axis functors selectable by name in the QAExplorer UI.
default_xFuncs = {'base_PsfFlux' : Mag('base_PsfFlux'),
                  'modelfit_CModel' : Mag('modelfit_CModel')}
# Registry of y-axis functors; keys are the human-readable labels shown in the
# selector widget, values are lsst.pipe.tasks.functors instances.
default_yFuncs = {'modelfit_CModel - base_PsfFlux' : CustomFunctor('mag(modelfit_CModel) - mag(base_PsfFlux)'),
                  'Deconvolved Moments' : DeconvolvedMoments(),
                  'Footprint NPix' : Column('base_Footprint_nPix'),
                  'ext_photometryKron_KronFlux - base_PsfFlux' : \
                        CustomFunctor('mag(ext_photometryKron_KronFlux) - mag(base_PsfFlux)'),
                  'base_GaussianFlux - base_PsfFlux' : CustomFunctor('mag(base_GaussianFlux) - mag(base_PsfFlux)'),
                  'SDSS Trace Size' : SdssTraceSize(),
                  'PSF - SDSS Trace Size' : PsfSdssTraceSizeDiff(),
                  'HSM Trace Size' : HsmTraceSize(),
                  'PSF - HSM Trace Size': PsfHsmTraceSizeDiff()}
# Registry of object labellers (star/galaxy classification of sources).
default_labellers = {'default':StarGalaxyLabeller()}
def getFunc(funcName):
    """Resolve a functor by name.

    Looks the name up in the x- and y-axis registries in turn; any name not
    found there is treated as a custom magnitude expression and wrapped in a
    CustomFunctor.
    """
    for registry in (default_xFuncs, default_yFuncs):
        if funcName in registry:
            return registry[funcName]
    return CustomFunctor(funcName)
def getLabeller(labellerName):
    """Return the labeller registered under *labellerName*.

    Raises KeyError if the name is not in ``default_labellers``.
    """
    labeller = default_labellers[labellerName]
    return labeller
def write_selected(explorer, filename):
    """Print the head of the explorer's current selection.

    NOTE(review): *filename* is currently unused — presumably a write to disk
    was planned; confirm against callers before relying on it.
    """
    selection = explorer._selected
    print(selection.head())
def get_default_range(x, y):
    """Compute robust default plot ranges for an (x, y) scatter.

    The y-range is the median +/- 10 median-absolute-deviations; the x-range
    is the [0, 0.99] quantile interval widened by a quarter of the x MAD.
    NaNs are dropped before any statistic is computed.

    Returns:
        ((xlo, xhi), (ylo, yhi)) tuple of axis limits.
    """
    x_clean = pd.Series(x).dropna()
    y_clean = pd.Series(y).dropna()
    x_median = np.median(x_clean)
    y_median = np.median(y_clean)
    x_mad = np.median(np.absolute(x_clean - x_median))
    y_mad = np.median(np.absolute(y_clean - y_median))
    y_lo, y_hi = y_median - 10*y_mad, y_median + 10*y_mad
    x_lo, x_hi = x_clean.quantile([0., 0.99])
    pad = x_mad/4.
    return (x_lo - pad, x_hi + pad), (y_lo, y_hi)
class QAExplorer(hv.streams.Stream):
    """Interactive QA dashboard for a source catalog.

    A holoviews Stream whose parameters drive four linked plots (scatter,
    sky map, and x/y histograms).  Catalog columns are turned into plottable
    quantities via the lsst functor registries defined at module level.
    """
    # Path to the parquet catalog to load.
    catalog = param.Path(default='forced_big.parq', search_paths=['.','data'])
    # Optional pandas-style query applied when evaluating the functors.
    query = param.String(default='')
    # Comma-separated names of previously-saved ID selections to restrict to.
    id_list = param.String(default='')
    # Named x-axis quantity; overridden by x_data_custom when non-empty.
    x_data = param.ObjectSelector(default='base_PsfFlux',
                                  objects=list(default_xFuncs.keys()))
    x_data_custom = param.String(default='')
    # Named y-axis quantity; overridden by y_data_custom when non-empty.
    y_data = param.ObjectSelector(default='modelfit_CModel - base_PsfFlux',
                                  objects=list(default_yFuncs.keys()))
    y_data_custom = param.String(default='')
    labeller = param.ObjectSelector(default='default',
                                    objects = list(default_labellers.keys()))
    object_type = param.ObjectSelector(default='all',
                                       objects=['all', 'star', 'galaxy'])
    # Number of histogram bins.
    nbins = param.Integer(default=20, bounds=(10,100))
    # write_selected = param.Action(default=write_selected)
    output = parambokeh.view.Plot()
    def __init__(self, rootdir='.', *args, **kwargs):
        """rootdir: directory under which saved-ID files live (see id_path)."""
        super(QAExplorer, self).__init__(*args, **kwargs)
        self.rootdir = rootdir
        self._ds = None        # lazily-built hv.Dataset; see the ds property
        self._selected = None  # ids of the points inside the current x/y range
        # Sets self.ds property
        # self._set_data(self.catalog, self.query, self.id_list,
        #         self.x_data, self.x_data_custom,
        #         self.y_data, self.y_data_custom, self.labeller)
    @property
    def funcs(self):
        """CompositeFunctor built from the current parameter values."""
        return self._get_funcs(self.x_data, self.x_data_custom,
                            self.y_data, self.y_data_custom,
                            self.labeller)
    def _get_funcs(self, x_data, x_data_custom, y_data, y_data_custom,
                  labeller):
        """Assemble the x/y/label functors plus id/ra/dec columns.

        NOTE(review): the positional arguments are ignored — the body reads
        the corresponding self attributes instead; confirm this is intended.
        """
        if self.x_data_custom:
            xFunc = getFunc(self.x_data_custom)
        else:
            xFunc = getFunc(self.x_data)
        if self.y_data_custom:
            yFunc = getFunc(self.y_data_custom)
        else:
            yFunc = getFunc(self.y_data)
        labeller = getLabeller(self.labeller)
        return CompositeFunctor({'x' : xFunc,
                                 'y' : yFunc,
                                 'label' : labeller,
                                 'id' : Column('id'),
                                 'ra' : RAColumn(),
                                 'dec': DecColumn()})
    def _set_data(self, catalog, query, id_list,
                 x_data, x_data_custom, y_data, y_data_custom,
                 labeller, **kwargs):
        """Evaluate the functors on the catalog and rebuild self._ds.

        Rows with non-finite x or y are dropped; when id_list is non-empty
        the frame is restricted to the saved IDs first.
        """
        funcs = self._get_funcs(x_data, x_data_custom,
                                y_data, y_data_custom,
                                labeller)
        df = funcs(catalog, query=query)
        df.index = df['id']
        if id_list:
            ids = self.get_saved_ids(id_list)
            df = df.loc[ids]
        ok = np.isfinite(df.x) & np.isfinite(df.y)
        xdim = hv.Dimension('x', label=funcs['x'].name)
        ydim = hv.Dimension('y', label=funcs['y'].name)
        self._ds = hv.Dataset(df[ok], kdims=[xdim, ydim, 'ra', 'dec', 'id'], vdims=['label'])
    @property
    def ds(self):
        """The hv.Dataset of plottable points, built on first access."""
        if self._ds is None:
            self._set_data(self.catalog, self.query, self.id_list,
                           self.x_data, self.x_data_custom,
                           self.y_data, self.y_data_custom,
                           self.labeller)
        return self._ds
    @property
    def selected(self):
        """IDs captured by the most recent make_sky range selection."""
        return self._selected
    @property
    def id_path(self):
        """Directory where saved ID selections are stored as HDF5 files."""
        return os.path.join(self.rootdir, 'data', 'ids')
    def save_selected(self, name):
        """Write the current selection's IDs to <id_path>/<name>.h5."""
        filename = os.path.join(self.id_path, '{}.h5'.format(name))
        logging.info('writing {} ids to {}'.format(len(self.selected), filename))
        self.selected.to_hdf(filename, 'ids', mode='w')
    @property
    def saved_ids(self):
        """Returns list of names of selected IDs
        """
        return [os.path.splitext(f)[0] for f in os.listdir(self.id_path)]
    def get_saved_ids(self, id_list):
        """Load and de-duplicate IDs from a comma-separated list of names."""
        id_list = id_list.split(',')
        files = [os.path.join(self.id_path, '{}.h5'.format(f.strip())) for f in id_list]
        return pd.concat([pd.read_hdf(f, 'ids') for f in files]).unique()
    def make_scatter(self, object_type, x_range=None, y_range=None, **kwargs):
        """Datashaded x-vs-y scatter for the requested object type."""
        self._set_data(**kwargs)
        logging.info('x_range={}, y_range={}'.format(x_range, y_range))
        if object_type == 'all':
            dset = self.ds
        else:
            dset = self.ds.select(label=object_type)
        pts = dset.to(hv.Points, kdims=['x', 'y'], vdims=['label'], groupby=[])
        print(pts.dimensions())
        # log normalization keeps dense regions from saturating the shading
        scatter = dynspread(datashade(pts, x_range=x_range, y_range=y_range, dynamic=False, normalization='log'))
        hover = HoverTool(tooltips=[("(x,y)", "($x, $y)")])
        # hv.opts({'RGB': {'plot' : {'tools' : [hover]}}}, scatter)
        # scatter = scatter.opts(plot=dict(tools=[hover]))
        title = '{} ({}) {}'.format(object_type, len(dset), pts.get_dimension('y').label)
        scatter = scatter.opts('RGB [width=600, height=400]').relabel(title)
        return scatter
    def make_sky(self, object_type, ra_range=None, dec_range=None, x_range=None, y_range=None, **kwargs):
        """Sky (ra/dec) map colored by y, restricted to the scatter's x/y range.

        Side effect: stores the IDs of the in-range points in self._selected.
        """
        if object_type == 'all':
            dset = self.ds
        else:
            dset = self.ds.select(label=object_type)
        if x_range is not None and y_range is not None:
            dset = dset.select(x=x_range, y=y_range)
        self._selected = dset.data.id
        pts = dset.to(hv.Points, kdims=['ra', 'dec'], vdims=['y'], groupby=[])
        # invisible QuadMesh overlay supplies hover readout of the mean y
        agg = aggregate(pts, width=100, height=100, x_range=ra_range, y_range=dec_range, aggregator=ds.mean('y'), dynamic=False)
        hover = hv.QuadMesh(agg).opts('[tools=["hover"]] (alpha=0 hover_alpha=0.2)')
        shaded = dynspread(datashade(pts, x_range=ra_range, y_range=dec_range, dynamic=False,
                                      cmap=cc.palette['coolwarm'], aggregator=ds.mean('y')))
        shaded = shaded.opts('RGB [width=400, height=400]')
        return (shaded*hover).relabel('{} ({})'.format(object_type, len(dset)))
    def _make_hist(self, dim, rng, **kwargs):
        """Histogram of dimension *dim* ('x' or 'y') over range *rng*.

        When rng is None the [0.005, 0.995] quantile interval is used.
        """
        if kwargs['object_type'] == 'all':
            dset = self.ds
        else:
            dset = self.ds.select(label=kwargs['object_type'])
        if rng is None:
            lo, hi = dset.data[dim].quantile([0.005, 0.995])
            rng = [lo, hi]
        opts = 'Histogram [yaxis=None] (alpha=0.3)' + \
               ' {+framewise +axiswise} '
        h = hv.operation.histogram(dset, num_bins=kwargs['nbins'],
                                   dimension=dim, normed='height',
                                   bin_range=rng).opts(opts)
        return h
    def make_xhist(self, **kwargs):
        """Histogram of the x dimension over the current x range."""
        x_range = kwargs.pop('x_range')
        return self._make_hist('x', x_range, **kwargs)
    def make_yhist(self, **kwargs):
        """Histogram of the y dimension over the current y range."""
        y_range = kwargs.pop('y_range')
        return self._make_hist('y', y_range, **kwargs)
    @property
    def default_range(self):
        """Robust default ((xlo, xhi), (ylo, yhi)) axis limits.

        Same statistics as the module-level get_default_range: median +/- 10
        MADs in y, the [0, 0.99] quantile interval padded by MAD/4 in x.
        """
        x = self.ds.data.x.dropna()
        y = self.ds.data.y.dropna()
        xMed = np.median(x)
        yMed = np.median(y)
        xMAD = np.median(np.absolute(x - xMed))
        yMAD = np.median(np.absolute(y - yMed))
        ylo = yMed - 10*yMAD
        yhi = yMed + 10*yMAD
        xlo, xhi = x.quantile([0., 0.99])
        xBuffer = xMAD/4.
        xlo -= xBuffer
        xhi += xBuffer
        # print(xlo, xhi, ylo, yhi)
        return (xlo, xhi), (ylo, yhi)
    def view(self):
        """Assemble the 2x2 linked layout (scatter, sky, y-hist, x-hist)."""
        x_range, y_range = self.default_range
        range_xy = hv.streams.RangeXY()
        range_sky = hv.streams.RangeXY().rename(x_range='ra_range', y_range='dec_range')
        scatter = hv.DynamicMap(self.make_scatter, streams=[self, range_xy])
        sky = hv.DynamicMap(self.make_sky, streams=[self, range_sky, range_xy])
        xhist = hv.DynamicMap(self.make_xhist, kdims=[], streams=[self, range_xy])
        yhist = hv.DynamicMap(self.make_yhist, kdims=[], streams=[self, range_xy])
        l = (scatter + sky + yhist + xhist).cols(2)
        return l
| [
"datashader.mean",
"param.ObjectSelector",
"lsst.pipe.tasks.functors.PsfSdssTraceSizeDiff",
"holoviews.operation.histogram",
"numpy.isfinite",
"holoviews.streams.RangeXY",
"holoviews.Dimension",
"lsst.pipe.tasks.functors.PsfHsmTraceSizeDiff",
"os.listdir",
"lsst.pipe.tasks.functors.RAColumn",
"p... | [((670, 689), 'lsst.pipe.tasks.functors.Mag', 'Mag', (['"""base_PsfFlux"""'], {}), "('base_PsfFlux')\n", (673, 689), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((729, 751), 'lsst.pipe.tasks.functors.Mag', 'Mag', (['"""modelfit_CModel"""'], {}), "('modelfit_CModel')\n", (732, 751), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((806, 863), 'lsst.pipe.tasks.functors.CustomFunctor', 'CustomFunctor', (['"""mag(modelfit_CModel) - mag(base_PsfFlux)"""'], {}), "('mag(modelfit_CModel) - mag(base_PsfFlux)')\n", (819, 863), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((907, 927), 'lsst.pipe.tasks.functors.DeconvolvedMoments', 'DeconvolvedMoments', ([], {}), '()\n', (925, 927), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((966, 995), 'lsst.pipe.tasks.functors.Column', 'Column', (['"""base_Footprint_nPix"""'], {}), "('base_Footprint_nPix')\n", (972, 995), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((1088, 1157), 'lsst.pipe.tasks.functors.CustomFunctor', 'CustomFunctor', (['"""mag(ext_photometryKron_KronFlux) - mag(base_PsfFlux)"""'], {}), 
"('mag(ext_photometryKron_KronFlux) - mag(base_PsfFlux)')\n", (1101, 1157), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((1214, 1273), 'lsst.pipe.tasks.functors.CustomFunctor', 'CustomFunctor', (['"""mag(base_GaussianFlux) - mag(base_PsfFlux)"""'], {}), "('mag(base_GaussianFlux) - mag(base_PsfFlux)')\n", (1227, 1273), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((1313, 1328), 'lsst.pipe.tasks.functors.SdssTraceSize', 'SdssTraceSize', ([], {}), '()\n', (1326, 1328), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((1374, 1396), 'lsst.pipe.tasks.functors.PsfSdssTraceSizeDiff', 'PsfSdssTraceSizeDiff', ([], {}), '()\n', (1394, 1396), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((1435, 1449), 'lsst.pipe.tasks.functors.HsmTraceSize', 'HsmTraceSize', ([], {}), '()\n', (1447, 1449), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((1493, 1514), 'lsst.pipe.tasks.functors.PsfHsmTraceSizeDiff', 'PsfHsmTraceSizeDiff', ([], {}), '()\n', (1512, 1514), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, 
SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((1548, 1568), 'lsst.pipe.tasks.functors.StarGalaxyLabeller', 'StarGalaxyLabeller', ([], {}), '()\n', (1566, 1568), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((2050, 2062), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (2059, 2062), True, 'import numpy as np\n'), ((2074, 2086), 'numpy.median', 'np.median', (['y'], {}), '(y)\n', (2083, 2086), True, 'import numpy as np\n'), ((2414, 2479), 'param.Path', 'param.Path', ([], {'default': '"""forced_big.parq"""', 'search_paths': "['.', 'data']"}), "(default='forced_big.parq', search_paths=['.', 'data'])\n", (2424, 2479), False, 'import param\n'), ((2492, 2516), 'param.String', 'param.String', ([], {'default': '""""""'}), "(default='')\n", (2504, 2516), False, 'import param\n'), ((2532, 2556), 'param.String', 'param.String', ([], {'default': '""""""'}), "(default='')\n", (2544, 2556), False, 'import param\n'), ((2712, 2736), 'param.String', 'param.String', ([], {'default': '""""""'}), "(default='')\n", (2724, 2736), False, 'import param\n'), ((2910, 2934), 'param.String', 'param.String', ([], {'default': '""""""'}), "(default='')\n", (2922, 2934), False, 'import param\n'), ((3088, 3158), 'param.ObjectSelector', 'param.ObjectSelector', ([], {'default': '"""all"""', 'objects': "['all', 'star', 'galaxy']"}), "(default='all', objects=['all', 'star', 'galaxy'])\n", (3108, 3158), False, 'import param\n'), ((3211, 3254), 'param.Integer', 'param.Integer', ([], {'default': '(20)', 'bounds': '(10, 100)'}), '(default=20, bounds=(10, 100))\n', (3224, 3254), False, 'import param\n'), ((3329, 3351), 'parambokeh.view.Plot', 'parambokeh.view.Plot', ([], {}), '()\n', (3349, 3351), False, 'import parambokeh\n'), ((2108, 2129), 'numpy.absolute', 'np.absolute', 
(['(x - xMed)'], {}), '(x - xMed)\n', (2119, 2129), True, 'import numpy as np\n'), ((2152, 2173), 'numpy.absolute', 'np.absolute', (['(y - yMed)'], {}), '(y - yMed)\n', (2163, 2173), True, 'import numpy as np\n'), ((5255, 5295), 'holoviews.Dimension', 'hv.Dimension', (['"""x"""'], {'label': "funcs['x'].name"}), "('x', label=funcs['x'].name)\n", (5267, 5295), True, 'import holoviews as hv\n'), ((5311, 5351), 'holoviews.Dimension', 'hv.Dimension', (['"""y"""'], {'label': "funcs['y'].name"}), "('y', label=funcs['y'].name)\n", (5323, 5351), True, 'import holoviews as hv\n'), ((5371, 5445), 'holoviews.Dataset', 'hv.Dataset', (['df[ok]'], {'kdims': "[xdim, ydim, 'ra', 'dec', 'id']", 'vdims': "['label']"}), "(df[ok], kdims=[xdim, ydim, 'ra', 'dec', 'id'], vdims=['label'])\n", (5381, 5445), True, 'import holoviews as hv\n'), ((5884, 5925), 'os.path.join', 'os.path.join', (['self.rootdir', '"""data"""', '"""ids"""'], {}), "(self.rootdir, 'data', 'ids')\n", (5896, 5925), False, 'import os\n'), ((7141, 7184), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('(x,y)', '($x, $y)')]"}), "(tooltips=[('(x,y)', '($x, $y)')])\n", (7150, 7184), False, 'from bokeh.models import HoverTool\n'), ((9525, 9537), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (9534, 9537), True, 'import numpy as np\n'), ((9553, 9565), 'numpy.median', 'np.median', (['y'], {}), '(y)\n', (9562, 9565), True, 'import numpy as np\n'), ((9999, 10019), 'holoviews.streams.RangeXY', 'hv.streams.RangeXY', ([], {}), '()\n', (10017, 10019), True, 'import holoviews as hv\n'), ((10128, 10186), 'holoviews.DynamicMap', 'hv.DynamicMap', (['self.make_scatter'], {'streams': '[self, range_xy]'}), '(self.make_scatter, streams=[self, range_xy])\n', (10141, 10186), True, 'import holoviews as hv\n'), ((10201, 10266), 'holoviews.DynamicMap', 'hv.DynamicMap', (['self.make_sky'], {'streams': '[self, range_sky, range_xy]'}), '(self.make_sky, streams=[self, range_sky, range_xy])\n', (10214, 10266), True, 'import holoviews 
as hv\n'), ((10284, 10350), 'holoviews.DynamicMap', 'hv.DynamicMap', (['self.make_xhist'], {'kdims': '[]', 'streams': '[self, range_xy]'}), '(self.make_xhist, kdims=[], streams=[self, range_xy])\n', (10297, 10350), True, 'import holoviews as hv\n'), ((10367, 10433), 'holoviews.DynamicMap', 'hv.DynamicMap', (['self.make_yhist'], {'kdims': '[]', 'streams': '[self, range_xy]'}), '(self.make_yhist, kdims=[], streams=[self, range_xy])\n', (10380, 10433), True, 'import holoviews as hv\n'), ((1771, 1794), 'lsst.pipe.tasks.functors.CustomFunctor', 'CustomFunctor', (['funcName'], {}), '(funcName)\n', (1784, 1794), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((1987, 1999), 'pandas.Series', 'pd.Series', (['x'], {}), '(x)\n', (1996, 1999), True, 'import pandas as pd\n'), ((2017, 2029), 'pandas.Series', 'pd.Series', (['y'], {}), '(y)\n', (2026, 2029), True, 'import pandas as pd\n'), ((5202, 5219), 'numpy.isfinite', 'np.isfinite', (['df.x'], {}), '(df.x)\n', (5213, 5219), True, 'import numpy as np\n'), ((5222, 5239), 'numpy.isfinite', 'np.isfinite', (['df.y'], {}), '(df.y)\n', (5233, 5239), True, 'import numpy as np\n'), ((7039, 7127), 'holoviews.operation.datashader.datashade', 'datashade', (['pts'], {'x_range': 'x_range', 'y_range': 'y_range', 'dynamic': '(False)', 'normalization': '"""log"""'}), "(pts, x_range=x_range, y_range=y_range, dynamic=False,\n normalization='log')\n", (7048, 7127), False, 'from holoviews.operation.datashader import aggregate, datashade, dynspread\n'), ((9591, 9612), 'numpy.absolute', 'np.absolute', (['(x - xMed)'], {}), '(x - xMed)\n', (9602, 9612), True, 'import numpy as np\n'), ((9639, 9660), 'numpy.absolute', 'np.absolute', (['(y - yMed)'], {}), '(y - yMed)\n', (9650, 9660), True, 'import numpy as np\n'), ((4597, 4609), 'lsst.pipe.tasks.functors.Column', 'Column', 
(['"""id"""'], {}), "('id')\n", (4603, 4609), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((4651, 4661), 'lsst.pipe.tasks.functors.RAColumn', 'RAColumn', ([], {}), '()\n', (4659, 4661), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((4703, 4714), 'lsst.pipe.tasks.functors.DecColumn', 'DecColumn', ([], {}), '()\n', (4712, 4714), False, 'from lsst.pipe.tasks.functors import Mag, CustomFunctor, DeconvolvedMoments, StarGalaxyLabeller, RAColumn, DecColumn, Column, SdssTraceSize, PsfSdssTraceSizeDiff, HsmTraceSize, PsfHsmTraceSizeDiff, CompositeFunctor\n'), ((6285, 6304), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (6301, 6304), False, 'import os\n'), ((6317, 6341), 'os.listdir', 'os.listdir', (['self.id_path'], {}), '(self.id_path)\n', (6327, 6341), False, 'import os\n'), ((8067, 8079), 'datashader.mean', 'ds.mean', (['"""y"""'], {}), "('y')\n", (8074, 8079), True, 'import datashader as ds\n'), ((8112, 8128), 'holoviews.QuadMesh', 'hv.QuadMesh', (['agg'], {}), '(agg)\n', (8123, 8128), True, 'import holoviews as hv\n'), ((8927, 9032), 'holoviews.operation.histogram', 'hv.operation.histogram', (['dset'], {'num_bins': "kwargs['nbins']", 'dimension': 'dim', 'normed': '"""height"""', 'bin_range': 'rng'}), "(dset, num_bins=kwargs['nbins'], dimension=dim,\n normed='height', bin_range=rng)\n", (8949, 9032), True, 'import holoviews as hv\n'), ((10040, 10060), 'holoviews.streams.RangeXY', 'hv.streams.RangeXY', ([], {}), '()\n', (10058, 10060), True, 'import holoviews as hv\n'), ((8353, 8365), 'datashader.mean', 'ds.mean', (['"""y"""'], {}), "('y')\n", (8360, 8365), True, 'import datashader as ds\n'), 
((6536, 6557), 'pandas.read_hdf', 'pd.read_hdf', (['f', '"""ids"""'], {}), "(f, 'ids')\n", (6547, 6557), True, 'import pandas as pd\n')] |
import logging
import os
from os.path import isfile, join
import numpy as np
from data_io import file_reading
from data_io import x_y_spliting
#import matplotlib.pyplot as plt
def data_plot(data_file, class_column=0, delimiter=' '):
    """Plot one example series per class and save each figure as a PDF.

    For every class label, the first matching row is reshaped into
    (attr_num, attr_len) and each attribute's series is plotted with its
    trailing zero-padding trimmed; the figure is saved as
    "asl_class_<label>.pdf".

    Args:
        data_file: path to the data file (first line = attribute count).
        class_column: column index holding the class label.
        delimiter: field delimiter.
    """
    # Bug fix: the module-level matplotlib import is commented out, so plt
    # was an unresolved name; import it here where it is actually needed.
    import matplotlib.pyplot as plt
    x_matrix, attr_num = file_reading(data_file, delimiter, True)
    x_matrix, y_vector = x_y_spliting(x_matrix, class_column)
    y_min = min(y_vector)
    y_max = max(y_vector)
    x_row, x_col = x_matrix.shape
    # Bug fix: integer division — '/' yields a float under Python 3, which
    # reshape rejects.
    attr_len = x_col // attr_num
    x_matrix = x_matrix.reshape(x_row, attr_num, attr_len)
    # Bug fix: range(y_min, y_max) skipped the largest class label;
    # data_checking treats the label range inclusively (y_max + 1).
    for label in range(y_min, y_max + 1):
        out_pdf = "asl_class_" + str(label) + ".pdf"
        fig = plt.figure()
        label_index = np.where(y_vector == label)[0]
        label_row = x_matrix[label_index[0], :, :]
        for attr in range(0, attr_num):
            plot_series = label_row[attr, :]
            # Series are zero-padded to a common length; trim the trailing
            # zeros before plotting.
            plt.plot(np.trim_zeros(plot_series, 'b'))
        fig.savefig(out_pdf, dpi=fig.dpi)
def data_checking(data_file, class_column=0, delimiter=' '):
    """Summarize a data file: matrix shape, label range and per-class counts.

    Args:
        data_file: path to the data file.
        class_column: column index holding the class label.
        delimiter: field delimiter.

    Returns:
        A multi-line human-readable summary string.
    """
    x_matrix, attr_num = file_reading(data_file, delimiter, True)
    x_matrix, y_vector = x_y_spliting(x_matrix, class_column)
    lines = ['x_matrix shape: ' + str(x_matrix.shape)]
    lines.append('class labels from ' + str(min(y_vector)) + ' to ' + str(max(y_vector)))
    labels, counts = np.unique(y_vector, return_counts=True)
    lines.append(str(dict(zip(labels, counts))))
    return '\n'.join(lines)
def arc_reduce_null(fname, null_class=1, null_max=1000, class_column=0, delimiter=' ', header = True):
    """Read a delimited data file, capping the rows of the "null" class.

    Args:
        fname: path to the data file.
        null_class: integer label of the null/background class.
        null_max: cap on the null class; a null row is kept only while the
            running count is strictly below this, so at most null_max - 1
            null rows are retained.
        class_column: column index holding the class label.
        delimiter: field delimiter.
        header: if True, the first line holds the attribute count and is
            skipped.

    Returns:
        (x_matrix, y_vector): float feature matrix with the class column
        removed, and the int class-label vector.
    """
    # Fixes: removed unused 'num' variable and the no-op
    # 'data_matrix.astype(float)' call whose result was discarded.
    data_matrix = []
    null_count = 0
    with open(fname) as f:
        for line in f:
            if header == True:
                # First line is the attribute count; consume and skip it.
                attr_num = int(line.strip())
                header = False
                continue
            data_row = line.split(delimiter)
            if int(data_row[class_column]) == null_class:
                null_count = null_count + 1
                if null_count < null_max:
                    data_matrix.append(data_row)
            else:
                data_matrix.append(data_row)
    row_num = len(data_matrix)
    col_num = len(data_matrix[0])
    data_matrix = np.array(data_matrix, dtype=float).reshape(row_num, col_num)
    y_vector = data_matrix[:, class_column].astype(int)
    return np.delete(data_matrix, class_column, 1), y_vector
if __name__ == '__main__':
    # Scratch driver: the commented blocks below are previously-used dataset
    # configurations; only the final assignments of data_file/data_key take
    # effect.
    #data_file = '../../data/gesture_data/processed_data/data.txt_trainTest10/train_0.txt'
    #data_file = '../../data/arc_activity_recognition/s1_ijcal/train.txt'
    #class_column = 0
    #delimiter = ' '
    #ret_str = data_checking(data_file, class_column, delimiter)
    #print ret_str
    #data_file = '../../data/arc_activity_recognition/s1_ijcal/test.txt'
    #class_column = 0
    #delimiter = ' '
    #ret_str = data_checking(data_file, class_column, delimiter)
    #print ret_str
    data_file = '../../data/evn/ds/DS_all_ready_to_model.csv_trainTest2_weekly_3attr/test_0.txt'
    #data_file = '../../data/human/subject10_ideal.log'
    #class_column = 119
    #delimiter = '\t'
    ##null_class=1
    ##null_max=1000
    ##x_matrix, y_vector = readFile(data_file, null_class, null_max, class_column);
    ##print x_matrix.shape
    ##print y_vector.shape
    #
    #data_file = '../../data/human/processed/ready/data.txt'#_trainTest10/train_0.txt'
    #class_column = 0
    #delimiter = ' '
    #ret_str = data_checking(data_file, class_column, delimiter)
    #print ret_str
    data_file = '../../data/dsa/train_test_10_fold/test_0.txt'
    #data_file = '../../data/dsa/output.txt'
    #data_file = '../../data/rar/train_test_10_fold_class_based/train_0.txt_class_0.txt'
    #data_file = "../../data/arabic/train_test_1_fold/train_0.txt"
    #data_file = "../../data/arabic/train_test_1_fold/test_0.txt"
    #data_file = "../../data/asl/train_test_3_fold/train_0.txt"
    #data_file = '../../data/rar/train_test_10_fold/test_0.txt'
    #data_file = '../../data/arc/train_test_10_fold/test_0.txt'
    #data_file = '../../data/fixed_arc/train_test_1_fold/test_0.txt'
    data_key = "phs"
    # data_key below overrides the one above; the "eeg" dataset is checked.
    data_key = "eeg"
    #data_key = "fad"
    data_file = "../../data/" + data_key +"/train.txt"
    class_column = 0
    delimiter = ' '
    #data_plot(data_file, class_column, delimiter)
    ret_str = data_checking(data_file, class_column, delimiter)
    print(ret_str)
| [
"numpy.unique",
"numpy.where",
"numpy.delete",
"numpy.array",
"data_io.x_y_spliting",
"data_io.file_reading"
] | [((259, 299), 'data_io.file_reading', 'file_reading', (['data_file', 'delimiter', '(True)'], {}), '(data_file, delimiter, True)\n', (271, 299), False, 'from data_io import file_reading\n'), ((325, 361), 'data_io.x_y_spliting', 'x_y_spliting', (['x_matrix', 'class_column'], {}), '(x_matrix, class_column)\n', (337, 361), False, 'from data_io import x_y_spliting\n'), ((1338, 1378), 'data_io.file_reading', 'file_reading', (['data_file', 'delimiter', '(True)'], {}), '(data_file, delimiter, True)\n', (1350, 1378), False, 'from data_io import file_reading\n'), ((1404, 1440), 'data_io.x_y_spliting', 'x_y_spliting', (['x_matrix', 'class_column'], {}), '(x_matrix, class_column)\n', (1416, 1440), False, 'from data_io import x_y_spliting\n'), ((1768, 1807), 'numpy.unique', 'np.unique', (['y_vector'], {'return_counts': '(True)'}), '(y_vector, return_counts=True)\n', (1777, 1807), True, 'import numpy as np\n'), ((2791, 2830), 'numpy.delete', 'np.delete', (['data_matrix', 'class_column', '(1)'], {}), '(data_matrix, class_column, 1)\n', (2800, 2830), True, 'import numpy as np\n'), ((678, 705), 'numpy.where', 'np.where', (['(y_vector == label)'], {}), '(y_vector == label)\n', (686, 705), True, 'import numpy as np\n'), ((2633, 2667), 'numpy.array', 'np.array', (['data_matrix'], {'dtype': 'float'}), '(data_matrix, dtype=float)\n', (2641, 2667), True, 'import numpy as np\n')] |
"""
-------------------------------------
# -*- coding: utf-8 -*-
# @Time : 2021/4/16 12:03:46
# @Author : Giyn
# @Email : <EMAIL>
# @File : mobility_model_construction.py
# @Software: PyCharm
-------------------------------------
"""
import numpy as np
from utils import ProgressBar
def markov_model(trajs: list, n_grid: int, _epsilon: float) -> np.ndarray:
    """
    markov model with Laplace noise for differential privacy
    Args:
        trajs   : trajectory data (each trajectory is a sequence of grid ids)
        n_grid  : number of secondary grids
        _epsilon: privacy budget
    Returns:
        O_: intermediate point transition probability matrix (rows normalized
            to sum to 1, except all-zero rows which are left at zero)
    """
    O_ = np.zeros((n_grid, n_grid))  # n_grid x n_grid transition counts
    for t in trajs:
        # Bug fix: a trajectory with fewer than 2 points has no transitions
        # and previously caused a ZeroDivisionError in the normalization.
        if len(t) < 2:
            continue
        O_sub = np.zeros((n_grid, n_grid))
        for curr_point, next_point in zip(t[:-1], t[1:]):
            O_sub[curr_point][next_point] += 1
        O_sub /= (len(t) - 1)  # transition probability of the trajectory
        O_ += O_sub
    p = ProgressBar(n_grid, 'Generate midpoint transition probability matrix')
    for i in range(n_grid):
        p.update(i)
        for j in range(n_grid):
            noise = np.random.laplace(0, 1 / _epsilon)  # add laplacian noise
            O_[i][j] += noise
            if O_[i][j] < 0:
                O_[i][j] = 0
    # Normalize each row into a probability distribution.
    row_sum = [sum(O_[i]) for i in range(n_grid)]
    for j in range(n_grid):
        # Bug fix: skip rows whose noisy counts were all clipped to zero —
        # dividing by a zero row sum produced NaN rows.
        if row_sum[j] > 0:
            O_[j] /= row_sum[j]
    return O_
def mobility_model_main(n_grid: int, _epsilon: float, grid_trajs_path: str,
                        midpoint_movement_path: str):
    """
    Build the noisy transition matrix from grid trajectories and persist it
    as whitespace-separated rows, one matrix row per line.
    Args:
        n_grid                : number of grids
        _epsilon              : privacy budget
        grid_trajs_path       : grid track file path
        midpoint_movement_path: midpoint transition probability matrix file path
    Returns:
    """
    with open(grid_trajs_path, 'r') as fin:
        # NOTE: eval() on file contents — only safe for trusted input files.
        trajectories = [eval(line) for line in fin]
    with open(midpoint_movement_path, 'w') as fout:
        matrix = markov_model(trajectories, n_grid, _epsilon)
        for row in matrix:
            fout.write(' '.join(str(value) for value in row) + '\n')
if __name__ == '__main__':
    # Total privacy budget; 3/9 of it is allocated to the mobility model.
    epsilon = 0.1
    mobility_model_main(64, epsilon * 3 / 9,
                        f'../data/Geolife Trajectories 1.3/Middleware/grid_trajs_epsilon_{epsilon}.txt',
                        f'../data/Geolife Trajectories 1.3/Middleware/midpoint_movement_epsilon_{epsilon}.txt')
| [
"utils.ProgressBar",
"numpy.zeros",
"numpy.random.laplace"
] | [((613, 639), 'numpy.zeros', 'np.zeros', (['(n_grid, n_grid)'], {}), '((n_grid, n_grid))\n', (621, 639), True, 'import numpy as np\n'), ((1012, 1082), 'utils.ProgressBar', 'ProgressBar', (['n_grid', '"""Generate midpoint transition probability matrix"""'], {}), "(n_grid, 'Generate midpoint transition probability matrix')\n", (1023, 1082), False, 'from utils import ProgressBar\n'), ((735, 761), 'numpy.zeros', 'np.zeros', (['(n_grid, n_grid)'], {}), '((n_grid, n_grid))\n', (743, 761), True, 'import numpy as np\n'), ((1183, 1217), 'numpy.random.laplace', 'np.random.laplace', (['(0)', '(1 / _epsilon)'], {}), '(0, 1 / _epsilon)\n', (1200, 1217), True, 'import numpy as np\n')] |
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
import json
import cv2
import numpy as np
import sys
import scipy
import math
from scipy.ndimage.filters import gaussian_filter
import argparse
import os
def add_path(path):
    """Prepend *path* to sys.path unless it is already present."""
    already_known = path in sys.path
    if not already_known:
        sys.path.insert(0, path)
# Command-line interface for the pose-estimation evaluation script.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument('--data', help='anno image path')
parser.add_argument('--caffe', help='load a model for training or evaluation')
parser.add_argument('--cpu', action='store_true', help='CPU ONLY')
parser.add_argument('--f', default=0, type=int, help="from id")
parser.add_argument('--e', default=30000, type=int, help="end id")
parser.add_argument('--weights', help='weights path')
parser.add_argument('--model', help='model path')
parser.add_argument('--anno', help='anno file path')
parser.add_argument('--name', help='output name, default eval', default='eval')
parser.add_argument('--input', help='input name in the first layer', default='image')
args = parser.parse_args()
# pycaffe lives under <caffe-root>/python; make it importable before the
# 'import caffe' below.
caffe_path = os.path.join(args.caffe, 'python')
add_path(caffe_path)
import caffe
class Rect:
    """Axis-aligned rectangle described by a position and a size.

    Attributes:
        x, y: coordinates of the rectangle's anchor corner (presumably the
            top-left in image coordinates — confirm against callers).
        w, h: width and height.
    """
    # __slots__ keeps instances compact; many rectangles may be created
    # during evaluation.
    __slots__ = ('x', 'y', 'w', 'h')

    def __init__(self, x, y, w, h):
        self.x = x
        self.y = y
        self.w = w
        self.h = h
def padRightDownCorner(img, stride, padValue):
    """Pad an HxWxC image on the bottom and right with *padValue* so that
    both height and width become multiples of *stride*.

    Returns:
        (img_padded, pad): the padded image and a 4-list
        [up, left, down, right] of pad amounts (up and left are always 0).
    """
    h, w = img.shape[0], img.shape[1]
    pad = [0,
           0,
           0 if h % stride == 0 else stride - h % stride,
           0 if w % stride == 0 else stride - w % stride]
    img_padded = img
    # Top/left pads are zero-sized; kept so pad indexing stays symmetric.
    top = np.tile(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))
    img_padded = np.concatenate((top, img_padded), axis=0)
    left = np.tile(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))
    img_padded = np.concatenate((left, img_padded), axis=1)
    bottom = np.tile(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))
    img_padded = np.concatenate((img_padded, bottom), axis=0)
    right = np.tile(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))
    img_padded = np.concatenate((img_padded, right), axis=1)
    return img_padded, pad
def preprocess(img):
    """Convert an image array to the network input layout.

    Pads H/W up to multiples of 32 with value 127.5, shifts every channel
    by -127.5 and scales by 1/128 (roughly [-1, 1]), then moves channels
    first (H*W*C -> C*H*W).  Returns (tensor, pad).
    """
    padded, pad = padRightDownCorner(np.float32(img), 32, 127.5)
    # Center each of the three channels around zero.
    for channel in range(3):
        padded[:, :, channel] = padded[:, :, channel] - 127.5
    padded = padded / 128.0
    # change H*W*C -> C*H*W
    return np.transpose(padded, (2, 0, 1)), pad
def applymodel(net, image):
    """Run the pose network on *image*; return full-resolution (heatmap, paf).

    Both network outputs live at 1/8 resolution on the padded input; they
    are upsampled x8, cropped to remove the padding, then resized back to
    the shape of the original image.
    """
    npoints = 14  # number of joints in this model (kept from original)
    oriImg = image.copy()
    orishape = oriImg.shape
    net_input, pad = preprocess(oriImg)
    _, height, width = net_input.shape
    net.blobs[args.input].reshape(1, 3, height, width)
    net.blobs[args.input].data[...] = net_input.reshape((1, 3, height, width))
    net.forward()

    def _upsample(blob_name):
        # C*H*W blob -> H*W*C, x8 upsample, crop padding, resize to original.
        raw = np.transpose(np.squeeze(net.blobs[blob_name].data[0, ...]), (1, 2, 0))
        up = cv2.resize(raw, (0, 0), fx=8, fy=8, interpolation=cv2.INTER_CUBIC)
        up = up[:height - pad[2], :width - pad[3], :]
        return cv2.resize(up, (orishape[1], orishape[0]), interpolation=cv2.INTER_CUBIC)

    heatmap = _upsample('Mconv7_stage6_L2')  # per-joint confidence maps
    paf = _upsample('Mconv7_stage6_L1')      # part affinity fields
    return heatmap, paf
def get_results(oriImg, heatmap, paf):
    """Group joint peaks into people (greedy multi-person assembly).

    Given the joint heatmaps and part-affinity fields from applymodel,
    returns (subset, candidate):
      candidate - array of every detected joint peak as (x, y, score, id)
      subset    - one row per person; columns 0..13 hold candidate ids
                  (-1 if the joint is missing), column -2 the accumulated
                  score, column -1 the number of assigned parts.
    """
    # Pairs of joint indices forming each limb (14-joint body model).
    limbSeq = [[0,1], [1,2], [2,3], [3,4], [1,5], [5,6], [6,7], [1,8], \
            [8,9], [9,10], [1,11], [11,12], [12,13]]
    # the middle joints heatmap correpondence
    mapIdx = [[15,16], [17,18], [19,20], [21,22], [23,24], [25,26], [27,28], [29,30], \
            [31,32], [33,34], [35,36], [37,38], [39,40]]
    heatmap_avg = heatmap
    paf_avg = paf
    all_peaks = []
    peak_counter = 0
    # --- Step 1: non-maximum suppression on each smoothed joint heatmap ---
    for part in range(14):
        x_list = []
        y_list = []
        map_ori = heatmap_avg[:,:,part]
        # NOTE(review): 'map' shadows the builtin; left unchanged on purpose.
        map = gaussian_filter(map_ori, sigma=3)
        map_left = np.zeros(map.shape)
        map_left[1:,:] = map[:-1,:]
        map_right = np.zeros(map.shape)
        map_right[:-1,:] = map[1:,:]
        map_up = np.zeros(map.shape)
        map_up[:,1:] = map[:,:-1]
        map_down = np.zeros(map.shape)
        map_down[:,:-1] = map[:,1:]
        # A peak is a 4-neighbour local maximum of the smoothed map above 0.1.
        peaks_binary = np.logical_and.reduce((map>=map_left, map>=map_right, map>=map_up, map>=map_down, map > 0.1))
        peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
        peaks_with_score = [x + (map_ori[x[1],x[0]],) for x in peaks]
        id = list(range(peak_counter, peak_counter + len(peaks)))
        peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]
        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)
    # --- Step 2: score candidate limb connections via PAF line integrals ---
    connection_all = []
    special_k = []
    mid_num = 10  # number of samples along each candidate limb segment
    for k in range(len(mapIdx)):
        score_mid = paf_avg[:,:,[x-15 for x in mapIdx[k]]]
        candA = all_peaks[limbSeq[k][0]]
        candB = all_peaks[limbSeq[k][1]]
        nA = len(candA)
        nB = len(candB)
        indexA, indexB = limbSeq[k]
        indexA += 1
        indexB += 1
        if(nA != 0 and nB != 0):
            connection_candidate = []
            for i in range(nA):
                for j in range(nB):
                    vec = np.subtract(candB[j][:2], candA[i][:2])
                    # Guard against coincident endpoints (zero-length limb).
                    if (vec == [0,0]).all():
                        vec = [1,1]
                    norm = math.sqrt(vec[0]*vec[0] + vec[1]*vec[1])
                    vec = np.divide(vec, norm)
                    # Sample mid_num points along the segment A->B.
                    startend = zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
                                   np.linspace(candA[i][1], candB[j][1], num=mid_num))
                    startend = list(startend)
                    vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
                                      for I in range(len(startend))])
                    vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
                                      for I in range(len(startend))])
                    # Dot product of the PAF with the limb direction, plus a
                    # penalty for limbs longer than half the image height.
                    score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                    score_with_dist_prior = sum(score_midpts)/len(score_midpts) + min(0.5*oriImg.shape[0]/norm-1, 0)
                    criterion1 = len(np.nonzero(score_midpts > 0.05)[0]) > 0.8 * len(score_midpts)
                    criterion2 = score_with_dist_prior > 0
                    #print(score_with_dist_prior)
                    if criterion1 and criterion2:
                        connection_candidate.append([i, j, score_with_dist_prior, score_with_dist_prior+candA[i][2]+candB[j][2]])
            #print(connection_candidate)
            #print(candA,candB)
            # Greedily accept the best-scoring connections, each endpoint
            # used at most once.
            connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
            connection = np.zeros((0,5))
            for c in range(len(connection_candidate)):
                i,j,s = connection_candidate[c][0:3]
                if(i not in connection[:,3] and j not in connection[:,4]):
                    connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
                    if(len(connection) >= min(nA, nB)):
                        break
            connection_all.append(connection)
        else:
            # One endpoint type has no peaks: this limb cannot be formed.
            special_k.append(k)
            connection_all.append([])
    #print(connection_all)
    # last number in each row is the total parts number of that person
    # the second last number in each row is the score of the overall configuration
    # --- Step 3: merge limb connections into per-person rows ---
    subset = -1 * np.ones((0, 16))
    candidate = np.array([item for sublist in all_peaks for item in sublist])
    #print(candidate)
    for k in range(len(mapIdx)):
        if k not in special_k:
            partAs = connection_all[k][:,0]
            partBs = connection_all[k][:,1]
            indexA, indexB = np.array(limbSeq[k])
            #print(partAs, partBs,len(connection_all[k]))
            for i in range(len(connection_all[k])): #= 1:size(temp,1)
                found = 0
                subset_idx = [-1, -1]
                # How many existing people already contain either endpoint?
                for j in range(len(subset)): #1:size(subset,1):
                    if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                        subset_idx[found] = j
                        found += 1
                #print(found)
                if found == 1:
                    # Extend the single matching person with the new joint.
                    j = subset_idx[0]
                    if(subset[j][indexB] != partBs[i]):
                        subset[j][indexB] = partBs[i]
                        subset[j][-1] += 1
                        subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    if(subset[j][indexA] != partAs[i]):
                        subset[j][indexA] = partAs[i]
                        subset[j][-1] += 1
                        subset[j][-2] += candidate[partAs[i].astype(int), 2] + connection_all[k][i][2]
                elif found == 2: # if found 2 and disjoint, merge them
                    j1, j2 = subset_idx
                    print("found = 2")
                    membership = ((subset[j1]>=0).astype(int) + (subset[j2]>=0).astype(int))[:-2]
                    if len(np.nonzero(membership == 2)[0]) == 0: #merge
                        # NOTE(review): '+ 1' compensates the -1 sentinel in
                        # unset slots, matching the reference implementation.
                        subset[j1][:-2] += (subset[j2][:-2] + 1)
                        subset[j1][-2:] += subset[j2][-2:]
                        subset[j1][-2] += connection_all[k][i][2]
                        subset = np.delete(subset, j2, 0)
                    else: # as like found == 1
                        subset[j1][indexB] = partBs[i]
                        subset[j1][-1] += 1
                        subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                # if find no partA in the subset, create a new subset
                elif not found and k < 14:
                    row = -1 * np.ones(16)
                    row[indexA] = partAs[i]
                    row[indexB] = partBs[i]
                    row[-1] = 2
                    row[-2] = sum(candidate[connection_all[k][i,:2].astype(int), 2]) + connection_all[k][i][2]
                    subset = np.vstack([subset, row])
    #print(subset)
    # delete some rows of subset which has few parts occur
    deleteIdx = [];
    for i in range(len(subset)):
        if subset[i][-1] < 4 or subset[i][-2]/subset[i][-1] < 0.4:
            deleteIdx.append(i)
    subset = np.delete(subset, deleteIdx, axis=0)
    return subset, candidate
if __name__ == '__main__':
    # Entry point: run the pose model on annotations [f, e) and dump the
    # detected keypoints to a JSON file.
    from_id = args.f
    end_id = args.e
    save_id = '{}'.format(args.gpu)
    save_file = args.name + '.json'
    if not args.cpu:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu)
    else:
        caffe.set_mode_cpu()
        save_id = ''
    # Load the annotation list (JSON array of records with 'image_id').
    fp = open(args.anno,'r')
    anno_str = fp.read()
    fp.close()
    anno = json.loads(anno_str)
    net = caffe.Net(args.model, args.weights, caffe.TEST)
    net.name = "openpose"
    img_path = args.data
    result = []
    for iii in range(from_id, end_id):
        a = anno[iii]
        test_image = os.path.join(img_path,a['image_id']+'.jpg')
        image = cv2.imread(test_image)
        # Run at half resolution; coordinates are scaled back by 2 below.
        imageToTest = cv2.resize(image, (0,0), fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
        heatmap, paf = applymodel(net, imageToTest)
        subset, candidate = get_results(imageToTest, heatmap, paf)
        res = {}
        res[u'image_id'] = a[u'image_id']
        res[u'keypoint_annotations'] = {}
        #print(subset)
        # Each person becomes a flat [x, y, visibility] * 14 list;
        # visibility 3 marks a missing joint, 1 a detected one.
        for i in range(len(subset)):
            res[u'keypoint_annotations'][u'human%d'%(len(subset)-i)] = list(np.zeros(14*3))
            for j in range(14):
                if subset[i][j] == -1:
                    res[u'keypoint_annotations'][u'human%d'%(len(subset)-i)][j*3+2] = 3
                    continue
                res[u'keypoint_annotations'][u'human%d'%(len(subset)-i)][j*3] = int(candidate[int(subset[i][j])][0])*2
                res[u'keypoint_annotations'][u'human%d'%(len(subset)-i)][j*3+1] = int(candidate[int(subset[i][j])][1])*2
                res[u'keypoint_annotations'][u'human%d'%(len(subset)-i)][j*3+2] = 1
        result.append(res)
        print('completed:%d/%d, %d person was detected'%(iii+1,end_id,len(subset)))
        #break
    print('tranform to ascii')
    #print(anno)
    #print(result)
    dic_json = json.dumps(result)
    print('saving to' + save_file)
    # GPU runs get a per-device suffix so parallel jobs don't clobber
    # each other's output files.
    if not args.cpu:
        save_file = save_file+'.'+save_id
    fw = open(save_file, 'w')
    fw.write(dic_json)
    fw.close()
    print('completed!')
| [
"sys.path.insert",
"scipy.ndimage.filters.gaussian_filter",
"math.sqrt",
"numpy.array",
"numpy.logical_and.reduce",
"caffe.set_mode_cpu",
"numpy.divide",
"numpy.multiply",
"argparse.ArgumentParser",
"numpy.delete",
"json.dumps",
"numpy.subtract",
"numpy.linspace",
"numpy.vstack",
"numpy.... | [((977, 1002), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1000, 1002), False, 'import argparse\n'), ((1764, 1798), 'os.path.join', 'os.path.join', (['args.caffe', '"""python"""'], {}), "(args.caffe, 'python')\n", (1776, 1798), False, 'import os\n'), ((2278, 2339), 'numpy.tile', 'np.tile', (['(img_padded[0:1, :, :] * 0 + padValue)', '(pad[0], 1, 1)'], {}), '(img_padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))\n', (2285, 2339), True, 'import numpy as np\n'), ((2355, 2399), 'numpy.concatenate', 'np.concatenate', (['(pad_up, img_padded)'], {'axis': '(0)'}), '((pad_up, img_padded), axis=0)\n', (2369, 2399), True, 'import numpy as np\n'), ((2417, 2478), 'numpy.tile', 'np.tile', (['(img_padded[:, 0:1, :] * 0 + padValue)', '(1, pad[1], 1)'], {}), '(img_padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))\n', (2424, 2478), True, 'import numpy as np\n'), ((2494, 2540), 'numpy.concatenate', 'np.concatenate', (['(pad_left, img_padded)'], {'axis': '(1)'}), '((pad_left, img_padded), axis=1)\n', (2508, 2540), True, 'import numpy as np\n'), ((2558, 2621), 'numpy.tile', 'np.tile', (['(img_padded[-2:-1, :, :] * 0 + padValue)', '(pad[2], 1, 1)'], {}), '(img_padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))\n', (2565, 2621), True, 'import numpy as np\n'), ((2637, 2683), 'numpy.concatenate', 'np.concatenate', (['(img_padded, pad_down)'], {'axis': '(0)'}), '((img_padded, pad_down), axis=0)\n', (2651, 2683), True, 'import numpy as np\n'), ((2702, 2765), 'numpy.tile', 'np.tile', (['(img_padded[:, -2:-1, :] * 0 + padValue)', '(1, pad[3], 1)'], {}), '(img_padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))\n', (2709, 2765), True, 'import numpy as np\n'), ((2781, 2828), 'numpy.concatenate', 'np.concatenate', (['(img_padded, pad_right)'], {'axis': '(1)'}), '((img_padded, pad_right), axis=1)\n', (2795, 2828), True, 'import numpy as np\n'), ((2900, 2915), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (2910, 2915), True, 'import numpy as 
np\n'), ((3741, 3811), 'cv2.resize', 'cv2.resize', (['heatmap', '(0, 0)'], {'fx': '(8)', 'fy': '(8)', 'interpolation': 'cv2.INTER_CUBIC'}), '(heatmap, (0, 0), fx=8, fy=8, interpolation=cv2.INTER_CUBIC)\n', (3751, 3811), False, 'import cv2\n'), ((3881, 3959), 'cv2.resize', 'cv2.resize', (['heatmap', '(orishape[1], orishape[0])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(heatmap, (orishape[1], orishape[0]), interpolation=cv2.INTER_CUBIC)\n', (3891, 3959), False, 'import cv2\n'), ((4091, 4157), 'cv2.resize', 'cv2.resize', (['paf', '(0, 0)'], {'fx': '(8)', 'fy': '(8)', 'interpolation': 'cv2.INTER_CUBIC'}), '(paf, (0, 0), fx=8, fy=8, interpolation=cv2.INTER_CUBIC)\n', (4101, 4157), False, 'import cv2\n'), ((4214, 4288), 'cv2.resize', 'cv2.resize', (['paf', '(orishape[1], orishape[0])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(paf, (orishape[1], orishape[0]), interpolation=cv2.INTER_CUBIC)\n', (4224, 4288), False, 'import cv2\n'), ((8638, 8699), 'numpy.array', 'np.array', (['[item for sublist in all_peaks for item in sublist]'], {}), '([item for sublist in all_peaks for item in sublist])\n', (8646, 8699), True, 'import numpy as np\n'), ((11465, 11501), 'numpy.delete', 'np.delete', (['subset', 'deleteIdx'], {'axis': '(0)'}), '(subset, deleteIdx, axis=0)\n', (11474, 11501), True, 'import numpy as np\n'), ((11908, 11928), 'json.loads', 'json.loads', (['anno_str'], {}), '(anno_str)\n', (11918, 11928), False, 'import json\n'), ((11948, 11995), 'caffe.Net', 'caffe.Net', (['args.model', 'args.weights', 'caffe.TEST'], {}), '(args.model, args.weights, caffe.TEST)\n', (11957, 11995), False, 'import caffe\n'), ((13434, 13452), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (13444, 13452), False, 'import json\n'), ((942, 966), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (957, 966), False, 'import sys\n'), ((3192, 3224), 'numpy.transpose', 'np.transpose', (['img_out', '(2, 0, 1)'], {}), '(img_out, (2, 0, 1))\n', (3204, 3224), True, 
'import numpy as np\n'), ((3697, 3716), 'numpy.squeeze', 'np.squeeze', (['heatmap'], {}), '(heatmap)\n', (3707, 3716), True, 'import numpy as np\n'), ((4036, 4051), 'numpy.squeeze', 'np.squeeze', (['paf'], {}), '(paf)\n', (4046, 4051), True, 'import numpy as np\n'), ((4896, 4929), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['map_ori'], {'sigma': '(3)'}), '(map_ori, sigma=3)\n', (4911, 4929), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((4950, 4969), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (4958, 4969), True, 'import numpy as np\n'), ((5026, 5045), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (5034, 5045), True, 'import numpy as np\n'), ((5100, 5119), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (5108, 5119), True, 'import numpy as np\n'), ((5173, 5192), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (5181, 5192), True, 'import numpy as np\n'), ((5253, 5359), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(map >= map_left, map >= map_right, map >= map_up, map >= map_down, map > 0.1)'], {}), '((map >= map_left, map >= map_right, map >= map_up, \n map >= map_down, map > 0.1))\n', (5274, 5359), True, 'import numpy as np\n'), ((8605, 8621), 'numpy.ones', 'np.ones', (['(0, 16)'], {}), '((0, 16))\n', (8612, 8621), True, 'import numpy as np\n'), ((11703, 11723), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (11721, 11723), False, 'import caffe\n'), ((11732, 11758), 'caffe.set_device', 'caffe.set_device', (['args.gpu'], {}), '(args.gpu)\n', (11748, 11758), False, 'import caffe\n'), ((11777, 11797), 'caffe.set_mode_cpu', 'caffe.set_mode_cpu', ([], {}), '()\n', (11795, 11797), False, 'import caffe\n'), ((12156, 12202), 'os.path.join', 'os.path.join', (['img_path', "(a['image_id'] + '.jpg')"], {}), "(img_path, a['image_id'] + '.jpg')\n", (12168, 12202), False, 'import os\n'), ((12216, 12238), 'cv2.imread', 'cv2.imread', 
(['test_image'], {}), '(test_image)\n', (12226, 12238), False, 'import cv2\n'), ((12261, 12333), 'cv2.resize', 'cv2.resize', (['image', '(0, 0)'], {'fx': '(0.5)', 'fy': '(0.5)', 'interpolation': 'cv2.INTER_CUBIC'}), '(image, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n', (12271, 12333), False, 'import cv2\n'), ((7896, 7912), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {}), '((0, 5))\n', (7904, 7912), True, 'import numpy as np\n'), ((8903, 8923), 'numpy.array', 'np.array', (['limbSeq[k]'], {}), '(limbSeq[k])\n', (8911, 8923), True, 'import numpy as np\n'), ((12692, 12708), 'numpy.zeros', 'np.zeros', (['(14 * 3)'], {}), '(14 * 3)\n', (12700, 12708), True, 'import numpy as np\n'), ((5372, 5396), 'numpy.nonzero', 'np.nonzero', (['peaks_binary'], {}), '(peaks_binary)\n', (5382, 5396), True, 'import numpy as np\n'), ((5401, 5425), 'numpy.nonzero', 'np.nonzero', (['peaks_binary'], {}), '(peaks_binary)\n', (5411, 5425), True, 'import numpy as np\n'), ((6285, 6324), 'numpy.subtract', 'np.subtract', (['candB[j][:2]', 'candA[i][:2]'], {}), '(candB[j][:2], candA[i][:2])\n', (6296, 6324), True, 'import numpy as np\n'), ((6433, 6477), 'math.sqrt', 'math.sqrt', (['(vec[0] * vec[0] + vec[1] * vec[1])'], {}), '(vec[0] * vec[0] + vec[1] * vec[1])\n', (6442, 6477), False, 'import math\n'), ((6500, 6520), 'numpy.divide', 'np.divide', (['vec', 'norm'], {}), '(vec, norm)\n', (6509, 6520), True, 'import numpy as np\n'), ((8128, 8188), 'numpy.vstack', 'np.vstack', (['[connection, [candA[i][3], candB[j][3], s, i, j]]'], {}), '([connection, [candA[i][3], candB[j][3], s, i, j]])\n', (8137, 8188), True, 'import numpy as np\n'), ((6557, 6607), 'numpy.linspace', 'np.linspace', (['candA[i][0]', 'candB[j][0]'], {'num': 'mid_num'}), '(candA[i][0], candB[j][0], num=mid_num)\n', (6568, 6607), True, 'import numpy as np\n'), ((6646, 6696), 'numpy.linspace', 'np.linspace', (['candA[i][1]', 'candB[j][1]'], {'num': 'mid_num'}), '(candA[i][1], candB[j][1], num=mid_num)\n', (6657, 6696), True, 
'import numpy as np\n'), ((7138, 7164), 'numpy.multiply', 'np.multiply', (['vec_x', 'vec[0]'], {}), '(vec_x, vec[0])\n', (7149, 7164), True, 'import numpy as np\n'), ((7167, 7193), 'numpy.multiply', 'np.multiply', (['vec_y', 'vec[1]'], {}), '(vec_y, vec[1])\n', (7178, 7193), True, 'import numpy as np\n'), ((10504, 10528), 'numpy.delete', 'np.delete', (['subset', 'j2', '(0)'], {}), '(subset, j2, 0)\n', (10513, 10528), True, 'import numpy as np\n'), ((11196, 11220), 'numpy.vstack', 'np.vstack', (['[subset, row]'], {}), '([subset, row])\n', (11205, 11220), True, 'import numpy as np\n'), ((7348, 7379), 'numpy.nonzero', 'np.nonzero', (['(score_midpts > 0.05)'], {}), '(score_midpts > 0.05)\n', (7358, 7379), True, 'import numpy as np\n'), ((10924, 10935), 'numpy.ones', 'np.ones', (['(16)'], {}), '(16)\n', (10931, 10935), True, 'import numpy as np\n'), ((10236, 10263), 'numpy.nonzero', 'np.nonzero', (['(membership == 2)'], {}), '(membership == 2)\n', (10246, 10263), True, 'import numpy as np\n')] |
import argparse
import json
import os
from pprint import pprint
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from a2t.topic_classification.mlm import MLMTopicClassifier
from a2t.topic_classification.mnli import (
NLITopicClassifier,
NLITopicClassifierWithMappingHead,
)
from a2t.topic_classification.nsp import NSPTopicClassifier
# Map the config's "classification_model" name to the classifier class it
# selects.
CLASSIFIERS = {
    "mnli": NLITopicClassifier,
    "nsp": NSPTopicClassifier,
    "mlm": MLMTopicClassifier,
    "mnli-mapping": NLITopicClassifierWithMappingHead,
}
def top_k_accuracy(output, labels, k=5):
    """Fraction of rows whose gold label is among the k highest scores."""
    # Rank classes by descending score (same tie handling as argsort+reverse).
    ranked_desc = np.argsort(output)[:, ::-1]
    top_candidates = ranked_desc[:, :k]
    n_correct = sum(gold in row for gold, row in zip(labels, top_candidates))
    return n_correct / len(labels)
# CLI: dataset + topics files are positional, the experiment config is a flag.
parser = argparse.ArgumentParser(
    prog="run_evaluation",
    description="Run a evaluation for each configuration.",
)
parser.add_argument("dataset", type=str, help="Dataset file.")
parser.add_argument("topics", type=str, help="Topics or classes file.")
parser.add_argument(
    "--config",
    type=str,
    dest="config",
    help="Configuration file for the experiment.",
)
args = parser.parse_args()
# One topic per line; underscores become spaces so they read as labels.
with open(args.topics, "rt") as f:
    topics = [topic.rstrip().replace("_", " ") for topic in f]
    topic2id = {topic: i for i, topic in enumerate(topics)}
# Dataset is TSV: <ignored>\t<label>\t<context>.
with open(args.dataset, "rt") as f:
    contexts, labels = [], []
    for line in f:
        _, label, context = line.strip().split("\t")
        contexts.append(context)
        labels.append(topic2id[label])
labels = np.array(labels)
with open(args.config, "rt") as f:
    config = json.load(f)
# Run every configuration, saving raw outputs and accumulating metrics
# back into the configuration dicts.
for configuration in config:
    os.makedirs(f"experiments/{configuration['name']}", exist_ok=True)
    classifier = CLASSIFIERS[configuration["classification_model"]](labels=topics, **configuration)
    output = classifier(contexts, batch_size=configuration["batch_size"])
    np.save(f"experiments/{configuration['name']}/output.npy", output)
    np.save(f"experiments/{configuration['name']}/labels.npy", labels)
    pre, rec, f1, _ = precision_recall_fscore_support(labels, np.argmax(output, -1), average="weighted")
    configuration["precision"] = pre
    configuration["recall"] = rec
    configuration["f1-score"] = f1
    configuration["top-1"] = top_k_accuracy(output, labels, k=1)
    configuration["top-3"] = top_k_accuracy(output, labels, k=3)
    configuration["top-5"] = top_k_accuracy(output, labels, k=5)
    configuration["topk-curve"] = [top_k_accuracy(output, labels, k=i) for i in range(len(topics))]
    pprint(configuration)
# Write the metrics back into the same config file.
with open(args.config, "wt") as f:
    json.dump(config, f, indent=4)
| [
"argparse.ArgumentParser",
"os.makedirs",
"json.dump",
"numpy.argmax",
"numpy.argsort",
"numpy.array",
"json.load",
"pprint.pprint",
"numpy.save"
] | [((711, 818), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""run_evaluation"""', 'description': '"""Run a evaluation for each configuration."""'}), "(prog='run_evaluation', description=\n 'Run a evaluation for each configuration.')\n", (734, 818), False, 'import argparse\n'), ((1488, 1504), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1496, 1504), True, 'import numpy as np\n'), ((1554, 1566), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1563, 1566), False, 'import json\n'), ((1601, 1667), 'os.makedirs', 'os.makedirs', (['f"""experiments/{configuration[\'name\']}"""'], {'exist_ok': '(True)'}), '(f"experiments/{configuration[\'name\']}", exist_ok=True)\n', (1612, 1667), False, 'import os\n'), ((1846, 1912), 'numpy.save', 'np.save', (['f"""experiments/{configuration[\'name\']}/output.npy"""', 'output'], {}), '(f"experiments/{configuration[\'name\']}/output.npy", output)\n', (1853, 1912), True, 'import numpy as np\n'), ((1917, 1983), 'numpy.save', 'np.save', (['f"""experiments/{configuration[\'name\']}/labels.npy"""', 'labels'], {}), '(f"experiments/{configuration[\'name\']}/labels.npy", labels)\n', (1924, 1983), True, 'import numpy as np\n'), ((2494, 2515), 'pprint.pprint', 'pprint', (['configuration'], {}), '(configuration)\n', (2500, 2515), False, 'from pprint import pprint\n'), ((2556, 2586), 'json.dump', 'json.dump', (['config', 'f'], {'indent': '(4)'}), '(config, f, indent=4)\n', (2565, 2586), False, 'import json\n'), ((2046, 2067), 'numpy.argmax', 'np.argmax', (['output', '(-1)'], {}), '(output, -1)\n', (2055, 2067), True, 'import numpy as np\n'), ((597, 615), 'numpy.argsort', 'np.argsort', (['output'], {}), '(output)\n', (607, 615), True, 'import numpy as np\n')] |
'''
Author: <NAME>
'''
import numpy as np
from qpsolvers import solve_qp
def linear(x1, x2, p = None):
    """Linear kernel: the plain inner product of x1 and x2 (p is ignored)."""
    return np.dot(x1, x2)
def polynomial(x1, x2, d):
    """Polynomial kernel of degree d: (1 + <x1, x2>) ** d."""
    base = 1 + np.dot(x1, x2)
    return base ** d
def rbf(x1, x2, l):
    """Gaussian RBF kernel with length-scale l: exp(-||x1-x2||^2 / (2 l^2))."""
    diff = x1 - x2
    return np.exp(-np.dot(diff, diff) / (2 * l ** 2))
def ND_hyperplane(x1, svectors, labels, alphas, kernel, p=None):
    """Decision value of x1 w.r.t. the kernel expansion (without bias).

    Computes sum_i alpha_i * y_i * K(x1, sv_i) over the rows of svectors.
    """
    num_sv = int(np.size(svectors, 0))
    total = 0
    for idx in range(num_sv):
        total += alphas[idx] * labels[idx] * kernel(x1, svectors[idx], p)
    return total
def SVM_learner(data,C,kernel,p = None ):
    """Train a soft-margin kernel SVM by solving the dual QP with qpsolvers."""
    # INPUT :
    # data - m X n+1 matrix, where m is the number of training points and n+1 th column corresponds to vector of training labels for the training data
    # C - SVM regularization parameter (positive real number)
    #
    #
    # OUTPUT :
    # returns the structure 'model' which has the following fields:
    #
    # b - SVM bias term
    # sv - the subset of training data, which are the support vectors
    # sv_alphas - m X 1 vector of support vector coefficients
    # sv_labels - corresponding labels of the support vectors
    #
    # Install "qpsolvers" package
    # Github link: https://github.com/stephane-caron/qpsolvers
    # alphas = solve_qp(P, q, G, h, A, u)
    #==============================================================================
    # NOTE(review): only the first two columns are used as features, so this
    # implementation effectively assumes n == 2 despite the m X n+1 comment.
    traindata = np.asarray(data[:,:2])
    trainlabels = np.asarray(data[:,2:])
    numdata = int( np.size(traindata,0) )
    #=========Gram matrix======================
    X = np.zeros((numdata,numdata))
    for i in range(0,numdata):
        for j in range(0,numdata):
            X[i,j] = kernel(traindata[i,:],traindata[j,:],p)
    #==========Kernal matrix==================
    # P[i,j] = y_i * y_j * K(x_i, x_j), the Hessian of the dual objective.
    Y = np.outer(trainlabels,trainlabels)
    P = np.multiply(X,Y)
    #==========minus of p1 norm===================================
    q = np.ones( numdata )*(-1)
    #============equality constraits==============================
    # sum_i alpha_i * y_i == 0
    A = trainlabels.reshape( (1,numdata) )
    b = np.zeros(1)
    #================ineqaulity constraits========================
    # Box constraints 0 <= alpha_i <= C encoded as G*alpha <= h.
    G = np.vstack( (np.identity(numdata)*(-1),np.identity(numdata) ) )
    h = np.hstack( (np.zeros(numdata),np.ones(numdata)*C) )
    #=================quadratic minimization======================
    try:
        alphas = solve_qp(P, q, G, h, A, b)
    except ValueError:
        # If P is not positive definite, regularize the diagonal and retry.
        P = P + (np.identity(numdata))*(1e-5)
        alphas = solve_qp(P, q, G, h, A, b)
    #all alphas not approximately equal to zero are support vectors
    index = np.where(alphas > 1e-5)
    support_vector_alphas = alphas[index[0]]
    support_vector_labels = trainlabels[index[0]]
    support_vectors = traindata[index[0],:]
    #==================bias==============================================
    # Average the bias over all margin support vectors (alpha < C).
    b1 = []
    for i in range(0,len(index[0]) ):
        if(support_vector_alphas[i]< C- 1e-5):
            b1.append( support_vector_labels[i] - ND_hyperplane(support_vectors[i],support_vectors,support_vector_labels,support_vector_alphas,kernel,p) )
    b = np.mean(b1)
    #==============================================================================
    class model_struct:
        pass
    model = model_struct()
    model.b = b
    model.sv = support_vectors
    model.sv_alphas = support_vector_alphas
    model.sv_labels = support_vector_labels
    model.kernel = kernel
    model.p = p
    return model
def SVM_classifier(data, model):
    """Predict labels for test points with a trained SVM model.

    INPUT
      data  - m X n matrix whose first two columns are the test samples
      model - structure returned by SVM_learner
    OUTPUT
      m x 1 vector of predicted labels (sign of the decision value)
    """
    testdata = np.asarray(data[:, :2])
    num_points = int(np.size(testdata, 0))
    decision_values = np.empty((num_points, 1))
    # Signed distance (up to scaling) of every test point from the hyperplane.
    for row in range(num_points):
        decision_values[row] = model.b + ND_hyperplane(
            testdata[row, :], model.sv, model.sv_labels, model.sv_alphas,
            model.kernel, model.p)
    # The sign of the decision value is the predicted class.
    return np.sign(decision_values)
| [
"numpy.identity",
"numpy.mean",
"numpy.multiply",
"numpy.ones",
"qpsolvers.solve_qp",
"numpy.where",
"numpy.size",
"numpy.asarray",
"numpy.dot",
"numpy.outer",
"numpy.zeros",
"numpy.empty",
"numpy.sign"
] | [((122, 136), 'numpy.dot', 'np.dot', (['x1', 'x2'], {}), '(x1, x2)\n', (128, 136), True, 'import numpy as np\n'), ((1429, 1452), 'numpy.asarray', 'np.asarray', (['data[:, :2]'], {}), '(data[:, :2])\n', (1439, 1452), True, 'import numpy as np\n'), ((1471, 1494), 'numpy.asarray', 'np.asarray', (['data[:, 2:]'], {}), '(data[:, 2:])\n', (1481, 1494), True, 'import numpy as np\n'), ((1603, 1631), 'numpy.zeros', 'np.zeros', (['(numdata, numdata)'], {}), '((numdata, numdata))\n', (1611, 1631), True, 'import numpy as np\n'), ((1825, 1859), 'numpy.outer', 'np.outer', (['trainlabels', 'trainlabels'], {}), '(trainlabels, trainlabels)\n', (1833, 1859), True, 'import numpy as np\n'), ((1868, 1885), 'numpy.multiply', 'np.multiply', (['X', 'Y'], {}), '(X, Y)\n', (1879, 1885), True, 'import numpy as np\n'), ((2123, 2134), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2131, 2134), True, 'import numpy as np\n'), ((2693, 2717), 'numpy.where', 'np.where', (['(alphas > 1e-05)'], {}), '(alphas > 1e-05)\n', (2701, 2717), True, 'import numpy as np\n'), ((3212, 3223), 'numpy.mean', 'np.mean', (['b1'], {}), '(b1)\n', (3219, 3223), True, 'import numpy as np\n'), ((3957, 3980), 'numpy.asarray', 'np.asarray', (['data[:, :2]'], {}), '(data[:, :2])\n', (3967, 3980), True, 'import numpy as np\n'), ((4179, 4201), 'numpy.empty', 'np.empty', (['(numdata, 1)'], {}), '((numdata, 1))\n', (4187, 4201), True, 'import numpy as np\n'), ((4503, 4536), 'numpy.sign', 'np.sign', (['distance_from_hyperplane'], {}), '(distance_from_hyperplane)\n', (4510, 4536), True, 'import numpy as np\n'), ((385, 405), 'numpy.size', 'np.size', (['svectors', '(0)'], {}), '(svectors, 0)\n', (392, 405), True, 'import numpy as np\n'), ((1521, 1542), 'numpy.size', 'np.size', (['traindata', '(0)'], {}), '(traindata, 0)\n', (1528, 1542), True, 'import numpy as np\n'), ((1972, 1988), 'numpy.ones', 'np.ones', (['numdata'], {}), '(numdata)\n', (1979, 1988), True, 'import numpy as np\n'), ((2456, 2482), 'qpsolvers.solve_qp', 
'solve_qp', (['P', 'q', 'G', 'h', 'A', 'b'], {}), '(P, q, G, h, A, b)\n', (2464, 2482), False, 'from qpsolvers import solve_qp\n'), ((4125, 4145), 'numpy.size', 'np.size', (['testdata', '(0)'], {}), '(testdata, 0)\n', (4132, 4145), True, 'import numpy as np\n'), ((180, 194), 'numpy.dot', 'np.dot', (['x1', 'x2'], {}), '(x1, x2)\n', (186, 194), True, 'import numpy as np\n'), ((2262, 2282), 'numpy.identity', 'np.identity', (['numdata'], {}), '(numdata)\n', (2273, 2282), True, 'import numpy as np\n'), ((2308, 2325), 'numpy.zeros', 'np.zeros', (['numdata'], {}), '(numdata)\n', (2316, 2325), True, 'import numpy as np\n'), ((2572, 2598), 'qpsolvers.solve_qp', 'solve_qp', (['P', 'q', 'G', 'h', 'A', 'b'], {}), '(P, q, G, h, A, b)\n', (2580, 2598), False, 'from qpsolvers import solve_qp\n'), ((252, 276), 'numpy.dot', 'np.dot', (['(x1 - x2)', '(x1 - x2)'], {}), '(x1 - x2, x1 - x2)\n', (258, 276), True, 'import numpy as np\n'), ((2236, 2256), 'numpy.identity', 'np.identity', (['numdata'], {}), '(numdata)\n', (2247, 2256), True, 'import numpy as np\n'), ((2326, 2342), 'numpy.ones', 'np.ones', (['numdata'], {}), '(numdata)\n', (2333, 2342), True, 'import numpy as np\n'), ((2525, 2545), 'numpy.identity', 'np.identity', (['numdata'], {}), '(numdata)\n', (2536, 2545), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 7 14:28:52 2017
@author: ning
This script is to do two things,
1. converting epochs to power spectrograms
2. fit and test a linear model to the data
"""
import numpy as np
from matplotlib import pyplot as plt
import os
import mne
from glob import glob
from sklearn.preprocessing import MinMaxScaler
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedShuffleSplit,cross_val_score
from sklearn import metrics
from mne.decoding import Vectorizer,SlidingEstimator,cross_val_multiscore,LinearModel,get_coef
from sklearn import utils
from tqdm import tqdm
def make_clf(pattern=False, vectorized=False):
    """Build the classification pipeline: [vectorizer] -> scaler -> linear SVM.

    pattern    : wrap the SVM in an mne LinearModel so the spatial patterns
                 can be recovered later with get_coef.
    vectorized : prepend a Vectorizer step that flattens 3D epochs to 2D.
    Returns an sklearn Pipeline.
    """
    steps = []
    if vectorized:
        steps.append(('vectorizer', Vectorizer()))
    steps.append(('scaler', MinMaxScaler()))
    # use linear SVM as the estimator; balanced classes, probability
    # outputs so ROC-AUC can be computed downstream.
    svm = SVC(max_iter=-1, kernel='linear', random_state=12345,
              class_weight='balanced', probability=True)
    steps.append(('estimator', LinearModel(svm) if pattern else svm))
    return Pipeline(steps)
os.chdir('D:/Ning - spindle/training set')
# define data working directory and the result saving directory
working_dir='D:\\NING - spindle\\Spindle_by_Graphical_Features\\eventRelated_12_20_2017\\'
saving_dir = 'D:\\NING - spindle\\SpindleClassification_DeepConvolutionalNeuralNets\\Baseline models\Results\\Linear model\\'
if not os.path.exists(saving_dir):
    os.mkdir(saving_dir)
# we don't get the data and labels simultaneously
# get the labels first because we need to do some other preprocessing before
# we put all the data together
labels = []
# Collect the event codes from every epochs file (last column of .events).
for e in glob(os.path.join(working_dir,'*-epo.fif')):
    temp_epochs = mne.read_epochs(e,preload=False)
    labels.append(temp_epochs.events[:,-1])
    # save memory
    del temp_epochs
labels = np.concatenate(labels)
# get the data
# scale the data to (0,1)
data = []
for tf in glob(os.path.join(working_dir,'*-tfr.h5')):
    tfcs = mne.time_frequency.read_tfrs(tf)[0]
    data_ = tfcs.data
    # define a (0,1) scaler
    scaler = MinMaxScaler(feature_range=(0,1))
    # define a vectorizer so we can transform the data from 3D to 2D
    vectorizer = Vectorizer()
    data_vec = vectorizer.fit_transform(data_)
    data_scaled = scaler.fit_transform(data_vec)
    # after we scale the data to (0,1), we transform the data from 2D back to 3D
    data_scaled = vectorizer.inverse_transform(data_scaled)
    # Free intermediates aggressively - the spectrograms are large.
    del tfcs
    del data_, data_vec
    data.append(data_scaled)
    del data_scaled
data = np.concatenate(data,axis=0)
# shuffle the order of the feature matrix and the labels
for _ in range(10):
    data, labels = utils.shuffle(data,labels)
# customized the temporal decoding process
# define 10-fold cross validation
cv = StratifiedShuffleSplit(n_splits=10,random_state=12345)
coefs = []
scores = []
# Fit a separate classifier at every time sample of the spectrogram.
for time_ in tqdm(range(data.shape[-1]),desc='temporal decoding'):
    coef = []
    scores_=[]
    # at each time point, we use the frequency information in each channel as the features
    for train,test in cv.split(data,labels):
        data_ = data[train,:,:,time_]
        clf = make_clf(True,True)
        clf.fit(data_,labels[train])
#        print(time_,metrics.classification_report(clf.predict(data[test,:,:,time_]),labels[test]))
        # get the patterns decoded by the classifier
        coef_ = get_coef(clf,'patterns_',True)
        coef.append(coef_)
        # Score with ROC-AUC on the probability of the positive class.
        temp = metrics.roc_auc_score(labels[test],clf.predict_proba(data[test,:,:,time_])[:,-1])
        scores_.append(temp)
    print('\n','%d'%time_,'auc = ',np.mean(scores_),'\n')
    coefs.append(np.array(coef))
    scores.append(scores_)
coefs = np.array(coefs)
scores = np.array(scores)
# get info object to plot the "pattern"
temp_epochs = mne.read_epochs(working_dir+'sub5_d2-eventsRelated-epo.fif')
info = temp_epochs.info
import pickle
pickle.dump([scores,info,coefs],open(saving_dir+'score_info_coefs.p','wb'))
scores,info,coefs = pickle.load(open(saving_dir+'score_info_coefs.p','rb'))
font = {
'weight' : 'bold',
'size' : 18}
import matplotlib
matplotlib.rc('font', **font)
# plot the temporal decoding
fig,ax=plt.subplots(figsize=(12,6))
times = np.linspace(0,3000,192)
ax.plot(times,scores.mean(1),color='black',alpha=1.,label='Decoding Scores (Mean ROC AUC)')
ax.fill_between(times,scores.mean(1)-scores.std(1)/np.sqrt(10),scores.mean(1)+scores.std(1)/np.sqrt(10),
color='red',alpha=0.4,label='Decoding Scores (SE ROC AUC)')
ax.axvline(500,linestyle='--',color='black',label='Sleep Spindle Marked Onset')
ax.set(xlabel='Time (ms)',ylabel='AUC ROC',title='Decoding Results\nLinear SVM, 10-fold\nSleep Spindle (N=3372) vs Non-Spindle (N=3368)',
xlim=(0,3000),ylim=(0.5,1.))
ax.legend()
fig.savefig(saving_dir+'decoding results.png',dpi=400,bbox_inches='tight')
coefs = np.swapaxes(coefs,0,-1)
coefs = np.swapaxes(coefs,0,2)
coefs = np.swapaxes(coefs,0,1)
# plot the linear decoding patterns
# one subplot per channel (4 rows x 8 columns = 32 channels from info['ch_names'])
fig,axes = plt.subplots(nrows=4,ncols=int(32/4),figsize=(20,8),squeeze=False)
for ii,(ax,title) in enumerate(zip(axes.flatten(),info['ch_names'])):
    # time-frequency pattern for this channel, averaged over the first axis
    im = ax.imshow(coefs.mean(0)[ii,:,:],origin='lower',aspect='auto',extent=[0,3000,6,22],
                   vmin=0,vmax=.25)
    # ax.scatter(10,20,label=title)
    # only label the axes on the outer edge of the 4x8 grid
    if (ii == 0) or (ii == 8) or (ii == 16) :
        ax.set(xticks=[],ylabel='Frequency')
    elif ii == 24:
        ax.set(xlabel='Time',ylabel='Frequency')
    elif (ii == np.arange(25,32)).any():
        ax.set(yticks=[],xlabel='Time')
    else:
        ax.set(xticks=[],yticks=[])
    # ax.legend()
    ax.set(title=title)
# leave room on the right for a shared colorbar
fig.subplots_adjust(bottom=.1,top=.86,left=.1,right=.8,wspace=.02,hspace=.3)
cb_ax=fig.add_axes([.83,.1,.02,.8])
cbar = fig.colorbar(im,cax=cb_ax)
fig.savefig(saving_dir+'decoding patterns.png',dpi=500,bbox_inches='tight')
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.sqrt",
"numpy.array",
"matplotlib.rc",
"numpy.arange",
"os.path.exists",
"numpy.mean",
"mne.decoding.LinearModel",
"numpy.linspace",
"mne.read_epochs",
"numpy.concatenate",
"mne.time_frequency.read_tfrs",
"os.mkdir",
"sklearn.preproc... | [((1132, 1174), 'os.chdir', 'os.chdir', (['"""D:/Ning - spindle/training set"""'], {}), "('D:/Ning - spindle/training set')\n", (1140, 1174), False, 'import os\n'), ((1888, 1910), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (1902, 1910), True, 'import numpy as np\n'), ((2591, 2619), 'numpy.concatenate', 'np.concatenate', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (2605, 2619), True, 'import numpy as np\n'), ((2827, 2882), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(10)', 'random_state': '(12345)'}), '(n_splits=10, random_state=12345)\n', (2849, 2882), False, 'from sklearn.model_selection import StratifiedShuffleSplit, cross_val_score\n'), ((3725, 3740), 'numpy.array', 'np.array', (['coefs'], {}), '(coefs)\n', (3733, 3740), True, 'import numpy as np\n'), ((3750, 3766), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3758, 3766), True, 'import numpy as np\n'), ((3823, 3885), 'mne.read_epochs', 'mne.read_epochs', (["(working_dir + 'sub5_d2-eventsRelated-epo.fif')"], {}), "(working_dir + 'sub5_d2-eventsRelated-epo.fif')\n", (3838, 3885), False, 'import mne\n'), ((4151, 4180), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (4164, 4180), False, 'import matplotlib\n'), ((4217, 4246), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (4229, 4246), True, 'from matplotlib import pyplot as plt\n'), ((4256, 4281), 'numpy.linspace', 'np.linspace', (['(0)', '(3000)', '(192)'], {}), '(0, 3000, 192)\n', (4267, 4281), True, 'import numpy as np\n'), ((4904, 4929), 'numpy.swapaxes', 'np.swapaxes', (['coefs', '(0)', '(-1)'], {}), '(coefs, 0, -1)\n', (4915, 4929), True, 'import numpy as np\n'), ((4936, 4960), 'numpy.swapaxes', 'np.swapaxes', (['coefs', '(0)', '(2)'], {}), '(coefs, 0, 2)\n', (4947, 4960), True, 'import numpy as np\n'), ((4967, 4991), 'numpy.swapaxes', 'np.swapaxes', 
(['coefs', '(0)', '(1)'], {}), '(coefs, 0, 1)\n', (4978, 4991), True, 'import numpy as np\n'), ((900, 1001), 'sklearn.svm.SVC', 'SVC', ([], {'max_iter': '(-1)', 'kernel': '"""linear"""', 'random_state': '(12345)', 'class_weight': '"""balanced"""', 'probability': '(True)'}), "(max_iter=-1, kernel='linear', random_state=12345, class_weight=\n 'balanced', probability=True)\n", (903, 1001), False, 'from sklearn.svm import SVC\n'), ((1102, 1115), 'sklearn.pipeline.Pipeline', 'Pipeline', (['clf'], {}), '(clf)\n', (1110, 1115), False, 'from sklearn.pipeline import Pipeline\n'), ((1464, 1490), 'os.path.exists', 'os.path.exists', (['saving_dir'], {}), '(saving_dir)\n', (1478, 1490), False, 'import os\n'), ((1496, 1516), 'os.mkdir', 'os.mkdir', (['saving_dir'], {}), '(saving_dir)\n', (1504, 1516), False, 'import os\n'), ((1706, 1744), 'os.path.join', 'os.path.join', (['working_dir', '"""*-epo.fif"""'], {}), "(working_dir, '*-epo.fif')\n", (1718, 1744), False, 'import os\n'), ((1764, 1797), 'mne.read_epochs', 'mne.read_epochs', (['e'], {'preload': '(False)'}), '(e, preload=False)\n', (1779, 1797), False, 'import mne\n'), ((1979, 2016), 'os.path.join', 'os.path.join', (['working_dir', '"""*-tfr.h5"""'], {}), "(working_dir, '*-tfr.h5')\n", (1991, 2016), False, 'import os\n'), ((2128, 2162), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (2140, 2162), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2248, 2260), 'mne.decoding.Vectorizer', 'Vectorizer', ([], {}), '()\n', (2258, 2260), False, 'from mne.decoding import Vectorizer, SlidingEstimator, cross_val_multiscore, LinearModel, get_coef\n'), ((2717, 2744), 'sklearn.utils.shuffle', 'utils.shuffle', (['data', 'labels'], {}), '(data, labels)\n', (2730, 2744), False, 'from sklearn import utils\n'), ((1029, 1051), 'mne.decoding.LinearModel', 'LinearModel', (['estimator'], {}), '(estimator)\n', (1040, 1051), False, 'from mne.decoding import 
Vectorizer, SlidingEstimator, cross_val_multiscore, LinearModel, get_coef\n'), ((2029, 2061), 'mne.time_frequency.read_tfrs', 'mne.time_frequency.read_tfrs', (['tf'], {}), '(tf)\n', (2057, 2061), False, 'import mne\n'), ((3415, 3447), 'mne.decoding.get_coef', 'get_coef', (['clf', '"""patterns_"""', '(True)'], {}), "(clf, 'patterns_', True)\n", (3423, 3447), False, 'from mne.decoding import Vectorizer, SlidingEstimator, cross_val_multiscore, LinearModel, get_coef\n'), ((3634, 3650), 'numpy.mean', 'np.mean', (['scores_'], {}), '(scores_)\n', (3641, 3650), True, 'import numpy as np\n'), ((3674, 3688), 'numpy.array', 'np.array', (['coef'], {}), '(coef)\n', (3682, 3688), True, 'import numpy as np\n'), ((829, 843), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (841, 843), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4423, 4434), 'numpy.sqrt', 'np.sqrt', (['(10)'], {}), '(10)\n', (4430, 4434), True, 'import numpy as np\n'), ((4464, 4475), 'numpy.sqrt', 'np.sqrt', (['(10)'], {}), '(10)\n', (4471, 4475), True, 'import numpy as np\n'), ((789, 801), 'mne.decoding.Vectorizer', 'Vectorizer', ([], {}), '()\n', (799, 801), False, 'from mne.decoding import Vectorizer, SlidingEstimator, cross_val_multiscore, LinearModel, get_coef\n'), ((5512, 5529), 'numpy.arange', 'np.arange', (['(25)', '(32)'], {}), '(25, 32)\n', (5521, 5529), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""objetos.py: Objetos e utilidades necessários para a implementação do algoritmo"""
__copyright__ = "Copyright (c) 2021 <NAME> & <NAME>. MIT. See attached LICENSE.txt file"
from math import sqrt
from copy import deepcopy
from sys import maxsize as int_inf
from typing import List, Tuple, Union, Dict, Iterator
from dataclasses import dataclass, field
from functools import lru_cache as memoized
from pandas import DataFrame
from numpy import power, sqrt
# ######################################################################################################################
# DATA CLASSES BÁSICAS
@dataclass(frozen=True)
class Parametros(object):
    """Immutable bundle of the algorithm's tuning parameters.

    NOTE(review): per-field semantics are inferred from the names (weights
    for distance/urgency/recursion terms plus recursion and iteration
    limits) -- confirm against the code that consumes this object.
    """
    peso_distancia: float
    peso_urgencia: float
    peso_recursoes: float
    limite_recursoes: int
    clientes_recursao: int
    limite_iteracoes: int = 1000
    qtd_novos_carros_por_rodada: int = 4
@dataclass(frozen=True)
class Posicao(object):
    """An immutable 2-D position with Euclidean distance support."""
    x: float
    y: float

    def __repr__(self):
        return f'({self.x}, {self.y})'

    def __str__(self):
        return self.__repr__()

    # @memoized(maxsize=100)  # TODO: check the memory footprint before enabling memoization!
    def distancia(self, outro) -> float:
        """Euclidean distance to *outro*, rounded to 3 decimal places."""
        dx = float(self.x - outro.x)
        dy = float(self.y - outro.y)
        return round(sqrt(dx ** 2 + dy ** 2), 3)
@dataclass(frozen=True)
class Cliente(Posicao):
    """An immutable client: a position plus a demand and a service time window."""
    demanda: float
    inicio: int
    fim: int
    servico: int
    tipo = 'Cliente'

    def __repr__(self):
        return f'CLIENTE({self.demanda} ({self.x}, {self.y}) [{self.inicio}, {self.fim}] {self.servico})'

    def __post_init__(self):
        # The time window must be wide enough to contain the service itself.
        assert self.servico <= (self.fim - self.inicio), f'CLIENTE COM JANELA DE SERVICO INVALIDA! {self}'
@dataclass(frozen=True)
class Deposito(Posicao):
    """The depot: a position with zero demand, open at all times, no service time."""
    tipo = 'Depósito'
    demanda = 0.0
    inicio = 0
    fim = int_inf
    servico = 0

    def __repr__(self):
        return f'DEPOSITO({self.x}, {self.y})'
@dataclass(frozen=True, init=False)
class Mapa(object):
    """Immutable problem instance parsed from a text file: fleet limits, the
    depot, the clients, a pre-computed distance matrix and a repr-string ->
    object lookup dict.
    NOTE(review): the file layout (name on line 0, fleet data on line 4,
    depot/customer rows from line 9) is hard-coded in parse_arquivo below.
    """
    arquivo: str
    nome: str
    max_carros: int
    capacidade_carro: float
    deposito: Deposito
    clientes: List[Cliente]
    matriz_de_distancias: DataFrame
    dict_referencias: Dict
    def __init__(self, arquivo: str):
        # object.__setattr__ is required because the dataclass is frozen.
        object.__setattr__(self, 'arquivo', arquivo)
        nome, max_carros, capacidade_carro, deposito, clientes = self.parse_arquivo(arquivo)
        matriz_de_distancias, dict_referencias = self.cria_matriz_de_distancias(deposito, clientes)
        object.__setattr__(self, 'nome', nome)
        object.__setattr__(self, 'max_carros', max_carros)
        object.__setattr__(self, 'capacidade_carro', capacidade_carro)
        object.__setattr__(self, 'deposito', deposito)
        object.__setattr__(self, 'clientes', clientes)
        object.__setattr__(self, 'matriz_de_distancias', matriz_de_distancias)
        object.__setattr__(self, 'dict_referencias', dict_referencias)
    def __repr__(self): return f'MAPA({self.nome}: {self.max_carros}x{self.capacidade_carro} ${len(self.clientes)})'
    def __str__(self): return self.__repr__()
    @staticmethod
    def parse_arquivo(arquivo):
        """Read the instance file and return
        (name, max cars, car capacity, depot, list of clients)."""
        clientes = []
        with open(arquivo, 'r') as fin:
            for num_linha, linha in enumerate(fin):
                linha = linha.strip()
                if len(linha) == 0: continue
                # line 0: instance name
                if num_linha == 0:
                    nome_teste = linha
                    continue
                # line 4: "<max cars> <car capacity>"
                if num_linha == 4:
                    max_carros, capacidade_carro = [int(v) for v in linha.split(' ') if len(v) > 0]
                    continue
                # lines >= 9: "<id> <x> <y> <demand> <window start> <window end> <service>"
                if num_linha >= 9:
                    _, x, y, demanda, inicio, fim, servico = [int(v) for v in linha.split(' ') if len(v) > 0]
                    if num_linha == 9:
                        # the first data row is the depot itself
                        deposito = Deposito(x=x, y=y)
                    else:
                        clientes.append(Cliente(x=x, y=y, demanda=demanda, inicio=inicio, fim=fim, servico=servico))
        return nome_teste, max_carros, capacidade_carro, deposito, clientes
    @staticmethod
    def cria_matriz_de_distancias(deposito: Deposito, clientes: List[Cliente]) -> (DataFrame, dict):
        """Build the symmetric Euclidean distance matrix (rounded to 3 decimal
        places) between every point, plus a repr-string -> object dict."""
        lista_referencia = [deposito] + clientes
        str_lista_referencia = list(map(str, lista_referencia))
        # Compute the distances between every pair of points in the map
        df_x = DataFrame([[i.x for i in lista_referencia]] * len(lista_referencia),
                          columns=str_lista_referencia, index=str_lista_referencia)
        df_x = (df_x - df_x.T).applymap(lambda x: power(x, 2))
        df_y = DataFrame([[i.y for i in lista_referencia]] * len(lista_referencia),
                          columns=str_lista_referencia, index=str_lista_referencia)
        df_y = (df_y - df_y.T).applymap(lambda y: power(y, 2))
        # distance = sqrt(dx^2 + dy^2), element-wise over the two frames
        df_distancias = (df_x + df_y).applymap(sqrt).astype(float).round(3)
        return df_distancias, dict(zip(str_lista_referencia, lista_referencia))
# ######################################################################################################################
# DATA CLASSES DE AGENTES
@dataclass
class Carro(object):
    """A vehicle with a schedule (`agenda`) of depot/client visits.

    The agenda always starts at the origin depot; `fim` tracks the time at
    which the last scheduled stop finishes and `carga` the remaining load.
    Forward-reference annotations are quoted so the class does not depend on
    declaration order.
    """
    id: str
    origem: "Deposito"
    velocidade: int
    capacidade: float
    carga: float = 0.0
    agenda: List[Union["Cliente", "Deposito"]] = field(default_factory=list)
    fim: int = 0
    _inicio = None  # legacy/unused placeholder, kept for compatibility
    def __post_init__(self):
        # A fresh car sits at its origin, fully loaded, at time 0.
        self.agenda = [self.origem]
        self.carga = self.capacidade
        self.fim = 0
    def __repr__(self): return f'Carro{self.id}(O+{len(self.agenda)}>>{self.posicao} |{self.carga}| [{self.inicio}, {self.fim}])'
    def __str__(self): return self.__repr__()
    @property
    def posicao(self): return self.agenda[-1]
    @property
    def inicio(self):
        """Departure time from the origin: start as late as possible so the
        car arrives at its first client exactly when the client's window
        opens (never negative).

        BUG FIX: the original guard only tested for an *empty* agenda, but
        __post_init__ always puts the origin in the agenda, so a car with no
        scheduled stops hit `self.agenda[1]` and raised IndexError (which
        also broke __repr__ for fresh cars). Guard on fewer than 2 entries.
        """
        if len(self.agenda) < 2:
            return 0
        return max([
            0,
            self.agenda[1].inicio - self.tempo_deslocamento(origem=self.agenda[0], destino=self.agenda[1])
        ])
    @property
    def clientes_atendidos(self) -> set:
        """Set of repr-strings of the clients currently in the agenda."""
        return set([str(cli) for cli in self.agenda if cli.tipo == 'Cliente'])
    def tempo_deslocamento(self, destino: "Union[Cliente, Deposito]", distancia=None, origem=None) -> int:
        """Travel time (whole units, at least 1) from `origem` (default: the
        current position) to `destino`; `distancia` may be passed
        pre-computed as a speed-up."""
        pos = self.posicao if origem is None else origem
        distancia = pos.distancia(destino) if distancia is None else distancia  # speed-up with pre-computed distance
        return int(distancia / self.velocidade) + 1
    def reabastecimento(self, deposito: "Deposito") -> (float, int, int):
        """Drive to `deposito`, refill to full capacity and append the stop.
        Returns (distance, travel time, service/layover time)."""
        distancia = self.posicao.distancia(deposito)
        tempo_deslocamento = self.tempo_deslocamento(deposito, distancia=distancia)
        delta_fim = tempo_deslocamento + deposito.servico
        self.fim += delta_fim
        self.agenda.append(deposito)
        self.carga = self.capacidade
        return distancia, tempo_deslocamento, delta_fim-tempo_deslocamento
    def atendimento(self, cliente: "Cliente") -> (float, int, int):
        """Drive to `cliente`, wait for its time window if necessary, serve
        it and append the stop. Returns (distance, travel time, layover)."""
        distancia = self.posicao.distancia(cliente)
        tempo_deslocamento = self.tempo_deslocamento(cliente, distancia=distancia)
        # Either we arrive and serve immediately, or we wait for the window to open.
        delta_fim = max([self.fim + tempo_deslocamento + cliente.servico, cliente.inicio + cliente.servico]) - self.fim
        assert delta_fim > 0, f'ABASTECIMENTO INVALIDO {self} -> {cliente}'
        self.fim += delta_fim
        self.agenda.append(cliente)
        self.carga = self.carga - cliente.demanda
        return distancia, tempo_deslocamento, delta_fim-tempo_deslocamento
    def resultado(self, display=True) -> Tuple[int, float, int, int, int]:
        """Replay the agenda on a dummy car and accumulate totals.
        Returns (start, total distance, total travel, total layover, end)."""
        if display: print(self)
        dummy = Carro(id='DUMMY:'+self.id, origem=self.origem, velocidade=self.velocidade, capacidade=self.capacidade)
        distancia_total = 0.0
        tempo_deslocamento_total = 0
        tempo_layover_total = 0
        for pos, item in enumerate(self.agenda):
            fim_anterior = dummy.fim
            if item.tipo == 'Cliente':
                distancia, tempo_deslocamento, tempo_layover = dummy.atendimento(item)
            else:
                distancia, tempo_deslocamento, tempo_layover = dummy.reabastecimento(item)
            if display:
                print('\t', item)
                if pos < (len(self.agenda)-1):
                    print('\t\t', distancia, '~', fim_anterior, '>>', tempo_deslocamento, '+', tempo_layover,
                          '>>', fim_anterior+tempo_deslocamento+tempo_layover)
            distancia_total += distancia
            tempo_deslocamento_total += tempo_deslocamento
            tempo_layover_total += tempo_layover
        return self.inicio, distancia_total, tempo_deslocamento_total, tempo_layover_total, self.fim
def copia_carro(og: Carro):
    """Return a fresh car (id prefixed with 'COPY:') with *og*'s agenda replayed onto it."""
    copia = Carro(id='COPY:' + og.id, origem=og.origem, velocidade=og.velocidade, capacidade=og.capacidade)
    for parada in og.agenda[1:]:
        # replay each stop through the appropriate scheduling method
        registrar = copia.atendimento if parada.tipo == 'Cliente' else copia.reabastecimento
        registrar(parada)
    return copia
def unifica_agendas_carros(pri: "Carro", seg: "Carro"):
    """Merge the agendas of two identically-configured cars into a new car.

    BUG FIX: the configuration check used to intersect two *sets* and demand
    3 common elements. Sets collapse duplicate values, so two identical cars
    whose velocidade equals capacidade were wrongly rejected, and two cars
    with *swapped* velocidade/capacidade still intersected in 3 elements and
    were wrongly accepted. A field-by-field tuple comparison has neither
    problem.
    """
    assert pri.id != seg.id, 'TENTATIVA DE UNIFICAR CARROS DE MESMO ID!'
    assert (str(pri.origem), pri.velocidade, pri.capacidade) == \
           (str(seg.origem), seg.velocidade, seg.capacidade), 'TENTATIVA DE UNIFICAR CARROS DE CONFIGURAÇÕES DIFERENTES!'
    carro = Carro(id=f'{pri.id}+{seg.id}', origem=pri.origem, velocidade=pri.velocidade, capacidade=pri.capacidade)
    # Replay both agendas (deep-copying every stop) onto the merged car.
    for fonte in (pri, seg):
        for item in fonte.agenda[1:]:
            if item.tipo == 'Cliente':
                carro.atendimento(deepcopy(item))
            else:
                carro.reabastecimento(deepcopy(item))
    return carro
@dataclass(frozen=False, init=False)
class Frota(object):
    """The fleet of cars serving a map: creation, cleanup and reporting."""
    mapa: Mapa
    max_carros: int
    capacidade_carro: float
    velocidade_carro: int
    carros: Dict
    deposito: Deposito

    def __init__(self, mapa: Mapa, velocidade_carro: int):
        self.mapa = mapa
        self.velocidade_carro = velocidade_carro
        # fleet limits and depot come straight from the map
        self.max_carros = mapa.max_carros
        self.capacidade_carro = mapa.capacidade_carro
        self.deposito = mapa.deposito
        self.carros = {}

    def __repr__(self): return f'Frota<{self.mapa.nome}>(|{len(self.carros)}/{self.mapa.max_carros}| x {len(self.clientes_atendidos)}/{len(self.mapa.clientes)}])'

    def __str__(self):
        return self.__repr__()

    def __len__(self):
        return len(self.carros)

    def __getitem__(self, item):
        # look up a depot/client object by its repr string
        return self.mapa.dict_referencias[item]

    def __iter__(self) -> Iterator[Carro]:
        yield from self.carros.values()

    @property
    def clientes_atendidos(self) -> set:
        """Union of the client repr-strings served by every car."""
        atendidos = set()
        for carro in self.carros.values():
            atendidos |= carro.clientes_atendidos
        return atendidos

    @property
    def clientes_faltantes(self) -> set:
        """Clients of the map not yet served by any car."""
        return {str(cliente) for cliente in self.mapa.clientes} - self.clientes_atendidos

    @property
    def sumario(self) -> DataFrame:
        """One DataFrame row per car with its replayed totals."""
        linhas = []
        for carro in self.carros.values():
            inicio, distancia, t_desloc, t_layover, fim = carro.resultado(display=False)
            linhas.append({'carro': carro.id, 'inicio': inicio, 'fim': fim, 'distancia': distancia,
                           'tempo_deslocamento': t_desloc, 'tempo_layover': t_layover,
                           'qtd_clientes': len(carro.clientes_atendidos)})
        sumario = DataFrame(linhas)
        sumario.loc[:, 'tempo_atividade'] = sumario['fim'] - sumario['inicio']
        return sumario

    def novo_carro(self) -> Carro:
        """Create, register and return a new car with the fleet's configuration."""
        novo_id = str(len(self.carros))
        carro = Carro(novo_id, self.deposito, self.velocidade_carro, self.capacidade_carro)
        self.carros[novo_id] = carro
        return carro

    def limpa_carros_sem_agenda(self):
        """Drop every car whose agenda holds only the origin; return their ids."""
        sem_agenda = [id_carro for id_carro, carro in self.carros.items() if len(carro.agenda) < 2]
        for id_carro in sem_agenda:
            self.carros.pop(id_carro)
        return sem_agenda

    def substitui_carros(self, novos_carros: List[Carro]):
        """Replace the whole fleet with `novos_carros`, keyed by car id."""
        self.carros = {carro.id: carro for carro in novos_carros}
| [
"numpy.power",
"dataclasses.dataclass",
"copy.deepcopy",
"pandas.DataFrame",
"dataclasses.field"
] | [((655, 677), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (664, 677), False, 'from dataclasses import dataclass, field\n'), ((911, 933), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (920, 933), False, 'from dataclasses import dataclass, field\n'), ((1311, 1333), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1320, 1333), False, 'from dataclasses import dataclass, field\n'), ((1712, 1734), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1721, 1734), False, 'from dataclasses import dataclass, field\n'), ((1916, 1950), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'init': '(False)'}), '(frozen=True, init=False)\n', (1925, 1950), False, 'from dataclasses import dataclass, field\n'), ((9801, 9836), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(False)', 'init': '(False)'}), '(frozen=False, init=False)\n', (9810, 9836), False, 'from dataclasses import dataclass, field\n'), ((5280, 5307), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (5285, 5307), False, 'from dataclasses import dataclass, field\n'), ((11544, 11562), 'pandas.DataFrame', 'DataFrame', (['sumario'], {}), '(sumario)\n', (11553, 11562), False, 'from pandas import DataFrame\n'), ((4533, 4544), 'numpy.power', 'power', (['x', '(2)'], {}), '(x, 2)\n', (4538, 4544), False, 'from numpy import power, sqrt\n'), ((4763, 4774), 'numpy.power', 'power', (['y', '(2)'], {}), '(y, 2)\n', (4768, 4774), False, 'from numpy import power, sqrt\n'), ((9524, 9538), 'copy.deepcopy', 'deepcopy', (['item'], {}), '(item)\n', (9532, 9538), False, 'from copy import deepcopy\n'), ((9588, 9602), 'copy.deepcopy', 'deepcopy', (['item'], {}), '(item)\n', (9596, 9602), False, 'from copy import deepcopy\n'), ((9701, 9715), 'copy.deepcopy', 'deepcopy', (['item'], {}), '(item)\n', (9709, 9715), False, 'from copy 
import deepcopy\n'), ((9765, 9779), 'copy.deepcopy', 'deepcopy', (['item'], {}), '(item)\n', (9773, 9779), False, 'from copy import deepcopy\n')] |
import argparse
import os
import time
import datetime
import yaml
import tensorflow as tf
import numpy as np
import src.core as core
from src.retina_net import config_utils
from src.core import constants
from src.retina_net.builders import dataset_handler_builder
from src.retina_net.models.retinanet_model import RetinaNetModel
keras = tf.keras
def train_model(config):
    """
    Training function.

    Builds the dataset, model, LR schedule, optimizer and checkpoint
    machinery described by `config`; restores the latest checkpoint (or
    initializes from ImageNet weights / from scratch); then runs the
    training loop, writing TensorBoard summaries and periodically saving
    checkpoints.

    :param config: config file (parsed YAML dictionary)
    """
    # Get training config
    training_config = config['training_config']
    # Create dataset class
    dataset_config = config['dataset_config']
    dataset_handler = dataset_handler_builder.build_dataset(
        dataset_config, 'train')
    # Set keras training phase
    keras.backend.set_learning_phase(1)
    print("Keras Learning Phase Set to: " +
          str(keras.backend.learning_phase()))
    # Create Model
    with tf.name_scope("retinanet_model"):
        model = RetinaNetModel(config['model_config'])
    # Instantiate an optimizer.
    minibatch_size = training_config['minibatch_size']
    # number of optimizer steps per epoch
    epoch_size = int(dataset_handler.epoch_size / minibatch_size)
    initial_learning_rate = training_config['initial_learning_rate']
    decay_factor = training_config['decay_factor']
    # decay boundaries are given in epochs in the config; convert to steps
    decay_boundaries = [
        boundary *
        epoch_size for boundary in training_config['decay_boundaries']]
    decay_factors = [decay_factor**i for i in range(0, len(decay_boundaries)+1)]
    # NOTE(review): the comprehension variable below shadows the outer
    # `decay_factor`; each value is lr * factor**i for segment i.
    learning_rate_values = [
        np.round(
            initial_learning_rate *
            decay_factor,
            8) for decay_factor in decay_factors]
    lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
        decay_boundaries, learning_rate_values)
    # NOTE(review): epsilon=1e-2 is far larger than Adam's default --
    # presumably intentional for stability; confirm.
    optimizer = keras.optimizers.Adam(learning_rate=lr_schedule, epsilon=1e-2)
    # Create summary writer
    log_file = config['logs_dir'] + '/training/' + str(datetime.datetime.now())
    summary_writer = tf.summary.create_file_writer(log_file)
    # Load checkpoint weights if training folder exists
    ckpt = tf.train.Checkpoint(
        step=tf.Variable(0),
        optimizer=optimizer,
        net=model)
    manager = tf.train.CheckpointManager(
        ckpt,
        config['checkpoint_path'],
        max_to_keep=training_config['max_checkpoints_to_keep'])
    ckpt.restore(manager.latest_checkpoint)
    # If no checkpoints exist, intialize either from imagenet or from scratch
    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
        ckpt.step.assign_add(1)
    elif config['model_config']['feature_extractor']['pretrained_initialization']:
        # Load resnet-50 imagenet pretrained weights if set in config file.
        # Dummy input required to define graph.
        input_shape = (224, 224, 3)
        dummy_input = keras.layers.Input(shape=input_shape)
        model.feature_extractor(dummy_input)
        weights_path = keras.utils.get_file(
            'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
            ('https://github.com/fchollet/deep-learning-models/'
             'releases/download/v0.2/'
             'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'),
            cache_subdir='models',
            md5_hash='a268eb855778b3df3c7506639542a6af')
        model.feature_extractor.load_weights(weights_path, by_name=True)
        # Tensorflow 2.0 bug with loading weights in nested models. Might get
        # fixed later.
        # Each nested sub-model is therefore loaded explicitly by name.
        model.feature_extractor.conv_block_2a.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.conv_block_3a.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.conv_block_4a.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.conv_block_5a.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_2b.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_2c.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_3b.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_3c.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_3d.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_4b.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_4c.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_4d.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_4e.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_4f.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_5b.load_weights(
            weights_path, by_name=True)
        model.feature_extractor.identity_block_5c.load_weights(
            weights_path, by_name=True)
        print("Initializing from ImageNet weights.")
    else:
        print("Initializing from scratch.")
    # Create Dataset
    # Skip already passed elements in dataset, in case of resuming training.
    dataset = dataset_handler.create_dataset().repeat(
        training_config['max_epochs'])
    # Batch size goes in parenthesis.
    batched_dataset = dataset.batch(minibatch_size)
    batched_dataset = batched_dataset.take(tf.data.experimental.cardinality(
        batched_dataset) - tf.cast(ckpt.step + 1, tf.int64))
    print("Remaining iterations:" +
          str(tf.data.experimental.cardinality(batched_dataset).numpy()))
    # `prefetch` lets the dataset fetch batches, in the background while the model is training.
    batched_dataset = batched_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)
    last_time = time.time()
    for sample_dict in batched_dataset:
        with summary_writer.as_default():
            # Turn on both graph and profiler for debugging the graph in
            # tensorboard
            tf.summary.trace_on(graph=False, profiler=False)
            total_loss, loss_dict = train_single_step(
                model, optimizer, sample_dict)
            tf.summary.trace_export(
                name="training_trace",
                step=0,
                profiler_outdir=log_file)
            # scalar summaries for every individual loss term
            with tf.name_scope('losses'):
                for loss_name in loss_dict.keys():
                    tf.summary.scalar(loss_name,
                                      loss_dict[loss_name],
                                      step=int(ckpt.step))
            with tf.name_scope('optimizer'):
                tf.summary.scalar('learning_rate',
                                  lr_schedule(int(ckpt.step)),
                                  step=int(ckpt.step))
            tf.summary.scalar(
                'Total Loss',
                total_loss,
                step=int(
                    ckpt.step))
            summary_writer.flush()
        # Write summary
        if int(ckpt.step) % training_config['summary_interval'] == 0:
            current_time = time.time()
            time_elapsed = current_time - last_time
            last_time = time.time()
            print(
                'Step {}, Total Loss {:0.3f}, Time Elapsed {:0.3f} s'.format(
                    int(ckpt.step), total_loss.numpy(), time_elapsed))
        # Saving checkpoint
        # (the step counter is advanced exactly once per batch on both branches)
        if int(ckpt.step) % int(
                epoch_size * training_config['checkpoint_interval']) == 0:
            save_path = manager.save(checkpoint_number=ckpt.save_counter)
            print("Saved checkpoint for step {}: {}".format(
                int(ckpt.step), save_path))
            print("loss {:1.2f}".format(total_loss.numpy()))
            ckpt.step.assign_add(1)
        else:
            ckpt.step.assign_add(1)
@tf.function
def train_single_step(
        model,
        optimizer,
        sample_dict):
    """
    Run one optimization step: forward pass, loss computation, gradient
    clipping and parameter update.

    :param model: keras retinanet model
    :param optimizer: keras optimizer
    :param sample_dict: input dictionary generated from dataset.
        If element sizes in this dictionary are variable, remove tf.function decorator.
    :return total_loss: Sum of all losses (model losses plus the
        regularization loss).
    :return loss_dict: Dictionary of the individual loss terms, including
        the regularization loss under REGULARIZATION_LOSS_KEY.
    """
    with tf.GradientTape() as tape:
        prediction_dict = model(sample_dict[constants.IMAGE_NORMALIZED_KEY],
                                train_val_test='training')
        total_loss, loss_dict = model.get_loss(sample_dict, prediction_dict)
        # Get any regularization loss in the model and add it to total loss
        regularization_loss = tf.reduce_sum(
            tf.concat([layer.losses for layer in model.layers], axis=0))
        loss_dict.update(
            {constants.REGULARIZATION_LOSS_KEY: regularization_loss})
        total_loss += regularization_loss
    # Compute the gradient which respect to the loss
    with tf.name_scope("grad_ops"):
        gradients = tape.gradient(total_loss, model.trainable_variables)
        # clip by global norm (max norm 5.0) to stabilize training
        clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
        optimizer.apply_gradients(
            zip(clipped_gradients, model.trainable_variables))
    return total_loss, loss_dict
def main():
    """Object Detection Model Trainer.

    Parses CLI options, pins the process to the requested GPU, loads the
    YAML configuration and launches training.
    """
    # Defaults
    default_gpu_device = '1'
    default_config_path = core.model_dir(
        'retina_net') + '/configs/retinanet_bdd.yaml'
    # Allowed data splits are 'train','train_mini', 'val', 'val_half',
    # 'val_mini'
    default_data_split = 'train'

    # Parse input
    parser = argparse.ArgumentParser()
    for flag, default_value in (('--gpu_device', default_gpu_device),
                                ('--yaml_path', default_config_path),
                                ('--data_split', default_data_split)):
        parser.add_argument(flag,
                            type=str,
                            dest=flag.lstrip('-'),
                            default=default_value)
    args = parser.parse_args()

    # Set CUDA device id
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_device
    # Allow GPU memory growth
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

    # Load in configuration file as python dictionary
    with open(args.yaml_path, 'r') as yaml_file:
        config = yaml.load(yaml_file, Loader=yaml.FullLoader)
    # Make necessary directories, update config with checkpoint path and data
    # split
    config = config_utils.setup(config, args)
    # Go to training function
    train_model(config)
# Standard script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()
| [
"tensorflow.data.experimental.cardinality",
"yaml.load",
"tensorflow.GradientTape",
"src.retina_net.models.retinanet_model.RetinaNetModel",
"tensorflow.cast",
"tensorflow.clip_by_global_norm",
"argparse.ArgumentParser",
"src.retina_net.config_utils.setup",
"tensorflow.concat",
"src.core.model_dir"... | [((615, 677), 'src.retina_net.builders.dataset_handler_builder.build_dataset', 'dataset_handler_builder.build_dataset', (['dataset_config', '"""train"""'], {}), "(dataset_config, 'train')\n", (652, 677), False, 'from src.retina_net.builders import dataset_handler_builder\n'), ((1620, 1716), 'tensorflow.keras.optimizers.schedules.PiecewiseConstantDecay', 'tf.keras.optimizers.schedules.PiecewiseConstantDecay', (['decay_boundaries', 'learning_rate_values'], {}), '(decay_boundaries,\n learning_rate_values)\n', (1672, 1716), True, 'import tensorflow as tf\n'), ((1932, 1971), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['log_file'], {}), '(log_file)\n', (1961, 1971), True, 'import tensorflow as tf\n'), ((2152, 2272), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['ckpt', "config['checkpoint_path']"], {'max_to_keep': "training_config['max_checkpoints_to_keep']"}), "(ckpt, config['checkpoint_path'], max_to_keep=\n training_config['max_checkpoints_to_keep'])\n", (2178, 2272), True, 'import tensorflow as tf\n'), ((5958, 5969), 'time.time', 'time.time', ([], {}), '()\n', (5967, 5969), False, 'import time\n'), ((9942, 9967), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9965, 9967), False, 'import argparse\n'), ((10671, 10722), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (10715, 10722), True, 'import tensorflow as tf\n'), ((10727, 10794), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (10767, 10794), True, 'import tensorflow as tf\n'), ((11065, 11097), 'src.retina_net.config_utils.setup', 'config_utils.setup', (['config', 'args'], {}), '(config, args)\n', (11083, 11097), False, 'from src.retina_net import config_utils\n'), ((879, 911), 
'tensorflow.name_scope', 'tf.name_scope', (['"""retinanet_model"""'], {}), "('retinanet_model')\n", (892, 911), True, 'import tensorflow as tf\n'), ((929, 967), 'src.retina_net.models.retinanet_model.RetinaNetModel', 'RetinaNetModel', (["config['model_config']"], {}), "(config['model_config'])\n", (943, 967), False, 'from src.retina_net.models.retinanet_model import RetinaNetModel\n'), ((1479, 1528), 'numpy.round', 'np.round', (['(initial_learning_rate * decay_factor)', '(8)'], {}), '(initial_learning_rate * decay_factor, 8)\n', (1487, 1528), True, 'import numpy as np\n'), ((8643, 8660), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8658, 8660), True, 'import tensorflow as tf\n'), ((9285, 9310), 'tensorflow.name_scope', 'tf.name_scope', (['"""grad_ops"""'], {}), "('grad_ops')\n", (9298, 9310), True, 'import tensorflow as tf\n'), ((9416, 9454), 'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['gradients', '(5.0)'], {}), '(gradients, 5.0)\n', (9438, 9454), True, 'import tensorflow as tf\n'), ((9719, 9747), 'src.core.model_dir', 'core.model_dir', (['"""retina_net"""'], {}), "('retina_net')\n", (9733, 9747), True, 'import src.core as core\n'), ((10916, 10960), 'yaml.load', 'yaml.load', (['yaml_file'], {'Loader': 'yaml.FullLoader'}), '(yaml_file, Loader=yaml.FullLoader)\n', (10925, 10960), False, 'import yaml\n'), ((1886, 1909), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1907, 1909), False, 'import datetime\n'), ((2074, 2088), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {}), '(0)\n', (2085, 2088), True, 'import tensorflow as tf\n'), ((5540, 5589), 'tensorflow.data.experimental.cardinality', 'tf.data.experimental.cardinality', (['batched_dataset'], {}), '(batched_dataset)\n', (5572, 5589), True, 'import tensorflow as tf\n'), ((5601, 5633), 'tensorflow.cast', 'tf.cast', (['(ckpt.step + 1)', 'tf.int64'], {}), '(ckpt.step + 1, tf.int64)\n', (5608, 5633), True, 'import tensorflow as tf\n'), ((6163, 6211), 
'tensorflow.summary.trace_on', 'tf.summary.trace_on', ([], {'graph': '(False)', 'profiler': '(False)'}), '(graph=False, profiler=False)\n', (6182, 6211), True, 'import tensorflow as tf\n'), ((6326, 6411), 'tensorflow.summary.trace_export', 'tf.summary.trace_export', ([], {'name': '"""training_trace"""', 'step': '(0)', 'profiler_outdir': 'log_file'}), "(name='training_trace', step=0, profiler_outdir=log_file\n )\n", (6349, 6411), True, 'import tensorflow as tf\n'), ((9018, 9077), 'tensorflow.concat', 'tf.concat', (['[layer.losses for layer in model.layers]'], {'axis': '(0)'}), '([layer.losses for layer in model.layers], axis=0)\n', (9027, 9077), True, 'import tensorflow as tf\n'), ((6474, 6497), 'tensorflow.name_scope', 'tf.name_scope', (['"""losses"""'], {}), "('losses')\n", (6487, 6497), True, 'import tensorflow as tf\n'), ((6735, 6761), 'tensorflow.name_scope', 'tf.name_scope', (['"""optimizer"""'], {}), "('optimizer')\n", (6748, 6761), True, 'import tensorflow as tf\n'), ((7250, 7261), 'time.time', 'time.time', ([], {}), '()\n', (7259, 7261), False, 'import time\n'), ((7346, 7357), 'time.time', 'time.time', ([], {}), '()\n', (7355, 7357), False, 'import time\n'), ((5685, 5734), 'tensorflow.data.experimental.cardinality', 'tf.data.experimental.cardinality', (['batched_dataset'], {}), '(batched_dataset)\n', (5717, 5734), True, 'import tensorflow as tf\n')] |
import os
import sys
import json
import random
import numpy as np
import torch
from tqdm import tqdm, trange
from scipy.sparse import coo_matrix
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
import blink.candidate_ranking.utils as utils
from blink.common.params import BlinkParser
from blink.joint.crossencoder import CrossEncoderRanker
from blink.joint.joint_eval.evaluation import (
compute_coref_metrics,
compute_linking_metrics,
compute_joint_metrics,
_get_global_maximum_spanning_tree
)
from IPython import embed
logger = None
def eval_modify(context_input, candidate_input, max_seq_length):
    """Join a tokenized context with each of its tokenized candidates.

    Each candidate row loses its leading [CLS] token, is appended to the
    context tokens, and the joined sequence is truncated to
    ``max_seq_length``.

    Args:
        context_input: 1-D tensor of context token ids.
        candidate_input: 2-D tensor of candidate token ids (one row each).
        max_seq_length: maximum length of each joined sequence.

    Returns:
        torch.LongTensor of shape (num_candidates, <=max_seq_length).
    """
    ctxt_tokens = context_input.tolist()
    cand_rows = candidate_input.tolist()
    joined = [
        (ctxt_tokens + row[1:])[:max_seq_length]  # drop candidate [CLS]
        for row in cand_rows
    ]
    return torch.LongTensor(joined)
def create_eval_dataloader(
    params,
    contexts,
    context_uids,
    knn_cands,
    knn_cand_uids,
):
    """Build a sequential DataLoader of (context+candidate, uid-pair) examples.

    For every context that has at least one kNN candidate, each
    (context, candidate) pair is tokenized into one joined sequence via
    ``eval_modify`` and paired with its (context_uid, candidate_uid)
    tuple for later edge construction.

    Args:
        params: config dict; uses "max_seq_length", "debug", and
            "encode_batch_size".
        contexts: tensor of tokenized contexts, one row per mention.
        context_uids: tensor of uids aligned with ``contexts``.
        knn_cands: per-context tensors of tokenized kNN candidates.
        knn_cand_uids: per-context tensors of candidate uids.

    Returns:
        DataLoader over a TensorDataset of (joined tokens, uid pair).
    """
    context_input_examples = []
    example_uid_pairs = []
    for i in trange(contexts.shape[0]):
        # skip contexts that retrieved no candidates
        if knn_cand_uids[i].shape[0] == 0:
            continue
        context_input_examples.append(
            eval_modify(
                contexts[i],
                knn_cands[i],
                params["max_seq_length"]
            )
        )
        example_uid_pairs.extend(
            [(context_uids[i], cand_uid) for cand_uid in knn_cand_uids[i]]
        )
    # concatenate all of the examples together
    context_input = torch.cat(context_input_examples)
    uid_pairs = torch.LongTensor(example_uid_pairs)
    assert context_input.shape[0] == uid_pairs.shape[0]
    if params["debug"]:
        max_n = 6400
        context_input = context_input[:max_n]
        # BUG FIX: this previously truncated ``context_input`` into
        # ``uid_pairs``, which replaced the uid pairs with token ids and
        # desynchronized examples from their uids in debug mode.
        uid_pairs = uid_pairs[:max_n]
    tensor_data = TensorDataset(context_input, uid_pairs)
    sampler = SequentialSampler(tensor_data)
    dataloader = DataLoader(
        tensor_data,
        sampler=sampler,
        batch_size=params["encode_batch_size"]
    )
    return dataloader
def build_ground_truth(eval_data):
    """Extract the gold linking map and gold coref clusters.

    Args:
        eval_data: dict with "context_uids" (tensor), "pos_cand_uids"
            (per-context scalar tensors), and "pos_coref_ctxt_uids"
            (per-context tensors of coreferent context uids).

    Returns:
        (gold_linking_map, gold_coref_clusters): a dict mapping each
        context uid to its gold candidate uid, and a list of
        de-duplicated coref clusters (each a sorted list of uids).
    """
    ctxt_uids = eval_data["context_uids"].tolist()
    # map each context uid to the uid of its gold candidate
    gold_linking_map = {}
    for ctxt_uid, pos_cand in zip(ctxt_uids, eval_data["pos_cand_uids"]):
        gold_linking_map[ctxt_uid] = pos_cand.item()
    # de-duplicate clusters by representing each as a sorted tuple
    cluster_keys = set()
    for ctxt_uid, coref_ctxts in zip(ctxt_uids, eval_data["pos_coref_ctxt_uids"]):
        cluster_keys.add(tuple(sorted([ctxt_uid] + coref_ctxts.tolist())))
    gold_coref_clusters = [list(key) for key in cluster_keys]
    return gold_linking_map, gold_coref_clusters
def score_contexts(
    reranker,
    eval_dataloader,
    device,
    logger,
    context_length,
    suffix=None,
    silent=True
):
    """Score every (context, candidate) pair in a dataloader.

    Runs the reranker in eval mode over all batches and returns one row
    per pair: (uid_a, uid_b, score).

    Args:
        reranker: cross-encoder with ``.model`` and ``score_candidate``.
        eval_dataloader: yields (context_input, uid_pair) batches.
        device: device the batch tensors are moved to.
        logger: unused here; kept for interface parity.
        context_length: passed through to ``score_candidate``.
        suffix: label for the progress bar; must be provided.
        silent: when False, wrap the loader in a tqdm progress bar.

    Returns:
        Float tensor of shape (num_pairs, 3): two uid columns + score.
    """
    assert suffix is not None
    reranker.model.eval()
    if silent:
        batches = eval_dataloader
    else:
        batches = tqdm(eval_dataloader, desc="scoring {} contexts".format(suffix))
    pair_chunks, score_chunks = [], []
    with torch.no_grad():
        for batch in batches:
            context_input, uid_pair = (t.to(device) for t in batch)
            pair_chunks.append(uid_pair)
            batch_scores = reranker.score_candidate(
                context_input.unsqueeze(0),
                context_length
            )
            score_chunks.append(batch_scores.squeeze(0))
        vertices = torch.cat(pair_chunks).type(torch.float)
        scores = torch.cat(score_chunks).unsqueeze(1)
        edges = torch.cat((vertices, scores), 1)
    return edges
def main(params):
    """Run joint coreference + linking evaluation with two cross-encoders.

    Loads a context-context reranker and a context-candidate reranker,
    scores all kNN edges, builds sparse affinity graphs, computes coref,
    linking, and joint metrics, and saves everything to
    ``<output_path>/eval_output/results.t7``.

    Args:
        params: config dict from BlinkParser. "path_to_model" must be
            None on entry — it is used as a scratch slot while loading
            each of the two models and restored to None afterwards.
    """
    # create output dir
    eval_output_path = os.path.join(params["output_path"], "eval_output")
    if not os.path.exists(eval_output_path):
        os.makedirs(eval_output_path)
    # get logger
    logger = utils.get_logger(eval_output_path)
    # log the exact command that was run (note: mutates sys.argv in place)
    cmd = sys.argv
    cmd.insert(0, "python")
    logger.info(" ".join(cmd))
    # load the two models, temporarily pointing "path_to_model" at each
    # checkpoint in turn
    assert params["path_to_model"] is None
    params["path_to_model"] = params["path_to_ctxt_model"]
    ctxt_reranker = CrossEncoderRanker(params)
    ctxt_model = ctxt_reranker.model
    # the candidate model is loaded without highlighted-token pooling
    params["pool_highlighted"] = False
    params["path_to_model"] = params["path_to_cand_model"]
    cand_reranker = CrossEncoderRanker(params)
    cand_model = cand_reranker.model
    params["path_to_model"] = None
    tokenizer = ctxt_reranker.tokenizer
    device = ctxt_reranker.device
    n_gpu = ctxt_reranker.n_gpu
    context_length = params["max_context_length"]
    # fix the random seeds
    seed = params["seed"]
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if ctxt_reranker.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
    # create eval dataloaders from the serialized joint eval data
    fname = os.path.join(
        params["data_path"],
        "joint_" + params["mode"] +".t7"
    )
    eval_data = torch.load(fname)
    # context-context pairs (coreference edges)
    ctxt_dataloader = create_eval_dataloader(
        params,
        eval_data["contexts"],
        eval_data["context_uids"],
        eval_data["knn_ctxts"],
        eval_data["knn_ctxt_uids"]
    )
    # context-candidate pairs (linking edges)
    cand_dataloader = create_eval_dataloader(
        params,
        eval_data["contexts"],
        eval_data["context_uids"],
        eval_data["knn_cands"],
        eval_data["knn_cand_uids"]
    )
    # construct ground truth data
    gold_linking_map, gold_coref_clusters = build_ground_truth(eval_data)
    # score all of the edges with the matching reranker
    ctxt_edges, cand_edges = None, None
    ctxt_edges = score_contexts(
        ctxt_reranker,
        ctxt_dataloader,
        device=device,
        logger=logger,
        context_length=context_length,
        suffix="ctxt",
        silent=params["silent"],
    )
    cand_edges = score_contexts(
        cand_reranker,
        cand_dataloader,
        device=device,
        logger=logger,
        context_length=context_length,
        suffix="cand",
        silent=params["silent"],
    )
    # build square sparse affinity graphs indexed by uid;
    # edge rows are (uid_a, uid_b, score)
    sparse_shape = tuple(2*[max(gold_linking_map.keys())+1])
    _ctxt_data = ctxt_edges[:, 2].cpu().numpy()
    _ctxt_row = ctxt_edges[:, 0].cpu().numpy()
    _ctxt_col = ctxt_edges[:, 1].cpu().numpy()
    ctxt_graph = coo_matrix(
        (_ctxt_data, (_ctxt_row, _ctxt_col)), shape=sparse_shape
    )
    # note: rows/cols are swapped relative to the ctxt graph above
    _cand_data = cand_edges[:, 2].cpu().numpy()
    _cand_row = cand_edges[:, 1].cpu().numpy()
    _cand_col = cand_edges[:, 0].cpu().numpy()
    cand_graph = coo_matrix(
        (_cand_data, (_cand_row, _cand_col)), shape=sparse_shape
    )
    logger.info('Computing coref metrics...')
    coref_metrics = compute_coref_metrics(
        gold_coref_clusters, ctxt_graph
    )
    logger.info('Done.')
    logger.info('Computing linking metrics...')
    linking_metrics, slim_linking_graph = compute_linking_metrics(
        cand_graph, gold_linking_map
    )
    logger.info('Done.')
    logger.info('Computing joint metrics...')
    slim_coref_graph = _get_global_maximum_spanning_tree([ctxt_graph])
    joint_metrics = compute_joint_metrics(
        [slim_coref_graph, slim_linking_graph],
        gold_linking_map,
        min(gold_linking_map.keys())
    )
    logger.info('Done.')
    # collect the headline numbers for logging
    metrics = {
        'coref_fmi' : coref_metrics['fmi'],
        'coref_rand_index' : coref_metrics['rand_index'],
        'coref_threshold' : coref_metrics['threshold'],
        'vanilla_recall' : linking_metrics['vanilla_recall'],
        'vanilla_accuracy' : linking_metrics['vanilla_accuracy'],
        'joint_accuracy' : joint_metrics['joint_accuracy'],
        'joint_cc_recall' : joint_metrics['joint_cc_recall']
    }
    logger.info('joint_metrics: {}'.format(
        json.dumps(metrics, sort_keys=True, indent=4)
    ))
    # save all of the predictions for later analysis
    save_data = {}
    save_data.update(coref_metrics)
    save_data.update(linking_metrics)
    save_data.update(joint_metrics)
    save_fname = os.path.join(eval_output_path, 'results.t7')
    torch.save(save_data, save_fname)
if __name__ == "__main__":
    # Build the CLI parser with model, eval, and joint train/eval options,
    # then hand the parsed arguments to main() as a plain dict.
    cli_parser = BlinkParser(add_model_args=True)
    cli_parser.add_eval_args()
    cli_parser.add_joint_train_args()
    cli_parser.add_joint_eval_args()
    cli_args = cli_parser.parse_args()
    print(cli_args)
    main(cli_args.__dict__)
| [
"torch.LongTensor",
"blink.joint.joint_eval.evaluation.compute_linking_metrics",
"os.path.exists",
"blink.joint.joint_eval.evaluation._get_global_maximum_spanning_tree",
"json.dumps",
"numpy.random.seed",
"scipy.sparse.coo_matrix",
"blink.common.params.BlinkParser",
"torch.utils.data.SequentialSampl... | [((986, 1013), 'torch.LongTensor', 'torch.LongTensor', (['mod_input'], {}), '(mod_input)\n', (1002, 1013), False, 'import torch\n'), ((1197, 1222), 'tqdm.trange', 'trange', (['contexts.shape[0]'], {}), '(contexts.shape[0])\n', (1203, 1222), False, 'from tqdm import tqdm, trange\n'), ((1663, 1696), 'torch.cat', 'torch.cat', (['context_input_examples'], {}), '(context_input_examples)\n', (1672, 1696), False, 'import torch\n'), ((1713, 1748), 'torch.LongTensor', 'torch.LongTensor', (['example_uid_pairs'], {}), '(example_uid_pairs)\n', (1729, 1748), False, 'import torch\n'), ((1958, 1997), 'torch.utils.data.TensorDataset', 'TensorDataset', (['context_input', 'uid_pairs'], {}), '(context_input, uid_pairs)\n', (1971, 1997), False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((2012, 2042), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['tensor_data'], {}), '(tensor_data)\n', (2029, 2042), False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((2060, 2145), 'torch.utils.data.DataLoader', 'DataLoader', (['tensor_data'], {'sampler': 'sampler', 'batch_size': "params['encode_batch_size']"}), "(tensor_data, sampler=sampler, batch_size=params['encode_batch_size']\n )\n", (2070, 2145), False, 'from torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n'), ((3862, 3904), 'torch.cat', 'torch.cat', (['(edge_vertices, edge_scores)', '(1)'], {}), '((edge_vertices, edge_scores), 1)\n', (3871, 3904), False, 'import torch\n'), ((3990, 4040), 'os.path.join', 'os.path.join', (["params['output_path']", '"""eval_output"""'], {}), "(params['output_path'], 'eval_output')\n", (4002, 4040), False, 'import os\n'), ((4154, 4188), 'blink.candidate_ranking.utils.get_logger', 'utils.get_logger', (['eval_output_path'], {}), '(eval_output_path)\n', (4170, 4188), True, 'import blink.candidate_ranking.utils as utils\n'), ((4438, 4464), 
'blink.joint.crossencoder.CrossEncoderRanker', 'CrossEncoderRanker', (['params'], {}), '(params)\n', (4456, 4464), False, 'from blink.joint.crossencoder import CrossEncoderRanker\n'), ((4621, 4647), 'blink.joint.crossencoder.CrossEncoderRanker', 'CrossEncoderRanker', (['params'], {}), '(params)\n', (4639, 4647), False, 'from blink.joint.crossencoder import CrossEncoderRanker\n'), ((4936, 4953), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (4947, 4953), False, 'import random\n'), ((4958, 4978), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4972, 4978), True, 'import numpy as np\n'), ((4983, 5006), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (5000, 5006), False, 'import torch\n'), ((5123, 5191), 'os.path.join', 'os.path.join', (["params['data_path']", "('joint_' + params['mode'] + '.t7')"], {}), "(params['data_path'], 'joint_' + params['mode'] + '.t7')\n", (5135, 5191), False, 'import os\n'), ((5229, 5246), 'torch.load', 'torch.load', (['fname'], {}), '(fname)\n', (5239, 5246), False, 'import torch\n'), ((6538, 6606), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(_ctxt_data, (_ctxt_row, _ctxt_col))'], {'shape': 'sparse_shape'}), '((_ctxt_data, (_ctxt_row, _ctxt_col)), shape=sparse_shape)\n', (6548, 6606), False, 'from scipy.sparse import coo_matrix\n'), ((6781, 6849), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(_cand_data, (_cand_row, _cand_col))'], {'shape': 'sparse_shape'}), '((_cand_data, (_cand_row, _cand_col)), shape=sparse_shape)\n', (6791, 6849), False, 'from scipy.sparse import coo_matrix\n'), ((6931, 6985), 'blink.joint.joint_eval.evaluation.compute_coref_metrics', 'compute_coref_metrics', (['gold_coref_clusters', 'ctxt_graph'], {}), '(gold_coref_clusters, ctxt_graph)\n', (6952, 6985), False, 'from blink.joint.joint_eval.evaluation import compute_coref_metrics, compute_linking_metrics, compute_joint_metrics, _get_global_maximum_spanning_tree\n'), ((7116, 7169), 
'blink.joint.joint_eval.evaluation.compute_linking_metrics', 'compute_linking_metrics', (['cand_graph', 'gold_linking_map'], {}), '(cand_graph, gold_linking_map)\n', (7139, 7169), False, 'from blink.joint.joint_eval.evaluation import compute_coref_metrics, compute_linking_metrics, compute_joint_metrics, _get_global_maximum_spanning_tree\n'), ((7279, 7326), 'blink.joint.joint_eval.evaluation._get_global_maximum_spanning_tree', '_get_global_maximum_spanning_tree', (['[ctxt_graph]'], {}), '([ctxt_graph])\n', (7312, 7326), False, 'from blink.joint.joint_eval.evaluation import compute_coref_metrics, compute_linking_metrics, compute_joint_metrics, _get_global_maximum_spanning_tree\n'), ((8249, 8293), 'os.path.join', 'os.path.join', (['eval_output_path', '"""results.t7"""'], {}), "(eval_output_path, 'results.t7')\n", (8261, 8293), False, 'import os\n'), ((8298, 8331), 'torch.save', 'torch.save', (['save_data', 'save_fname'], {}), '(save_data, save_fname)\n', (8308, 8331), False, 'import torch\n'), ((8374, 8406), 'blink.common.params.BlinkParser', 'BlinkParser', ([], {'add_model_args': '(True)'}), '(add_model_args=True)\n', (8385, 8406), False, 'from blink.common.params import BlinkParser\n'), ((3270, 3285), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3283, 3285), False, 'import torch\n'), ((4052, 4084), 'os.path.exists', 'os.path.exists', (['eval_output_path'], {}), '(eval_output_path)\n', (4066, 4084), False, 'import os\n'), ((4094, 4123), 'os.makedirs', 'os.makedirs', (['eval_output_path'], {}), '(eval_output_path)\n', (4105, 4123), False, 'import os\n'), ((5047, 5079), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (5073, 5079), False, 'import torch\n'), ((3753, 3777), 'torch.cat', 'torch.cat', (['edge_vertices'], {}), '(edge_vertices)\n', (3762, 3777), False, 'import torch\n'), ((3814, 3836), 'torch.cat', 'torch.cat', (['edge_scores'], {}), '(edge_scores)\n', (3823, 3836), False, 'import torch\n'), ((7995, 8040), 
'json.dumps', 'json.dumps', (['metrics'], {'sort_keys': '(True)', 'indent': '(4)'}), '(metrics, sort_keys=True, indent=4)\n', (8005, 8040), False, 'import json\n')] |
import numpy as np
from .. import Geometry, Line, LineSegmentMaterial
class BoxHelper(Line):
    """A wireframe box, commonly used to visualize bounding boxes.

    Parameters:
        size (float): The length of the box's edges (default 1).
        thickness (float): The thickness of the lines (default 1 px).
    """

    def __init__(self, size=1.0, thickness=1):
        self._size = size
        # 12 edges * 2 endpoints = 24 vertices of a unit cube, listed as
        # line segments (consecutive row pairs form one segment each).
        positions = np.array(
            [
                [0, 0, 0],  # bottom edges
                [1, 0, 0],
                [0, 0, 0],
                [0, 0, 1],
                [1, 0, 1],
                [1, 0, 0],
                [1, 0, 1],
                [0, 0, 1],
                [0, 1, 0],  # top edges
                [1, 1, 0],
                [0, 1, 0],
                [0, 1, 1],
                [1, 1, 1],
                [1, 1, 0],
                [1, 1, 1],
                [0, 1, 1],
                [0, 0, 0],  # side edges
                [0, 1, 0],
                [1, 0, 0],
                [1, 1, 0],
                [0, 0, 1],
                [0, 1, 1],
                [1, 0, 1],
                [1, 1, 1],
            ],
            dtype="f4",
        )
        # center the unit cube on the origin, then scale to the given size
        positions -= 0.5
        positions *= self._size
        geometry = Geometry(positions=positions)
        material = LineSegmentMaterial(color=(1, 0, 0), thickness=thickness, aa=True)
        super().__init__(geometry, material)

    def set_transform_by_aabb(self, aabb):
        """Set the position and scale attributes
        based on a given bounding box.

        Parameters:
            aabb (ndarray): The position and scale attributes
                will be configured such that the helper
                will match the given bounding box. The array
                is expected to have shape (2, 3), where the
                two vectors represent the minimum and maximum
                coordinates of the axis-aligned bounding box.
        """
        aabb = np.asarray(aabb)
        if aabb.shape != (2, 3):
            raise ValueError(
                "The given array does not appear to represent "
                "an axis-aligned bounding box, ensure "
                "the shape is (2, 3). Shape given: "
                f"{aabb.shape}"
            )
        # place the helper at the box center and scale it so the unit
        # box (of edge length self._size) spans the box diagonal
        diagonal = aabb[1] - aabb[0]
        center = aabb[0] + diagonal * 0.5
        scale = diagonal / self._size
        self.position.set(*center)
        self.scale.set(*scale)

    def set_transform_by_object(self, object, space="world"):
        """Set the position and scale attributes
        based on the bounding box of another object.

        Parameters:
            object (WorldObject): The position and scale attributes
                will be configured such that the helper
                will match the bounding box of the given object.
            space (string, optional): If set to "world"
                (the default) the world space bounding box will
                be used as reference. If equal to "local", the
                object's local space bounding box of its geometry
                will be used instead.

        :Examples:

        World-space bounding box visualization:

        .. code-block:: py

            box = gfx.BoxHelper()
            box.set_transform_by_object(mesh)
            scene.add(box)

        Local-space bounding box visualization:

        .. code-block:: py

            box = gfx.BoxHelper()
            box.set_transform_by_object(mesh, space="local")
            mesh.add(box)
        """
        aabb = None
        if space not in {"world", "local"}:
            # BUG FIX: the two string fragments previously joined without
            # a space, yielding '... either "world"or "local" ...'
            raise ValueError(
                'Space argument must be either "world" '
                f'or "local". Given value: {space}'
            )
        if space == "world":
            aabb = object.get_world_bounding_box()
        elif space == "local" and object.geometry is not None:
            aabb = object.geometry.bounding_box()
        if aabb is None:
            raise ValueError(
                "No bounding box could be determined "
                "for the given object, it (and its "
                "children) may not define any geometry"
            )
        self.set_transform_by_aabb(aabb)
| [
"numpy.array",
"numpy.asarray"
] | [((420, 719), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0], [1, 0, 1\n ], [0, 0, 1], [0, 1, 0], [1, 1, 0], [0, 1, 0], [0, 1, 1], [1, 1, 1], [1,\n 1, 0], [1, 1, 1], [0, 1, 1], [0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 0],\n [0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]]'], {'dtype': '"""f4"""'}), "([[0, 0, 0], [1, 0, 0], [0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0],\n [1, 0, 1], [0, 0, 1], [0, 1, 0], [1, 1, 0], [0, 1, 0], [0, 1, 1], [1, 1,\n 1], [1, 1, 0], [1, 1, 1], [0, 1, 1], [0, 0, 0], [0, 1, 0], [1, 0, 0], [\n 1, 1, 0], [0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]], dtype='f4')\n", (428, 719), True, 'import numpy as np\n'), ((1966, 1982), 'numpy.asarray', 'np.asarray', (['aabb'], {}), '(aabb)\n', (1976, 1982), True, 'import numpy as np\n')] |
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def unison_shuffled_copies(a, b):
    """Shuffle two equal-length arrays with one shared random permutation.

    Corresponding elements stay aligned: a[i] and b[i] end up at the
    same position in both outputs.
    """
    assert len(a) == len(b)
    order = np.random.permutation(len(a))
    return a[order], b[order]
def plot_swing(swing_data, shot_type=None, dist=None):
    """Plot the six IMU channels of one swing in a 2x3 subplot grid.

    swing_data: iterable of six traces, one per channel, in the order
    Ax, Ay, Az (accelerometer) then Gx, Gy, Gz (gyroscope).
    shot_type/dist: optional labels shown in the figure title.
    """
    channel_names = ["Ax", "Ay", "Az", "Gx", "Gy", "Gz"]
    plt.figure(figsize=(15, 7))
    for position, trace in enumerate(swing_data):
        plt.subplot(2, 3, position + 1)
        plt.title(channel_names[position])
        plt.plot(trace)
    if shot_type is not None:
        plt.suptitle(f"{shot_type}, {dist}yds")
def load_data(path):
    """Load all swing CSVs under ``path`` into shuffled training arrays.

    Each CSV holds one swing: sensor channels plus two trailing label
    columns ("shot_type", "distance") and a pandas index column
    ("Unnamed: 0") that is discarded.

    Args:
        path: directory prefix; files matching ``path + "*.csv"`` are read.

    Returns:
        (X, y) shuffled in unison via ``unison_shuffled_copies``:
        X[i] is the swing's feature columns (labels dropped), transposed;
        y[i] is a length-10 vector with a 1 at the shot_type index and
        the shot distance in the final slot.
    """
    total_metrics = 10  # one-hot shot-type slots + 1 distance slot
    X_data = []
    y_data = []
    for csv in glob.glob(path + "*.csv"):
        frame = pd.read_csv(csv).drop(columns="Unnamed: 0")
        target = np.zeros(total_metrics)
        target[frame["shot_type"][0]] = 1
        target[-1] = frame["distance"][0]
        # drop the two label columns, transpose to (channels, samples)
        X_data.append(frame.values[:, :-2].T)
        y_data.append(target)
    return unison_shuffled_copies(np.array(X_data), np.array(y_data))
def plot_counts(y, xticks_label):
    """Bar-plot how many samples fall into each one-hot class of ``y``.

    The class of each row is the argmax over all columns except the
    last (which holds the distance, not a class indicator).
    """
    class_ids = np.argmax(y[:, :-1], axis=1)
    axes = pd.Series(class_ids).value_counts(sort=False).plot.bar()
    axes.set(ylabel="Count")
    axes.set_xticklabels(xticks_label)
"pandas.read_csv",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.suptitle",
"glob.glob"
] | [((377, 404), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 7)'}), '(figsize=(15, 7))\n', (387, 404), True, 'import matplotlib.pyplot as plt\n'), ((781, 806), 'glob.glob', 'glob.glob', (["(path + '*.csv')"], {}), "(path + '*.csv')\n", (790, 806), False, 'import glob\n'), ((1074, 1090), 'numpy.array', 'np.array', (['X_data'], {}), '(X_data)\n', (1082, 1090), True, 'import numpy as np\n'), ((1104, 1120), 'numpy.array', 'np.array', (['y_data'], {}), '(y_data)\n', (1112, 1120), True, 'import numpy as np\n'), ((456, 482), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(idx + 1)'], {}), '(2, 3, idx + 1)\n', (467, 482), True, 'import matplotlib.pyplot as plt\n'), ((487, 510), 'matplotlib.pyplot.title', 'plt.title', (['columns[idx]'], {}), '(columns[idx])\n', (496, 510), True, 'import matplotlib.pyplot as plt\n'), ((519, 533), 'matplotlib.pyplot.plot', 'plt.plot', (['line'], {}), '(line)\n', (527, 533), True, 'import matplotlib.pyplot as plt\n'), ((572, 611), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""{shot_type}, {dist}yds"""'], {}), "(f'{shot_type}, {dist}yds')\n", (584, 611), True, 'import matplotlib.pyplot as plt\n'), ((876, 898), 'numpy.zeros', 'np.zeros', (['total_metics'], {}), '(total_metics)\n', (884, 898), True, 'import numpy as np\n'), ((820, 836), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (831, 836), True, 'import pandas as pd\n'), ((1230, 1258), 'numpy.argmax', 'np.argmax', (['y[:, :-1]'], {'axis': '(1)'}), '(y[:, :-1], axis=1)\n', (1239, 1258), True, 'import numpy as np\n')] |
"""
This script shows you how to select gripper for an environment.
This is controlled by gripper_type keyword argument
demo script:
python3 examples/policy.py GQCNN-4.0-PJ --depth_image <depth.npy> --segmask <seg.png> --camera_intr data/calib/phoxi/phoxi.intr
"""
import numpy as np
import math
import cv2
import robosuite as suite
from robosuite.wrappers import GymWrapper
from mujoco_py import load_model_from_path, MjSim, MjViewer, MjRenderContextOffscreen, MjRenderContext
from dexnet import DexNet
from ik_controller import robosuite_IKmover
from robosuite.environments import SawyerLift_vj
def set_camera_birdview(viewer):
    """Aim the render camera straight down at the table center.

    The table center sits at (0.6, -0.15, 0.8) in world coordinates;
    an elevation of -90 degrees looks directly downward from a
    distance of 0.8. (See the original rig notes: with the default
    fov of 45 the resulting intrinsics are fx = fy ~= 544.4 with
    principal point (258, 193), and the camera sits 0.9 above the
    table along z, rotated to the robot's frame with azimuth -180.)
    """
    cam = viewer.cam
    cam.fixedcamid = 1
    cam.distance = 0.8
    cam.elevation = -90
    cam.lookat[0] = 0.6
    cam.lookat[1] = -0.15
    cam.lookat[2] = 0.8
if __name__ == "__main__":
    # Border margins (px) blanked out of the depth map / segmask, and the
    # offscreen render resolution for the top-down camera.
    top_pad, left_pad = 110, 50
    width, height = 516, 386
    # create simulation environment
    # (alternative pick-and-place environment, kept for reference)
    # world = suite.make(
    #     "SawyerPickPlace",
    #     gripper_type="TwoFingerGripper",
    #     use_camera_obs=False,  # do not use pixel observations
    #     has_offscreen_renderer=False,  # not needed since not using pixel obs
    #     has_renderer=True,  # make sure we can render to the screen
    #     reward_shaping=False,  # use dense rewards
    #     control_freq=100,  # control should happen fast enough so that simulation looks smooth
    #     ignore_done=True
    # )
    world = SawyerLift_vj(
        ignore_done=True,
        gripper_type="TwoFingerGripper",
        use_camera_obs=False,
        has_offscreen_renderer=False,
        has_renderer=True,
        camera_name=None,
        control_freq=100)
    world.reset()
    world.mode = 'human'
    ik_wrapper = robosuite_IKmover(world)
    sim = world.sim
    # render a top-down RGB image and depth map from an offscreen context
    viewer = MjRenderContextOffscreen(sim, 0)
    set_camera_birdview(viewer)
    viewer.render(width, height)
    image = np.asarray(viewer.read_pixels(width, height, depth=False)[:, :, :], dtype=np.uint8)
    depth = np.asarray((viewer.read_pixels(width, height, depth=True)[1]))
    # , depth[left_pad, top_pad], depth[-left_pad, -top_pad]
    # use the depth at the image center as the table-plane reference:
    # everything at or beyond it is treated as background
    cdepth = depth[height//2, width//2]
    print(cdepth)
    depth[depth > cdepth] = cdepth
    seg = depth != cdepth
    # blank out the border margins in both depth and segmask
    depth[:, :top_pad], depth[:, -top_pad:] = cdepth, cdepth
    depth[:left_pad, :], depth[-left_pad:, :] = cdepth, cdepth
    # remember the normalization so metric depth can be recovered later:
    # metric = normalized * scale + offset
    offset, scale = np.min(depth), np.max(depth) - np.min(depth)
    depth = (depth - np.min(depth)) / (np.max(depth) - np.min(depth))
    seg[:, :top_pad], seg[:, -top_pad:] = False, False
    seg[:left_pad, :], seg[-left_pad:, :] = False, False
    # run GQ-CNN grasp planning on the normalized depth + segmask
    dexnet = DexNet()
    dexnet.prepare_dexnet()
    print("dexnet prepared")
    state, rgbd_im = dexnet.get_state(depth, seg)
    action = dexnet.get_action(state)
    '''
    get depth of the action and the x, y
    apply inverse from camera coordinate to the world coordinate
    '''
    dexnet.visualization(action, rgbd_im, offset, scale)
    # convert the grasp depth back from normalized to metric units
    action.grasp.depth = action.grasp.depth * scale + offset
    rigid_transform = action.grasp.pose()
    print('center: {}, {}'.format(action.grasp.center.x, action.grasp.center.y))
    print('depth: {}'.format(action.grasp.depth))
    print('rot: {}'.format(rigid_transform.rotation))
    print('tra: {}'.format(rigid_transform.translation))
    print('camera intr: {}'.format(dir(action.grasp.camera_intr)))
    print('proj matrix: {}'.format(action.grasp.camera_intr.proj_matrix))
    print('other attr: {}'.format(dir(action.grasp)))
    # gripper_pos = (rigid_transform.rotation.T @ rigid_transform.translation).flatten()
    # gripper_pos[2] = (1-gripper_pos[2]) * scale + offset
    # gripper_pos[0] += 0.6
    # gripper_pos[1] += -0.11
    # shift the grasp from camera to world coordinates; the constant is the
    # table-center offset (0.6, -0.15) plus a hand-tuned z adjustment
    # NOTE(review): -0.11 z offset looks empirically tuned — confirm
    gripper_pos = rigid_transform.translation.flatten() + np.array([0.6, -0.15, -0.11])
    print('gripper position: {}'.format(gripper_pos))
    # world_coord = invCamR @ (gripper_pos + invCamT).reshape(3,1)
    # world_coord[2] += 1
    # print(world_coord)
    # print('x: {}, y: {}'.format(action.grasp.center.x, action.grasp.center.y))
    # print(action.grasp.depth)
    # save the intermediate images for offline inspection
    cv2.imwrite('test_dataset/seg.png', seg * 255)
    # # normalize the depth
    np.save('test_dataset/depth_0.npy', depth)
    cv2.imwrite('test_dataset/depth.png', depth * 255)
    cv2.imwrite('test_dataset/visual.png', image)
    # move to the grasp pose via IK, then lift back to the starting height
    initial_pos = np.copy(world.observation_spec()['eef_pos'])
    ik_wrapper.move(gripper_pos, rigid_transform.rotation)
    ik_wrapper.lift(initial_pos)
    # viewer = MjViewer(sim)
    # while True:
    #     # viewer.add_marker(pos=np.array([0.6, -0.15, 0.8 + 0.8]),
    #     #                   label=str('camera position'))
    #     viewer.add_marker(size=np.ones(3) * 0.01,
    #                       pos=gripper_pos.flatten(),
    #                       label=str('target position'))
    #     viewer.render()
"cv2.imwrite",
"ik_controller.robosuite_IKmover",
"numpy.max",
"numpy.array",
"dexnet.DexNet",
"robosuite.environments.SawyerLift_vj",
"numpy.min",
"mujoco_py.MjRenderContextOffscreen",
"numpy.save"
] | [((2136, 2315), 'robosuite.environments.SawyerLift_vj', 'SawyerLift_vj', ([], {'ignore_done': '(True)', 'gripper_type': '"""TwoFingerGripper"""', 'use_camera_obs': '(False)', 'has_offscreen_renderer': '(False)', 'has_renderer': '(True)', 'camera_name': 'None', 'control_freq': '(100)'}), "(ignore_done=True, gripper_type='TwoFingerGripper',\n use_camera_obs=False, has_offscreen_renderer=False, has_renderer=True,\n camera_name=None, control_freq=100)\n", (2149, 2315), False, 'from robosuite.environments import SawyerLift_vj\n'), ((2426, 2450), 'ik_controller.robosuite_IKmover', 'robosuite_IKmover', (['world'], {}), '(world)\n', (2443, 2450), False, 'from ik_controller import robosuite_IKmover\n'), ((2485, 2517), 'mujoco_py.MjRenderContextOffscreen', 'MjRenderContextOffscreen', (['sim', '(0)'], {}), '(sim, 0)\n', (2509, 2517), False, 'from mujoco_py import load_model_from_path, MjSim, MjViewer, MjRenderContextOffscreen, MjRenderContext\n'), ((3325, 3333), 'dexnet.DexNet', 'DexNet', ([], {}), '()\n', (3331, 3333), False, 'from dexnet import DexNet\n'), ((4815, 4861), 'cv2.imwrite', 'cv2.imwrite', (['"""test_dataset/seg.png"""', '(seg * 255)'], {}), "('test_dataset/seg.png', seg * 255)\n", (4826, 4861), False, 'import cv2\n'), ((4894, 4936), 'numpy.save', 'np.save', (['"""test_dataset/depth_0.npy"""', 'depth'], {}), "('test_dataset/depth_0.npy', depth)\n", (4901, 4936), True, 'import numpy as np\n'), ((4941, 4991), 'cv2.imwrite', 'cv2.imwrite', (['"""test_dataset/depth.png"""', '(depth * 255)'], {}), "('test_dataset/depth.png', depth * 255)\n", (4952, 4991), False, 'import cv2\n'), ((4996, 5041), 'cv2.imwrite', 'cv2.imwrite', (['"""test_dataset/visual.png"""', 'image'], {}), "('test_dataset/visual.png', image)\n", (5007, 5041), False, 'import cv2\n'), ((3079, 3092), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (3085, 3092), True, 'import numpy as np\n'), ((4469, 4498), 'numpy.array', 'np.array', (['[0.6, -0.15, -0.11]'], {}), '([0.6, -0.15, -0.11])\n', (4477, 
4498), True, 'import numpy as np\n'), ((3094, 3107), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (3100, 3107), True, 'import numpy as np\n'), ((3110, 3123), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (3116, 3123), True, 'import numpy as np\n'), ((3145, 3158), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (3151, 3158), True, 'import numpy as np\n'), ((3163, 3176), 'numpy.max', 'np.max', (['depth'], {}), '(depth)\n', (3169, 3176), True, 'import numpy as np\n'), ((3179, 3192), 'numpy.min', 'np.min', (['depth'], {}), '(depth)\n', (3185, 3192), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def calculate_effect_of_credit_drop(model, X, credit_factors):
    """Estimate, per credit-limit factor, the effect of lowering credit.

    For each factor f, the "credit_given" column is multiplied by f and
    default probabilities are re-predicted. Customers whose predicted
    default probability does not drop get NaN.

    Args:
        model: fitted classifier with ``predict_proba``.
        X: feature DataFrame containing a "credit_given" column.
        credit_factors: iterable of multipliers (e.g. 0.5 halves credit).

    Returns:
        Three dicts keyed by factor: drop in default probability
        (percentage points), credit amount removed, and the cost in
        credit per percentage point of risk reduction.
    """
    baseline_probs = model.predict_proba(X)[:, 1]
    all_probs_changes = {}
    all_credit_amount_changes = {}
    all_expected_costs_in_credit = {}
    for factor in credit_factors:
        lowered = X.assign(credit_given=X["credit_given"] * factor)
        new_probs = model.predict_proba(lowered)[:, 1]
        drop_in_pp = (baseline_probs - new_probs) * 100
        # keep only customers whose risk actually drops
        drop_in_pp = np.where(drop_in_pp <= 0, np.nan, drop_in_pp)
        credit_lost = np.array(np.round(X.credit_given * (1 - factor), 0))
        all_probs_changes[factor] = drop_in_pp
        all_credit_amount_changes[factor] = credit_lost
        all_expected_costs_in_credit[factor] = np.divide(credit_lost, drop_in_pp)
    return all_probs_changes, all_credit_amount_changes, all_expected_costs_in_credit
def order_effects_within_customers(
    X,
    credit_factors,
    all_probs_changes,
    all_credit_amount_changes,
    all_expected_costs_in_credit,
):
    """Order each customer's credit-drop options from cheapest upward.

    For every customer, the candidate factors are sorted by cost
    (credit lost per percentage point of risk reduced). Options are
    kept only when they deepen the cut (smaller factor than any
    cheaper option already kept) and actually reduce risk. The kept
    options are then re-expressed incrementally: each entry is the
    *additional* probability drop, credit loss, and marginal cost
    relative to the previous kept option.

    Args:
        X: customer feature frame (only its length is used).
        credit_factors: the factors evaluated upstream.
        all_probs_changes / all_credit_amount_changes /
        all_expected_costs_in_credit: per-factor arrays as produced by
            ``calculate_effect_of_credit_drop`` (NaN where risk did
            not drop).

    Returns:
        Four parallel lists (one inner list per customer): marginal
        costs, factors, incremental credit losses, incremental
        probability drops.
    """
    all_processed_costs = []
    all_processed_factors = []
    all_processed_credit_changes = []
    all_processed_probs_changes = []
    for customer in range(len(X)):
        # gather this customer's numbers for every candidate factor
        costs = []
        factors = []
        credit_change = []
        probs_change = []
        for factor in credit_factors:
            costs.append(all_expected_costs_in_credit[factor][customer])
            factors.append(factor)
            credit_change.append(all_credit_amount_changes[factor][customer])
            probs_change.append(all_probs_changes[factor][customer])
        # sort everything by cost (cheapest risk reduction first);
        # NaN costs sort to the end
        sorted_costs = sorted(costs)
        sorted_factors = [x for _, x in sorted(zip(costs, factors))]
        sorted_credit_change = [x for _, x in sorted(zip(costs, credit_change))]
        sorted_probs_change = [x for _, x in sorted(zip(costs, probs_change))]
        # assign na to costs and credit change if factor of the next
        # best change is not bigger than in the previous drop, i.e. keep
        # an option only if it cuts credit deeper (smaller factor) than
        # every cheaper option already kept.
        # NOTE(review): `not smallest_factor` would also treat a factor
        # of 0 as "unset" — fine as long as 0 is never a credit factor.
        smallest_factor = None
        for i, factor in enumerate(sorted_factors):
            if (not smallest_factor or factor < smallest_factor) and (
                not np.isnan(sorted_costs[i])
            ):
                smallest_factor = factor
            else:
                sorted_costs[i] = np.nan
        # removing indices from list where costs is nan
        # (loop variables below are generically named; each comprehension
        # filters its own list by the NaN mask in sorted_costs)
        sorted_factors = [
            factor
            for factor, cost in zip(sorted_factors, sorted_costs)
            if not np.isnan(cost)
        ]
        sorted_credit_change = [
            factor
            for factor, cost in zip(sorted_credit_change, sorted_costs)
            if not np.isnan(cost)
        ]
        sorted_probs_change = [
            probs
            for probs, cost in zip(sorted_probs_change, sorted_costs)
            if not np.isnan(cost)
        ]
        sorted_costs = [cost for cost in sorted_costs if not np.isnan(cost)]
        if len(sorted_costs) > 1:
            # change in probs is the current value minus previous except for
            # the first one that stays the same
            sorted_probs_change = [
                current_change - previous_change
                for current_change, previous_change in zip(
                    sorted_probs_change, [0] + sorted_probs_change[:-1]
                )
            ]
            # keeping only values where the default risk is actually lessened
            sorted_credit_change = [
                change
                for change, probs in zip(sorted_credit_change, sorted_probs_change)
                if probs > 0
            ]
            sorted_factors = [
                factor
                for factor, probs in zip(sorted_factors, sorted_probs_change)
                if probs > 0
            ]
            sorted_probs_change = [probs for probs in sorted_probs_change if probs > 0]
            # calculating the change in credit for each viable option
            # (incremental: current cumulative loss minus the previous one)
            sorted_credit_change = [
                current_change - previous_change
                for current_change, previous_change in zip(
                    sorted_credit_change, [0] + sorted_credit_change[:-1]
                )
            ]
            # calculating the cost (percent default drop per dollar) for
            # each viable option for credit limit drop
            sorted_costs = [
                credit_change / probs_change
                for credit_change, probs_change in zip(
                    sorted_credit_change, sorted_probs_change
                )
            ]
        all_processed_costs.append(sorted_costs)
        all_processed_factors.append(sorted_factors)
        all_processed_credit_changes.append(sorted_credit_change)
        all_processed_probs_changes.append(sorted_probs_change)
    return (
        all_processed_costs,
        all_processed_factors,
        all_processed_credit_changes,
        all_processed_probs_changes,
    )
def form_results_to_ordered_df(
    y,
    X,
    probs,
    all_processed_costs,
    all_processed_factors,
    all_processed_credit_changes,
    all_processed_probs_changes,
):
    """Flatten the per-customer credit-limit-drop options into one DataFrame
    ordered by cost-effectiveness, with cumulative impact columns.

    Parameters
    ----------
    y : array-like of 0/1
        Default indicator per customer (1 = defaulted).
    X : pandas.DataFrame
        Feature frame; only the "credit_given" column is read here.
    probs : array-like
        Predicted default probability per customer.
    all_processed_costs, all_processed_factors,
    all_processed_credit_changes, all_processed_probs_changes : list of lists
        Per-customer option lists (one inner list per customer), as produced
        by the upstream processing step; inner lists are already ordered from
        the cheapest option onward.

    Returns
    -------
    pandas.DataFrame
        One row per (customer, option), sorted ascending by "costs"
        (credit lost per unit of default probability removed), with
        cumulative percentage columns describing defaults prevented, credit
        cost, and customers/defaulters/non-defaulters affected.

    NOTE(review): relies on module-level `pd` and `np` imports (not visible
    in this chunk).
    """
    costs_df = pd.DataFrame(
        {
            "defaulted": y,
            "credit_given": X["credit_given"],
            "prob": probs,
            "factors": all_processed_factors,
            "costs": all_processed_costs,
            "credit_losses": all_processed_credit_changes,
            "probs_changes": all_processed_probs_changes,
        }
    )
    # unpacking the list of options and then sorting by it
    # the within customer drops are already sorted (we start from the best one)
    # the order within customers is in right order (smaller credit drops first)
    # NOTE(review): each np.array(...) below is built from a one-column frame,
    # so it is shape (n, 1); assigning such arrays as columns depends on
    # pandas broadcasting behavior — confirm against the pinned pandas version.
    factors = np.array(costs_df[["factors"]].explode("factors"))
    costs = np.array(costs_df[["costs"]].explode("costs"))
    credit_losses = np.array(costs_df[["credit_losses"]].explode("credit_losses"))
    probs_changes = np.array(costs_df[["probs_changes"]].explode("probs_changes"))
    # df to same size as exploded columns (one row per option; the customer
    # index is duplicated across a customer's options)
    costs_df = costs_df.explode("factors")
    # overwriting old columns
    costs_df = costs_df.assign(
        factors=factors,
        costs=costs,
        credit_losses=credit_losses,
        probs_changes=probs_changes,
    )
    costs_df.sort_values(by="costs", inplace=True)
    # after sorting, the first occurrence of each (duplicated) customer index
    # marks that customer's single cheapest surviving option
    first_instance_of_customer = ~costs_df.index.duplicated()
    costs_df = costs_df.assign(first_instance_of_customer=first_instance_of_customer)
    # cumulative metrics: all "perc" columns are percentages of their
    # respective totals, computed over the cost-ordered rows
    costs_df = costs_df.assign(
        defaults_prevented_perc=(
            costs_df["probs_changes"].cumsum()
            / costs_df["first_instance_of_customer"].sum()
        ),
        credit_cost_perc=(
            costs_df["credit_losses"].cumsum()
            * 100
            / costs_df["credit_given"]
            .multiply(costs_df["first_instance_of_customer"])
            .sum()
        ),
        customers_affected_perc=costs_df["first_instance_of_customer"].cumsum()
        * 100
        / costs_df["first_instance_of_customer"].sum(),
        defaulters_affected_perc=costs_df["first_instance_of_customer"]
        .multiply(costs_df["defaulted"])
        .cumsum()
        * 100
        / costs_df["first_instance_of_customer"].multiply(costs_df["defaulted"]).sum(),
        non_defaulters_affected_perc=costs_df["first_instance_of_customer"]
        .multiply(costs_df["defaulted"].subtract(1).abs())
        .cumsum()
        * 100
        / costs_df["first_instance_of_customer"]
        .multiply(costs_df["defaulted"].subtract(1).abs())
        .sum(),
    )
    costs_df.reset_index(inplace=True)
    return costs_df
| [
"numpy.divide",
"numpy.where",
"numpy.isnan",
"pandas.DataFrame",
"numpy.round"
] | [((5209, 5458), 'pandas.DataFrame', 'pd.DataFrame', (["{'defaulted': y, 'credit_given': X['credit_given'], 'prob': probs,\n 'factors': all_processed_factors, 'costs': all_processed_costs,\n 'credit_losses': all_processed_credit_changes, 'probs_changes':\n all_processed_probs_changes}"], {}), "({'defaulted': y, 'credit_given': X['credit_given'], 'prob':\n probs, 'factors': all_processed_factors, 'costs': all_processed_costs,\n 'credit_losses': all_processed_credit_changes, 'probs_changes':\n all_processed_probs_changes})\n", (5221, 5458), True, 'import pandas as pd\n'), ((509, 558), 'numpy.where', 'np.where', (['(probs_change <= 0)', 'np.nan', 'probs_change'], {}), '(probs_change <= 0, np.nan, probs_change)\n', (517, 558), True, 'import numpy as np\n'), ((662, 700), 'numpy.divide', 'np.divide', (['credit_change', 'probs_change'], {}), '(credit_change, probs_change)\n', (671, 700), True, 'import numpy as np\n'), ((593, 635), 'numpy.round', 'np.round', (['(X.credit_given * (1 - factor))', '(0)'], {}), '(X.credit_given * (1 - factor), 0)\n', (601, 635), True, 'import numpy as np\n'), ((2247, 2272), 'numpy.isnan', 'np.isnan', (['sorted_costs[i]'], {}), '(sorted_costs[i])\n', (2255, 2272), True, 'import numpy as np\n'), ((2576, 2590), 'numpy.isnan', 'np.isnan', (['cost'], {}), '(cost)\n', (2584, 2590), True, 'import numpy as np\n'), ((2744, 2758), 'numpy.isnan', 'np.isnan', (['cost'], {}), '(cost)\n', (2752, 2758), True, 'import numpy as np\n'), ((2908, 2922), 'numpy.isnan', 'np.isnan', (['cost'], {}), '(cost)\n', (2916, 2922), True, 'import numpy as np\n'), ((2994, 3008), 'numpy.isnan', 'np.isnan', (['cost'], {}), '(cost)\n', (3002, 3008), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
from tqdm import tqdm
from functools import reduce
import disk.funcs as dfn
import h5py
import os
import glob
import sys
from matplotlib import pyplot as plt
class binary_mbh(object):
    """Massive-black-hole binary population loaded from an HDF5 evolution file.

    Holds per-binary evolution tracks (separation, hardening rates of the
    individual mechanisms, accretion rate, times and scale factors) together
    with host-subhalo metadata, and provides helpers to locate the separation
    at which each hardening mechanism dominates and to integrate the accreted
    mass over various inspiral phases.

    Hardening-rate suffixes: df = dynamical friction, lc = loss cone,
    vd = viscous (circumbinary) disk, gw = gravitational waves.
    """
    def __init__(self, filename):
        self.parse_file(filename)
    def parse_file(self, filename, cgs_units=True):
        """Read all datasets from *filename* (HDF5) into numpy attribute arrays.

        Per-line unit comments below are cgs (g, cm, s).  `cgs_units` only
        controls the informational print; no conversion is performed.
        """
        self.filename = filename
        if cgs_units:
            print ('The cgs units are used!')
        with h5py.File(self.filename, 'r') as f:
            self.SubhaloMassInHalfRadType = np.array(f['meta/SubhaloMassInHalfRadType'])
            self.SubhaloSFRinHalfRad = np.array(f['meta/SubhaloSFRinHalfRad'])
            self.snapshot = np.array(f['meta/snapshot'])
            self.subhalo_id = np.array(f['meta/subhalo_id'])
            self.masses = np.array(f['evolution/masses']) #g
            self.mdot = np.array(f['evolution/mdot_eff']) #g/s
            self.sep = np.array(f['evolution/sep']) #cm
            self.dadt = np.array(f['evolution/dadt']) #cm/s
            self.dadt_df = np.array(f['evolution/dadt_df']) #cm/s
            self.dadt_gw = np.array(f['evolution/dadt_gw']) #cm/s
            self.dadt_lc = np.array(f['evolution/dadt_lc']) #cm/s
            self.dadt_vd = np.array(f['evolution/dadt_vd']) #cm/s
            self.scales = np.array(f['evolution/scales']) #NA
            self.times = np.array(f['evolution/times']) #s
            self.eccen = np.array(f['evolution/eccen']) #NA
        # derived quantities; m1/m2 are columns 0/1 of the mass table
        self.z = (1./self.scales)-1 #NA
        self.m1 = self.masses[:,0]
        self.m2 = self.masses[:,1]
        self.mtot = self.m1+self.m2
        self.q = self.m2/self.m1
    def find_Rlc(self):
        """Return an (N, 3) array [binary index, step index, separation] at the
        first step where loss-cone hardening dominates all other mechanisms.

        NOTE(review): the bare `except:` fills a row with NaN whenever no such
        step exists (empty intersection) — but it also hides any other error.
        """
        R_lc = np.zeros((self.sep.shape[0],3))
        for i in range(len(self.sep)):
            try:
                idx = reduce(np.intersect1d,(np.where(np.abs(self.dadt_lc[i])>np.abs(self.dadt_df[i]))[0],
                                                np.where(np.abs(self.dadt_lc[i])>np.abs(self.dadt_vd[i]))[0],
                                                np.where(np.abs(self.dadt_lc[i])>np.abs(self.dadt_gw[i]))[0]))[0]
                R_lc[i]=[i,idx,self.sep[i][idx]]
            except:
                R_lc[i]=[i,np.nan,np.nan]
        return R_lc
    def find_Rvd(self):
        """Same as find_Rlc, but for viscous-disk-dominated hardening."""
        R_vd = np.zeros((self.sep.shape[0],3))
        for i in range(len(self.sep)):
            try:
                idx = reduce(np.intersect1d,(np.where(np.abs(self.dadt_vd[i])>np.abs(self.dadt_df[i]))[0],
                                                np.where(np.abs(self.dadt_vd[i])>np.abs(self.dadt_lc[i]))[0],
                                                np.where(np.abs(self.dadt_vd[i])>np.abs(self.dadt_gw[i]))[0]))[0]
                R_vd[i]=[i,idx,self.sep[i][idx]]
            except:
                R_vd[i]=[i,np.nan,np.nan]
        return R_vd
    def find_Rgw(self):
        """Same as find_Rlc, but for gravitational-wave-dominated hardening."""
        R_gw = np.zeros((self.sep.shape[0],3))
        for i in range(len(self.sep)):
            try:
                idx = reduce(np.intersect1d,(np.where(np.abs(self.dadt_gw[i])>np.abs(self.dadt_df[i]))[0],
                                                np.where(np.abs(self.dadt_gw[i])>np.abs(self.dadt_lc[i]))[0],
                                                np.where(np.abs(self.dadt_gw[i])>np.abs(self.dadt_vd[i]))[0]))[0]
                R_gw[i]=[i,idx,self.sep[i][idx]]
            except:
                R_gw[i]=[i,np.nan,np.nan]
        return R_gw
    def find_mbin_at_Rvd(self):
        """
        finding mass growth upto disk phase

        Integrates mdot (trapezoid rule over the time steps) from the start of
        the track down to the disk-dominated separation and returns the total
        binary mass at that point, one value per binary.  Binaries without a
        disk phase (NaN row in find_Rvd) use the population median disk radius
        as the cutoff instead.
        """
        R_vd = self.find_Rvd()
        mbin_at_rdisk = np.zeros(self.mtot.size)
        for mm in range(self.mtot.size):
            ti = self.times[mm]
            mdoti = self.mdot[mm]
            if np.isnan(np.sum(R_vd[mm])):
                condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]>np.nanmedian(R_vd[:,-1]))
            else:
                condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]>R_vd[mm][-1])
            ti = ti[condition]
            mdoti = mdoti[condition]
            delta_ti = np.diff(ti)
            mdot_av = 0.5*(mdoti[1:]+mdoti[:-1])
            dmi = mdot_av*delta_ti
            mbin_at_rdisk[mm] = self.mtot[mm] + np.nansum(dmi)
        return mbin_at_rdisk
    def m1m2(self, mbin=None, q=None ):
        """Split a binary mass into (m1, m2) given mass ratio q = m2/m1.

        Defaults to the population arrays self.mtot and self.q.
        """
        if mbin is None:
            mbin = self.mtot
        if q is None:
            q = self.q
        m1 = mbin/(1+q)
        m2 = mbin-m1
        return m1, m2
    def mbin_df_lc(self):
        """
        finding mass growth upto disk phase
        return : an (MxN) matrix of masses for all binaries at all
        separations.

        Evolves m1, m2, mbin step-by-step through the dynamical-friction /
        loss-cone phases (separations outside the disk radius), keeping the
        mass ratio fixed there.  Entries outside the evolved phase keep the
        sentinel value -1.
        """
        R_vd = self.find_Rvd()
        mbin_df_lc =-1* np.ones(shape = self.mdot.shape)
        q_df_lc =-1* np.ones(shape = self.mdot.shape)
        m1_df_lc = -1*np.ones(self.mdot.shape)
        m2_df_lc = -1*np.ones(self.mdot.shape)
        #initialize masses and mass ratios from illustris
        mbin_df_lc[:,0] = self.mtot
        q_df_lc[:,0] = self.q
        m1_df_lc[:,0] = self.m1
        m2_df_lc[:,0] = self.m2
        for mm in tqdm(range(self.mtot.size)):
            ti = self.times[mm]
            mdoti = self.mdot[mm]
            if np.isnan(np.sum(R_vd[mm])):
                condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]>=np.nanmedian(R_vd[:,-1]))
            else:
                condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]>=R_vd[mm][-1])
            q_df_lc[mm][condition] = np.full(q_df_lc[mm][condition].shape, self.q[mm]) #q is not evolving in df_lc
                                                                                      #phase ==> fill with same value
            ti = ti[condition]
            mdoti = mdoti[condition]
            delta_ti = np.diff(ti)
            mdot_av = 0.5*(mdoti[1:]+mdoti[:-1])
            dmi = mdot_av*delta_ti
            idx = np.where(condition)[0]
            for ll in range(len(idx)-1):
                mbin_df_lc[mm][idx[ll]+1] = mbin_df_lc[mm][idx[ll]] + dmi[ll]
                m1_df_lc[mm][idx[ll]+1], m2_df_lc[mm][idx[ll]+1] = self.m1m2(mbin_df_lc[mm][idx[ll]+1]
                                                                             , q_df_lc[mm][idx[ll]+1])
        return m1_df_lc, m2_df_lc, mbin_df_lc, q_df_lc
    def find_mrgr_idx(self):
        """Partition binaries into (merged by z=0, not merged by z=0).

        A track containing z == 0 survives to the present; otherwise it is
        assumed to have merged (its redshift track hits inf at merger).
        NOTE(review): the `idx` value computed in the else-branch is unused.
        """
        idx_merged_by_z0 =[]
        idx_not_merged_by_z0 =[]
        for i in range(len(self.z)):
            if 0 in self.z[i]:
                idx_not_merged_by_z0.append(i)
            else:
                idx = np.where(np.isinf(self.z[i]))[0][0]
                idx_merged_by_z0.append(i)
        return np.array(idx_merged_by_z0), np.array(idx_not_merged_by_z0)
    # def dm_disk_phase(self):
    #     """
    #     finds mass growth during disk phase. The inital binary mass in this phase comes
    #     from the mass growth in the loss cone and dynamical friction phases.
    #     """
    #     R_vd = self.find_Rvd()
    #     R_gw = self.find_Rgw()
    #     m1_after_disk = np.zeros(self.mtot.size)
    #     m2_after_disk = np.zeros(self.mtot.size)
    #     q_after_disk = -1*np.ones(self.mtot.size)
    #     mbin_at_rdisk = self.find_mbin_at_Rvd()
    #     for mm in tqdm(range(self.mtot.size)):
    #         ti = self.times[mm]
    #         mdoti = self.mdot[mm]
    #         if np.isnan(np.sum(R_vd[mm])):
    #             if np.isnan(np.sum(R_gw[mm])):
    #                 print ('this binary has niether a gas dominated phase nor a gw dominated phase')
    #                 condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]<=np.nanmedian(R_vd[:,-1]))
    #             else:
    #                 condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (R_gw[mm][-1]<self.sep[mm]) & (self.sep[mm] <=np.nanmedian(R_vd[:,-1]))
    #         else:
    #             if np.isnan(np.sum(R_gw[mm])):
    #                 #gas dominated all the way
    #                 condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]<=R_vd[mm][-1])
    #             else:
    #                 condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (R_gw[mm][-1]<self.sep[mm]) & (self.sep[mm]<=R_vd[mm][-1])
    #         ti = ti[condition]
    #         mdoti = mdoti[condition]
    #         delta_ti = np.diff(ti)
    #         mdot_av = 0.5*(mdoti[1:]+mdoti[:-1])
    #         cond_idx = np.where(condition==True)
    #         qi = self.q[mm]
    #         m1_fin = mbin_at_rdisk[mm]/(1+qi)
    #         m2_fin = mbin_at_rdisk[mm]*qi/(1+qi)
    #         for jj in range(mdot_av.size):
    #             mdot1, mdot2 = dfn.dm1dm2_lk(qi, mdot_av[jj])
    #             dm1 = mdot1*delta_ti[jj]
    #             dm2 = mdot2*delta_ti[jj]
    #             m1_fin = m1_fin + dm1
    #             m2_fin = m2_fin + dm2
    #             qi = m2_fin/m1_fin
    #         m1_after_disk[mm] = m1_fin
    #         m2_after_disk[mm] = m2_fin
    #         q_after_disk[mm] = qi
    #     return m1_after_disk, m2_after_disk
    #############new function##################
    def mbin_cbd(self):
        """
        finds mass growth during disk phase. The inital binary mass in this phase comes
        from the mass growth in the loss cone and dynamical friction phases.

        Starts from the mbin_df_lc arrays and, inside the disk-dominated
        separation window, distributes the accreted mass between m1 and m2 via
        dfn.dm1dm2_lk so the mass ratio evolves step by step.  Prints are
        left-over debugging output.
        """
        R_vd = self.find_Rvd()
        R_gw = self.find_Rgw()
        #initialize mbin_cbd to mbin_df_lc
        m1_cbd, m2_cbd, mbin_cbd, q_cbd = self.mbin_df_lc()
        # print ('shape of m1_cbd {}, m2_cbd {}, mbin_cbd {}, q_cbd{}'.format(m1_cbd.shape, m2_cbd.shape
        #                                                                     , mbin_cbd.shape, q_cbd.shape))
        no_condition = 0
        for mm in tqdm(range(self.mtot.size)):
            ti = self.times[mm]
            mdoti = self.mdot[mm]
            if np.isnan(np.sum(R_vd[mm])):
                if np.isnan(np.sum(R_gw[mm])):
                    print ('this binary has niether a gas dominated phase nor a gw dominated phase')
                    condition = ((self.scales[mm] > 0.0) & (self.scales[mm] < 1.0)
                                 & (self.sep[mm]<=np.nanmedian(R_vd[:,-1])))
                else:
                    condition = ((self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]>=R_gw[mm][-1])
                                 & (self.sep[mm] <= np.nanmedian(R_vd[:,-1])))
            else:
                if np.isnan(np.sum(R_gw[mm])):
                    #gas dominated all the way
                    condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]<=R_vd[mm][-1])
                else:
                    condition = ((self.scales[mm] > 0.0) & (self.scales[mm] < 1.0) & (self.sep[mm]>=R_gw[mm][-1])
                                 & (self.sep[mm] <= R_vd[mm][-1]))
            idx = np.where(condition)[0]
            if len(idx)<1:
                # no disk-phase window found for this binary; skip it
                no_condition+=1
                print (len(idx))
                print (idx)
                print (no_condition)
                continue
            ti = ti[condition]
            mdoti = mdoti[condition]
            delta_ti = np.diff(ti)
            mdot_av = 0.5*(mdoti[1:]+mdoti[:-1])
            print (q_cbd[mm][idx])
            #print (len(idx), mdot_av.size)
            for ll in range(len(idx)-1):
                if q_cbd[mm][idx[ll]]<0:
                    # sentinel -1 means this step was never initialized upstream
                    print ('binary number {} and separation {}'.format(mm, ll))
                    print (q_cbd[mm])
                    print ('')
                mdot1, mdot2 = dfn.dm1dm2_lk(q_cbd[mm][idx[ll]], mdot_av[ll])
                dm1 = mdot1*delta_ti[ll]
                dm2 = mdot2*delta_ti[ll]
                m1_cbd[mm][idx[ll]+1] = m1_cbd[mm][idx[ll]] + dm1
                m2_cbd[mm][idx[ll]+1] = m2_cbd[mm][idx[ll]] + dm2
                q_cbd[mm][idx[ll]+1] = m2_cbd[mm][idx[ll]+1]/m1_cbd[mm][idx[ll]+1]
                mbin_cbd[mm][idx[ll]+1] = m1_cbd[mm][idx[ll]+1]+m2_cbd[mm][idx[ll]+1]
                # print ('\n')
                # print (m1_cbd[mm][idx[ll]], m2_cbd[mm][idx[ll]], mbin_cbd[mm][idx[ll]], q_cbd[mm][idx[ll]])
                # sys.exit()
            print (q_cbd[mm][idx])
            print ('')
        return m1_cbd, m2_cbd, mbin_cbd, q_cbd
    #############End new function##################
    def mbin_after_insp(self):
        """
        finding mass growth for the whole inspiral

        Trapezoid-integrates mdot over every valid step (0 < scale < 1) and
        returns the final binary mass per binary.
        """
        R_vd = self.find_Rvd()
        mbin_after_insp = np.zeros(self.mtot.size)
        for mm in range(self.mtot.size):
            ti = self.times[mm]
            mdoti = self.mdot[mm]
            condition = (self.scales[mm] > 0.0) & (self.scales[mm] < 1.0)
            ti = ti[condition]
            mdoti = mdoti[condition]
            delta_ti = np.diff(ti)
            mdot_av = 0.5*(mdoti[1:]+mdoti[:-1])
            dmi = mdot_av*delta_ti
            dm = np.nansum(dmi)
            mbin_after_insp[mm] = self.mtot[mm] + dm
        return mbin_after_insp
class inspiral(object):
    """Wraps a binary_mbh population together with black-hole spin magnitudes
    loaded from a companion spin-table HDF5 file found under ./input/.
    """
    def __init__(self, filename):
        # BUG FIX: the original called self.spin_magnitudes() a first time and
        # discarded the result, reading the spin HDF5 file from disk twice.
        self.binary_mbh = binary_mbh(filename)
        self.chi1, self.chi2 = self.spin_magnitudes()
    def spin_magnitudes(self, use_fgas=True):
        """Load dimensionless spin magnitudes (chi1, chi2) for the binaries.

        Looks for '*spin_magnitude*hdf5' files under './<cwd>/input/' and
        picks the gas-fraction-dependent table ("fgas" in the filename) when
        `use_fgas` is True, otherwise the gas-independent one.

        Returns
        -------
        (chi1, chi2) : tuple of numpy arrays
            Primary and secondary dimensionless spin magnitudes.
        """
        input_dir = '/input/'
        abs_path = os.path.abspath(os.getcwd())
        files = glob.glob('.' + os.path.join(abs_path, input_dir) + '*hdf5')
        fspin = [s for s in files if "spin_magnitude" in s]
        if use_fgas:
            print ("spin magnitudes are gas dependent")
            fspin = [s for s in fspin if "fgas" in s][0]
            print ("result of if", fspin)
        else:
            # BUG FIX: this branch previously used the same '"fgas" in s'
            # filter as the gas-dependent branch; the gas-independent table
            # is the one *without* "fgas" in its name.
            fspin = [s for s in fspin if "fgas" not in s][0]
            print ("spin magnitudes are gas independent")
        with h5py.File(fspin, 'r') as f:
            primary_dimleesspins = np.array(f['dimensionlessspins/primary'])
            secondary_dimleesspins = np.array(f['dimensionlessspins/secondary'])
        chi1 = primary_dimleesspins
        chi2 = secondary_dimleesspins
        return chi1, chi2
def modify_dadt_vd(factor=1, mass_growth=False):
    # NOTE(review): this function looks like it was lifted out of a class and
    # is broken as written: it references `self.binary_mbh` although it has no
    # `self` parameter, and the free names `mdot`, `sep` and `disk_torq` are
    # not defined at module level in this file.  Calling it raises NameError.
    # It also never returns `dadt_vd`.
    dadt_vd = np.zeros(shape=mdot.shape)
    # m1s = (np.ones(shape=self.binary_mbh.mdot.shape).T*m1).T
    # m2s = (np.ones(shape=self.binary_mbh.mdot.shape).T*m2).T
    if not mass_growth:
        for i in tqdm(range(len(sep))):
            inds = (self.binary_mbh.sep[i]>0.0)
            # harden() presumably returns (dadt, ..., regimes, ...); only the
            # first and third outputs are named — TODO confirm its signature
            dadt_vd[i][inds],d1,regs,d3,d4 = disk_torq.harden(self.binary_mbh.sep[i][inds]
                                , self.binary_mbh.m1[i]
                                , self.binary_mbh.m2[i]
                                , self.binary_mbh.mdot[i][inds]/factor)
            # dadt_vd[i][inds],d1,regs,d3,d4 = disk_torq.harden(sep[i][inds],m1s[i][inds],m2s[i][inds],mdot[i][inds]/factor)
            dadt_vd[i][inds] = np.abs(dadt_vd[i][inds])
    elif mass_growth:
        #substitute the new m1 and m2 masses
        # NOTE(review): this branch uses loop variables `i` and `inds` that are
        # only bound inside the other branch's loop — it cannot work as-is and
        # appears to be an unfinished stub.
        dadt_vd[i][inds],d1,regs,d3,d4 = disk_torq.harden(self.binary_mbh.sep[i][inds]
                                , self.binary_mbh.m1[i]
                                , self.binary_mbh.m2[i]
                                , self.binary_mbh.mdot[i][inds]/factor)
| [
"numpy.abs",
"numpy.ones",
"numpy.nanmedian",
"numpy.where",
"os.path.join",
"numpy.diff",
"h5py.File",
"os.getcwd",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.full",
"numpy.nansum",
"numpy.isinf",
"disk.funcs.dm1dm2_lk"
] | [((1840, 1872), 'numpy.zeros', 'np.zeros', (['(self.sep.shape[0], 3)'], {}), '((self.sep.shape[0], 3))\n', (1848, 1872), True, 'import numpy as np\n'), ((2443, 2475), 'numpy.zeros', 'np.zeros', (['(self.sep.shape[0], 3)'], {}), '((self.sep.shape[0], 3))\n', (2451, 2475), True, 'import numpy as np\n'), ((3037, 3069), 'numpy.zeros', 'np.zeros', (['(self.sep.shape[0], 3)'], {}), '((self.sep.shape[0], 3))\n', (3045, 3069), True, 'import numpy as np\n'), ((3738, 3762), 'numpy.zeros', 'np.zeros', (['self.mtot.size'], {}), '(self.mtot.size)\n', (3746, 3762), True, 'import numpy as np\n'), ((12843, 12867), 'numpy.zeros', 'np.zeros', (['self.mtot.size'], {}), '(self.mtot.size)\n', (12851, 12867), True, 'import numpy as np\n'), ((14528, 14554), 'numpy.zeros', 'np.zeros', ([], {'shape': 'mdot.shape'}), '(shape=mdot.shape)\n', (14536, 14554), True, 'import numpy as np\n'), ((487, 516), 'h5py.File', 'h5py.File', (['self.filename', '"""r"""'], {}), "(self.filename, 'r')\n", (496, 516), False, 'import h5py\n'), ((567, 611), 'numpy.array', 'np.array', (["f['meta/SubhaloMassInHalfRadType']"], {}), "(f['meta/SubhaloMassInHalfRadType'])\n", (575, 611), True, 'import numpy as np\n'), ((651, 690), 'numpy.array', 'np.array', (["f['meta/SubhaloSFRinHalfRad']"], {}), "(f['meta/SubhaloSFRinHalfRad'])\n", (659, 690), True, 'import numpy as np\n'), ((719, 747), 'numpy.array', 'np.array', (["f['meta/snapshot']"], {}), "(f['meta/snapshot'])\n", (727, 747), True, 'import numpy as np\n'), ((778, 808), 'numpy.array', 'np.array', (["f['meta/subhalo_id']"], {}), "(f['meta/subhalo_id'])\n", (786, 808), True, 'import numpy as np\n'), ((840, 871), 'numpy.array', 'np.array', (["f['evolution/masses']"], {}), "(f['evolution/masses'])\n", (848, 871), True, 'import numpy as np\n'), ((905, 938), 'numpy.array', 'np.array', (["f['evolution/mdot_eff']"], {}), "(f['evolution/mdot_eff'])\n", (913, 938), True, 'import numpy as np\n'), ((979, 1007), 'numpy.array', 'np.array', (["f['evolution/sep']"], {}), 
"(f['evolution/sep'])\n", (987, 1007), True, 'import numpy as np\n'), ((1047, 1076), 'numpy.array', 'np.array', (["f['evolution/dadt']"], {}), "(f['evolution/dadt'])\n", (1055, 1076), True, 'import numpy as np\n'), ((1117, 1149), 'numpy.array', 'np.array', (["f['evolution/dadt_df']"], {}), "(f['evolution/dadt_df'])\n", (1125, 1149), True, 'import numpy as np\n'), ((1187, 1219), 'numpy.array', 'np.array', (["f['evolution/dadt_gw']"], {}), "(f['evolution/dadt_gw'])\n", (1195, 1219), True, 'import numpy as np\n'), ((1257, 1289), 'numpy.array', 'np.array', (["f['evolution/dadt_lc']"], {}), "(f['evolution/dadt_lc'])\n", (1265, 1289), True, 'import numpy as np\n'), ((1327, 1359), 'numpy.array', 'np.array', (["f['evolution/dadt_vd']"], {}), "(f['evolution/dadt_vd'])\n", (1335, 1359), True, 'import numpy as np\n'), ((1397, 1428), 'numpy.array', 'np.array', (["f['evolution/scales']"], {}), "(f['evolution/scales'])\n", (1405, 1428), True, 'import numpy as np\n'), ((1465, 1495), 'numpy.array', 'np.array', (["f['evolution/times']"], {}), "(f['evolution/times'])\n", (1473, 1495), True, 'import numpy as np\n'), ((1532, 1562), 'numpy.array', 'np.array', (["f['evolution/eccen']"], {}), "(f['evolution/eccen'])\n", (1540, 1562), True, 'import numpy as np\n'), ((4283, 4294), 'numpy.diff', 'np.diff', (['ti'], {}), '(ti)\n', (4290, 4294), True, 'import numpy as np\n'), ((4930, 4960), 'numpy.ones', 'np.ones', ([], {'shape': 'self.mdot.shape'}), '(shape=self.mdot.shape)\n', (4937, 4960), True, 'import numpy as np\n'), ((4984, 5014), 'numpy.ones', 'np.ones', ([], {'shape': 'self.mdot.shape'}), '(shape=self.mdot.shape)\n', (4991, 5014), True, 'import numpy as np\n'), ((5039, 5063), 'numpy.ones', 'np.ones', (['self.mdot.shape'], {}), '(self.mdot.shape)\n', (5046, 5063), True, 'import numpy as np\n'), ((5086, 5110), 'numpy.ones', 'np.ones', (['self.mdot.shape'], {}), '(self.mdot.shape)\n', (5093, 5110), True, 'import numpy as np\n'), ((5743, 5792), 'numpy.full', 'np.full', 
(['q_df_lc[mm][condition].shape', 'self.q[mm]'], {}), '(q_df_lc[mm][condition].shape, self.q[mm])\n', (5750, 5792), True, 'import numpy as np\n'), ((6037, 6048), 'numpy.diff', 'np.diff', (['ti'], {}), '(ti)\n', (6044, 6048), True, 'import numpy as np\n'), ((6915, 6941), 'numpy.array', 'np.array', (['idx_merged_by_z0'], {}), '(idx_merged_by_z0)\n', (6923, 6941), True, 'import numpy as np\n'), ((6943, 6973), 'numpy.array', 'np.array', (['idx_not_merged_by_z0'], {}), '(idx_not_merged_by_z0)\n', (6951, 6973), True, 'import numpy as np\n'), ((11488, 11499), 'numpy.diff', 'np.diff', (['ti'], {}), '(ti)\n', (11495, 11499), True, 'import numpy as np\n'), ((13140, 13151), 'numpy.diff', 'np.diff', (['ti'], {}), '(ti)\n', (13147, 13151), True, 'import numpy as np\n'), ((13253, 13267), 'numpy.nansum', 'np.nansum', (['dmi'], {}), '(dmi)\n', (13262, 13267), True, 'import numpy as np\n'), ((13688, 13699), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13697, 13699), False, 'import os\n'), ((14155, 14176), 'h5py.File', 'h5py.File', (['fspin', '"""r"""'], {}), "(fspin, 'r')\n", (14164, 14176), False, 'import h5py\n'), ((14218, 14259), 'numpy.array', 'np.array', (["f['dimensionlessspins/primary']"], {}), "(f['dimensionlessspins/primary'])\n", (14226, 14259), True, 'import numpy as np\n'), ((14296, 14339), 'numpy.array', 'np.array', (["f['dimensionlessspins/secondary']"], {}), "(f['dimensionlessspins/secondary'])\n", (14304, 14339), True, 'import numpy as np\n'), ((3894, 3910), 'numpy.sum', 'np.sum', (['R_vd[mm]'], {}), '(R_vd[mm])\n', (3900, 3910), True, 'import numpy as np\n'), ((4427, 4441), 'numpy.nansum', 'np.nansum', (['dmi'], {}), '(dmi)\n', (4436, 4441), True, 'import numpy as np\n'), ((5436, 5452), 'numpy.sum', 'np.sum', (['R_vd[mm]'], {}), '(R_vd[mm])\n', (5442, 5452), True, 'import numpy as np\n'), ((6163, 6182), 'numpy.where', 'np.where', (['condition'], {}), '(condition)\n', (6171, 6182), True, 'import numpy as np\n'), ((10205, 10221), 'numpy.sum', 'np.sum', (['R_vd[mm]'], 
{}), '(R_vd[mm])\n', (10211, 10221), True, 'import numpy as np\n'), ((11196, 11215), 'numpy.where', 'np.where', (['condition'], {}), '(condition)\n', (11204, 11215), True, 'import numpy as np\n'), ((11891, 11937), 'disk.funcs.dm1dm2_lk', 'dfn.dm1dm2_lk', (['q_cbd[mm][idx[ll]]', 'mdot_av[ll]'], {}), '(q_cbd[mm][idx[ll]], mdot_av[ll])\n', (11904, 11937), True, 'import disk.funcs as dfn\n'), ((15362, 15386), 'numpy.abs', 'np.abs', (['dadt_vd[i][inds]'], {}), '(dadt_vd[i][inds])\n', (15368, 15386), True, 'import numpy as np\n'), ((10252, 10268), 'numpy.sum', 'np.sum', (['R_gw[mm]'], {}), '(R_gw[mm])\n', (10258, 10268), True, 'import numpy as np\n'), ((10795, 10811), 'numpy.sum', 'np.sum', (['R_gw[mm]'], {}), '(R_gw[mm])\n', (10801, 10811), True, 'import numpy as np\n'), ((13730, 13763), 'os.path.join', 'os.path.join', (['abs_path', 'input_dir'], {}), '(abs_path, input_dir)\n', (13742, 13763), False, 'import os\n'), ((4007, 4032), 'numpy.nanmedian', 'np.nanmedian', (['R_vd[:, -1]'], {}), '(R_vd[:, -1])\n', (4019, 4032), True, 'import numpy as np\n'), ((5550, 5575), 'numpy.nanmedian', 'np.nanmedian', (['R_vd[:, -1]'], {}), '(R_vd[:, -1])\n', (5562, 5575), True, 'import numpy as np\n'), ((6830, 6849), 'numpy.isinf', 'np.isinf', (['self.z[i]'], {}), '(self.z[i])\n', (6838, 6849), True, 'import numpy as np\n'), ((10506, 10531), 'numpy.nanmedian', 'np.nanmedian', (['R_vd[:, -1]'], {}), '(R_vd[:, -1])\n', (10518, 10531), True, 'import numpy as np\n'), ((10722, 10747), 'numpy.nanmedian', 'np.nanmedian', (['R_vd[:, -1]'], {}), '(R_vd[:, -1])\n', (10734, 10747), True, 'import numpy as np\n'), ((1982, 2005), 'numpy.abs', 'np.abs', (['self.dadt_lc[i]'], {}), '(self.dadt_lc[i])\n', (1988, 2005), True, 'import numpy as np\n'), ((2006, 2029), 'numpy.abs', 'np.abs', (['self.dadt_df[i]'], {}), '(self.dadt_df[i])\n', (2012, 2029), True, 'import numpy as np\n'), ((2089, 2112), 'numpy.abs', 'np.abs', (['self.dadt_lc[i]'], {}), '(self.dadt_lc[i])\n', (2095, 2112), True, 'import numpy as 
np\n'), ((2113, 2136), 'numpy.abs', 'np.abs', (['self.dadt_vd[i]'], {}), '(self.dadt_vd[i])\n', (2119, 2136), True, 'import numpy as np\n'), ((2197, 2220), 'numpy.abs', 'np.abs', (['self.dadt_lc[i]'], {}), '(self.dadt_lc[i])\n', (2203, 2220), True, 'import numpy as np\n'), ((2221, 2244), 'numpy.abs', 'np.abs', (['self.dadt_gw[i]'], {}), '(self.dadt_gw[i])\n', (2227, 2244), True, 'import numpy as np\n'), ((2585, 2608), 'numpy.abs', 'np.abs', (['self.dadt_vd[i]'], {}), '(self.dadt_vd[i])\n', (2591, 2608), True, 'import numpy as np\n'), ((2609, 2632), 'numpy.abs', 'np.abs', (['self.dadt_df[i]'], {}), '(self.dadt_df[i])\n', (2615, 2632), True, 'import numpy as np\n'), ((2692, 2715), 'numpy.abs', 'np.abs', (['self.dadt_vd[i]'], {}), '(self.dadt_vd[i])\n', (2698, 2715), True, 'import numpy as np\n'), ((2716, 2739), 'numpy.abs', 'np.abs', (['self.dadt_lc[i]'], {}), '(self.dadt_lc[i])\n', (2722, 2739), True, 'import numpy as np\n'), ((2800, 2823), 'numpy.abs', 'np.abs', (['self.dadt_vd[i]'], {}), '(self.dadt_vd[i])\n', (2806, 2823), True, 'import numpy as np\n'), ((2824, 2847), 'numpy.abs', 'np.abs', (['self.dadt_gw[i]'], {}), '(self.dadt_gw[i])\n', (2830, 2847), True, 'import numpy as np\n'), ((3179, 3202), 'numpy.abs', 'np.abs', (['self.dadt_gw[i]'], {}), '(self.dadt_gw[i])\n', (3185, 3202), True, 'import numpy as np\n'), ((3203, 3226), 'numpy.abs', 'np.abs', (['self.dadt_df[i]'], {}), '(self.dadt_df[i])\n', (3209, 3226), True, 'import numpy as np\n'), ((3286, 3309), 'numpy.abs', 'np.abs', (['self.dadt_gw[i]'], {}), '(self.dadt_gw[i])\n', (3292, 3309), True, 'import numpy as np\n'), ((3310, 3333), 'numpy.abs', 'np.abs', (['self.dadt_lc[i]'], {}), '(self.dadt_lc[i])\n', (3316, 3333), True, 'import numpy as np\n'), ((3394, 3417), 'numpy.abs', 'np.abs', (['self.dadt_gw[i]'], {}), '(self.dadt_gw[i])\n', (3400, 3417), True, 'import numpy as np\n'), ((3418, 3441), 'numpy.abs', 'np.abs', (['self.dadt_vd[i]'], {}), '(self.dadt_vd[i])\n', (3424, 3441), True, 'import numpy as 
np\n')] |
import math
import os
import random
import time
import gc
import dgl
import dgl.function as fn
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.decomposition import PCA
from dataset import load_dataset
from utils import compute_spectral_emb, entropy
def neighbor_average_features(g, feat, args, use_norm=False, style="all"):
    """
    Compute multi-hop neighbor-averaged node features

    Parameters
    ----------
    g : dgl.DGLGraph
        Input graph; mutated in place (temporary 'feat_*'/'f' ndata keys are
        added and popped).
    feat : torch.Tensor
        Node feature matrix aligned with g's nodes.
    args : argparse.Namespace
        Reads aggr_gpu, K (for style "all"), label_K (for "last"/"ppnp"),
        and dataset.
    use_norm : bool
        If True, apply symmetric D^{-1/2} (out-degree) normalization on both
        sides of the propagation instead of a plain mean.
    style : str
        "all"  -> return a list of K+1 tensors, one per hop (hop 0 = input);
        "last"/"ppnp" -> return a single tensor after label_K propagation
        steps; "ppnp" additionally mixes 50% of the initial features back in
        at every step.

    Returns
    -------
    list[torch.Tensor] or torch.Tensor
        See `style` above.  For ogbn-mag, results are scattered into
        zero-initialized tensors covering only the target (paper) nodes.
    """
    print("Compute neighbor-averaged feats", style)
    aggr_device = torch.device("cpu" if args.aggr_gpu < 0 else "cuda:{}".format(args.aggr_gpu))
    g = g.to(aggr_device)
    feat = feat.to(aggr_device)
    if style == "all":
        g.ndata['feat_0'] = feat
        # print(g.ndata["feat"].shape)
        # print(norm.shape)
        if use_norm:
            # D^{-1/2} per node, reshaped for broadcasting over feature dims
            degs = g.out_degrees().float().clamp(min=1)
            norm = torch.pow(degs, -0.5)
            shp = norm.shape + (1,) * (feat.dim() - 1)
            norm = torch.reshape(norm, shp)
        for hop in range(1, args.K + 1):
            g.ndata[f'feat_{hop}'] = g.ndata[f'feat_{hop-1}']
            # g.ndata['pre_label_emb'] = g.ndata['label_emb']
            if use_norm:
                # symmetric normalization: scale, sum over neighbors, scale again
                g.ndata[f'feat_{hop}'] = g.ndata[f'feat_{hop}'] * norm
                g.update_all(fn.copy_src(src=f'feat_{hop}', out='msg'),
                            fn.sum(msg='msg', out=f'feat_{hop}'))
                g.ndata[f'feat_{hop}'] = g.ndata[f'feat_{hop}'] * norm
            else:
                g.update_all(fn.copy_src(src=f'feat_{hop}', out='msg'),
                            fn.mean(msg='msg', out=f'feat_{hop}'))
            # if hop > 1:
            #     g.ndata['label_emb'] = 0.5 * g.ndata['pre_label_emb'] + \
            #                            0.5 * g.ndata['label_emb']
        res = []
        for hop in range(args.K + 1):
            res.append(g.ndata.pop(f'feat_{hop}'))
            gc.collect()
        if args.dataset == "ogbn-mag":
            # For MAG dataset, only return features for target node types (i.e.
            # paper nodes)
            target_mask = g.ndata['target_mask']
            target_ids = g.ndata[dgl.NID][target_mask]
            num_target = target_mask.sum().item()
            new_res = []
            for x in res:
                feat = torch.zeros((num_target,) + x.shape[1:],
                                dtype=x.dtype, device=x.device)
                feat[target_ids] = x[target_mask]
                new_res.append(feat)
            res = new_res
        # del g.ndata['pre_label_emb']
    elif style in ["last", "ppnp"]:
        if style == "ppnp": init_feat = feat
        if use_norm:
            degs = g.out_degrees().float().clamp(min=1)
            norm = torch.pow(degs, -0.5)
            shp = norm.shape + (1,) * (feat.dim() - 1)
            norm = torch.reshape(norm, shp)
        for hop in range(1, args.label_K+1):

            # g.ndata["f_next"] = g.ndata["f"]
            if use_norm:
                feat = feat * norm
                g.ndata['f'] = feat
                g.update_all(fn.copy_src(src='f', out='msg'),
                            fn.sum(msg='msg', out='f'))
                feat = g.ndata.pop('f')
                # degs = g.in_degrees().float().clamp(min=1)
                # norm = torch.pow(degs, -0.5)
                # shp = norm.shape + (1,) * (g.ndata['f'].dim() - 1)
                # norm = torch.reshape(norm, shp)
                feat = feat * norm
            else:
                g.ndata['f'] = feat
                g.update_all(fn.copy_src(src='f', out='msg'),
                            fn.mean(msg='msg', out='f'))
                feat = g.ndata.pop('f')
            if style == "ppnp":
                # personalized-PageRank-style 50/50 teleport back to the input
                feat = 0.5 * feat + 0.5 * init_feat
        res = feat
        gc.collect()
        if args.dataset == "ogbn-mag":
            # For MAG dataset, only return features for target node types (i.e.
            # paper nodes)
            target_mask = g.ndata['target_mask']
            target_ids = g.ndata[dgl.NID][target_mask]
            num_target = target_mask.sum().item()
            new_res = torch.zeros((num_target,) + feat.shape[1:],
                                dtype=feat.dtype, device=feat.device)
            new_res[target_ids] = res[target_mask]
            res = new_res
    return res
def prepare_data(device, args, probs_path, stage=0, load_embs=False, load_label_emb=False):
    """Load the dataset and compute neighbor-averaged features for a scalable GNN.

    Depending on ``stage`` this also builds pseudo-labels from a teacher model's
    probabilities, and depending on ``load_embs``/``load_label_emb`` it caches or
    reloads the smoothed feature / label embeddings on disk.

    Args:
        device: Target device for the returned tensors.
        args: Parsed command-line arguments (dataset, model, K, thresholds, ...).
        probs_path: Path to the teacher probabilities (used when stage > 0).
        stage: Self-training stage; 0 means no pseudo-labelling.
        load_embs: If True, cache/reload smoothed node features on disk.
        load_label_emb: If True, cache/reload smoothed label embeddings on disk.

    Returns:
        Tuple (feats, label_emb, teacher_probs, labels, labels_with_pseudos,
        in_feats, n_classes, train_nid, train_nid_with_pseudos, val_nid,
        test_nid, evaluator, elapsed_seconds).
    """
    # Aggregation may run on a different device (often CPU) than training.
    aggr_device = torch.device("cpu" if args.aggr_gpu < 0 else "cuda:{}".format(args.aggr_gpu))
    emb_path = os.path.join("..", "embeddings", args.dataset,
                args.model if (args.model != "simple_sagn") else (args.model + "_" + args.weight_style),
                "smoothed_emb.pt")
    data = load_dataset(args.dataset, "../../dataset", aggr_device, mag_emb=args.mag_emb)
    t1 = time.time()
    g, labels, n_classes, train_nid, val_nid, test_nid, evaluator = data
    label_emb_path = os.path.join("..", "embeddings", args.dataset,
                args.model if (args.model != "simple_sagn") else (args.model + "_" + args.weight_style),
                "smoothed_label_emb.pt")
    if not os.path.exists(os.path.dirname(emb_path)):
        os.makedirs(os.path.dirname(emb_path))
    # SAGN-family models keep all K hop representations; others use PPNP-style smoothing.
    feat_averaging_style = "all" if args.model in ["sagn", "plain_sagn", "simple_sagn", "sign"] else "ppnp"
    label_averaging_style = "last"
    in_feats = g.ndata['feat'].shape[1]
    # n_classes = (labels.max() + 1).item() if labels.dim() == 1 else labels.size(1)
    print("in_feats:", in_feats)
    feat = g.ndata.pop('feat')
    if args.model in ["mlp"]:
        # The plain MLP can optionally use a cached spectral embedding.
        spectral_emb_path = os.path.join("..", "embeddings", args.dataset, "spectral.pt")
        if os.path.exists(spectral_emb_path):
            spectral_emb = torch.load(spectral_emb_path).to(aggr_device)
        else:
            spectral_emb = compute_spectral_emb(g.adjacency_matrix(), 128).to(aggr_device)
            if not os.path.exists(os.path.dirname(spectral_emb_path)):
                # Bug fix: os.path.makedirs does not exist; use os.makedirs.
                os.makedirs(os.path.dirname(spectral_emb_path))
            torch.save(spectral_emb, spectral_emb_path)
    else:
        spectral_emb = None
    if stage > 0:
        # Self-training: derive pseudo-labels from the teacher's probabilities.
        teacher_probs = torch.load(probs_path).to(aggr_device)
        tr_va_te_nid = torch.cat([train_nid, val_nid, test_nid], dim=0)
        # assert len(teacher_probs) == len(feat)
        if args.dataset in ['yelp', 'ppi', 'ppi_large']:
            # Multi-label datasets: confidence is measured via binary entropy.
            threshold = - args.threshold * np.log(args.threshold) - (1-args.threshold) * np.log(1-args.threshold)
            entropy_distribution = entropy(teacher_probs)
            print(threshold)
            print(entropy_distribution.mean(1).max().item())
            confident_nid_inner = torch.arange(len(teacher_probs))[(entropy_distribution.mean(1) <= threshold)]
        else:
            # Single-label datasets: confidence is the max class probability.
            confident_nid_inner = torch.arange(len(teacher_probs))[teacher_probs.max(1)[0] > args.threshold]
        extra_confident_nid_inner = confident_nid_inner[confident_nid_inner >= len(train_nid)]
        confident_nid = tr_va_te_nid[confident_nid_inner]
        extra_confident_nid = tr_va_te_nid[extra_confident_nid_inner]
        print(f"pseudo label number: {len(confident_nid)}")
        if args.dataset in ["yelp", "ppi", "ppi_large"]:
            pseudo_labels = teacher_probs
            pseudo_labels[pseudo_labels >= 0.5] = 1
            pseudo_labels[pseudo_labels < 0.5] = 0
            labels_with_pseudos = torch.ones_like(labels)
        else:
            pseudo_labels = torch.argmax(teacher_probs, dim=1).to(labels.device)
            labels_with_pseudos = torch.zeros_like(labels)
        train_nid_with_pseudos = np.union1d(train_nid, confident_nid)
        print(f"enhanced train set number: {len(train_nid_with_pseudos)}")
        # True labels always win over pseudo-labels on the original train set.
        labels_with_pseudos[train_nid] = labels[train_nid]
        labels_with_pseudos[extra_confident_nid] = pseudo_labels[extra_confident_nid_inner]
        # train_nid_with_pseudos = np.random.choice(train_nid_with_pseudos, size=int(0.5 * len(train_nid_with_pseudos)), replace=False)
    else:
        teacher_probs = None
        pseudo_labels = None
        labels_with_pseudos = labels.clone()
        confident_nid = train_nid
        train_nid_with_pseudos = train_nid
    if args.use_labels & ((not args.inductive) or stage > 0):
        print("using label information")
        if args.dataset in ["yelp", "ppi", "ppi_large"]:
            # Multi-label: initialize unknown nodes at 0.5 per class.
            label_emb = 0.5 * torch.ones([feat.shape[0], n_classes]).to(labels.device)
            # label_emb = labels_with_pseudos.mean(0).repeat([feat.shape[0], 1])
            label_emb[train_nid_with_pseudos] = labels_with_pseudos.float()[train_nid_with_pseudos]
        else:
            # Single-label: one-hot encode known (true + pseudo) labels.
            label_emb = torch.zeros([feat.shape[0], n_classes]).to(labels.device)
            # label_emb = (1. / n_classes) * torch.ones([feat.shape[0], n_classes]).to(device)
            label_emb[train_nid_with_pseudos] = F.one_hot(labels_with_pseudos[train_nid_with_pseudos], num_classes=n_classes).float().to(labels.device)
        if args.dataset == "ogbn-mag":
            # rand_weight = torch.Tensor(n_classes, 128).uniform_(-0.5, 0.5)
            # label_emb = torch.matmul(label_emb, rand_weight.to(device))
            # pca = PCA(n_components=128)
            # label_emb = torch.FloatTensor(pca.fit_transform(label_emb.cpu())).to(device)
            # MAG is heterogeneous: scatter paper-node embeddings into the full graph.
            target_mask = g.ndata["target_mask"]
            target_ids = g.ndata[dgl.NID][target_mask]
            num_target = target_mask.sum().item()
            new_label_emb = torch.zeros((len(feat),) + label_emb.shape[1:],
                                        dtype=label_emb.dtype, device=label_emb.device)
            new_label_emb[target_mask] = label_emb[target_ids]
            label_emb = new_label_emb
    else:
        label_emb = None
    if args.inductive:
        print("inductive setting detected")
        # Train-only subgraph is cached on disk to avoid recomputation.
        if os.path.exists(os.path.join("../subgraphs",args.dataset, "subgraph_train.pt")):
            print("load train subgraph")
            g_train = torch.load(os.path.join("../subgraphs",args.dataset, "subgraph_train.pt")).to(g.device)
        else:
            print("get train subgraph")
            g_train = dgl.node_subgraph(g, train_nid.to(g.device))
            if not os.path.exists(os.path.join("../subgraphs",args.dataset)):
                os.makedirs(os.path.join("../subgraphs",args.dataset))
            torch.save(g_train, os.path.join("../subgraphs",args.dataset, "subgraph_train.pt"))
        # print("get val/test subgraph")
        # g_val_test = dgl.node_subgraph(g, torch.cat([val_nid, test_nid],dim=0).to(g.device))
        train_mask = g_train.ndata[dgl.NID]
        if load_embs and os.path.exists(emb_path):
            pass
        else:
            # Smooth on the full graph, then overwrite train rows with the
            # train-subgraph smoothing so no test edges leak into training.
            feats = neighbor_average_features(g, feat, args, use_norm=args.use_norm, style=feat_averaging_style)
            feats_train = neighbor_average_features(g_train, feat[g_train.ndata[dgl.NID]], args, use_norm=args.use_norm, style=feat_averaging_style)
            if args.model in ["sagn", "simple_sagn", "sign"]:
                for i in range(args.K+1):
                    feats[i][train_mask] = feats_train[i]
            else:
                feats[train_mask] = feats_train
        if load_embs:
            if not os.path.exists(emb_path):
                print("saving smoothed node features to " + emb_path)
                torch.save(feats, emb_path)
            # NOTE(review): if load_embs is True and emb_path already exists,
            # `feats` was never assigned above, so this `del` raises NameError
            # — TODO confirm intended control flow.
            del feats, feat
            gc.collect()
            with torch.cuda.device(device):
                torch.cuda.empty_cache()
        if (stage == 0) and load_label_emb and os.path.exists(label_emb_path):
            pass
        else:
            if label_emb is not None:
                label_emb_train = neighbor_average_features(g_train, label_emb[g_train.ndata[dgl.NID]], args, use_norm=args.use_norm if (args.dataset != 'cora') else False, style=label_averaging_style)
            else:
                label_emb_train = None
            # del g_train
            # torch.cuda.empty_cache()
            if label_emb is not None:
                label_emb = neighbor_average_features(g, label_emb, args, use_norm=args.use_norm if (args.dataset != 'cora') else False, style=label_averaging_style)
                label_emb[train_mask] = label_emb_train
        if load_label_emb:
            if not os.path.exists(label_emb_path):
                print("saving initial label embeddings to " + label_emb_path)
                torch.save(label_emb, label_emb_path)
            del label_emb
            gc.collect()
            with torch.cuda.device(device):
                torch.cuda.empty_cache()
    else:
        # for transductive setting
        if (stage == 0) and load_label_emb and os.path.exists(label_emb_path):
            pass
        else:
            if label_emb is not None:
                label_emb = neighbor_average_features(g, label_emb, args, use_norm=args.use_norm if (args.dataset != 'cora') else False, style=label_averaging_style)
        if load_label_emb and stage == 0:
            if (not os.path.exists(label_emb_path)):
                print("saving initial label embeddings to " + label_emb_path)
                torch.save(label_emb, label_emb_path)
            del label_emb, g
            gc.collect()
            with torch.cuda.device(device):
                torch.cuda.empty_cache()
        if load_embs and os.path.exists(emb_path):
            pass
        else:
            feats = neighbor_average_features(g, feat, args, style=feat_averaging_style)
        if load_embs:
            if not os.path.exists(emb_path):
                print("saving smoothed node features to " + emb_path)
                torch.save(feats, emb_path)
            del feats, feat
            gc.collect()
            with torch.cuda.device(device):
                torch.cuda.empty_cache()
    # if args.save_temporal_emb:
    #     torch.save(feats, emb_path)
    if spectral_emb is not None:
        # feats = torch.cat([feats, spectral_emb], dim=1)
        in_feats = feats.size(1)
    # save smoothed node features and initial smoothed node label embeddings,
    # if "load" is set true and they have not been saved
    if load_embs:
        print("load saved embeddings")
        feats = torch.load(emb_path)
    if load_label_emb and (stage == 0):
        print("load saved label embedding")
        label_emb = torch.load(label_emb_path)
    # label_emb = (label_emb - label_emb.mean(0)) / label_emb.std(0)
    # eval_feats = neighbor_average_features(g, eval_feat, args)
    labels = labels.to(device)
    labels_with_pseudos = labels_with_pseudos.to(device)
    # move to device
    train_nid = train_nid.to(device)
    train_nid_with_pseudos = torch.LongTensor(train_nid_with_pseudos).to(device)
    val_nid = val_nid.to(device)
    test_nid = test_nid.to(device)
    t2 = time.time()
    return feats, label_emb, teacher_probs, labels, labels_with_pseudos, in_feats, n_classes, \
        train_nid, train_nid_with_pseudos, val_nid, test_nid, evaluator, t2 - t1
| [
"numpy.union1d",
"dataset.load_dataset",
"torch.LongTensor",
"numpy.log",
"torch.pow",
"os.path.exists",
"torch.cuda.device",
"dgl.function.copy_src",
"torch.zeros_like",
"torch.argmax",
"dgl.function.sum",
"torch.ones_like",
"os.path.dirname",
"dgl.function.mean",
"torch.nn.functional.o... | [((4853, 5007), 'os.path.join', 'os.path.join', (['""".."""', '"""embeddings"""', 'args.dataset', "(args.model if args.model != 'simple_sagn' else args.model + '_' + args.\n weight_style)", '"""smoothed_emb.pt"""'], {}), "('..', 'embeddings', args.dataset, args.model if args.model !=\n 'simple_sagn' else args.model + '_' + args.weight_style, 'smoothed_emb.pt')\n", (4865, 5007), False, 'import os\n'), ((5053, 5131), 'dataset.load_dataset', 'load_dataset', (['args.dataset', '"""../../dataset"""', 'aggr_device'], {'mag_emb': 'args.mag_emb'}), "(args.dataset, '../../dataset', aggr_device, mag_emb=args.mag_emb)\n", (5065, 5131), False, 'from dataset import load_dataset\n'), ((5141, 5152), 'time.time', 'time.time', ([], {}), '()\n', (5150, 5152), False, 'import time\n'), ((5257, 5421), 'os.path.join', 'os.path.join', (['""".."""', '"""embeddings"""', 'args.dataset', "(args.model if args.model != 'simple_sagn' else args.model + '_' + args.\n weight_style)", '"""smoothed_label_emb.pt"""'], {}), "('..', 'embeddings', args.dataset, args.model if args.model !=\n 'simple_sagn' else args.model + '_' + args.weight_style,\n 'smoothed_label_emb.pt')\n", (5269, 5421), False, 'import os\n'), ((15394, 15405), 'time.time', 'time.time', ([], {}), '()\n', (15403, 15405), False, 'import time\n'), ((1960, 1972), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1970, 1972), False, 'import gc\n'), ((5943, 6004), 'os.path.join', 'os.path.join', (['""".."""', '"""embeddings"""', 'args.dataset', '"""spectral.pt"""'], {}), "('..', 'embeddings', args.dataset, 'spectral.pt')\n", (5955, 6004), False, 'import os\n'), ((6016, 6049), 'os.path.exists', 'os.path.exists', (['spectral_emb_path'], {}), '(spectral_emb_path)\n', (6030, 6049), False, 'import os\n'), ((6567, 6615), 'torch.cat', 'torch.cat', (['[train_nid, val_nid, test_nid]'], {'dim': '(0)'}), '([train_nid, val_nid, test_nid], dim=0)\n', (6576, 6615), False, 'import torch\n'), ((7963, 7999), 'numpy.union1d', 
'np.union1d', (['train_nid', 'confident_nid'], {}), '(train_nid, confident_nid)\n', (7973, 7999), True, 'import numpy as np\n'), ((14802, 14822), 'torch.load', 'torch.load', (['emb_path'], {}), '(emb_path)\n', (14812, 14822), False, 'import torch\n'), ((14927, 14953), 'torch.load', 'torch.load', (['label_emb_path'], {}), '(label_emb_path)\n', (14937, 14953), False, 'import torch\n'), ((893, 914), 'torch.pow', 'torch.pow', (['degs', '(-0.5)'], {}), '(degs, -0.5)\n', (902, 914), False, 'import torch\n'), ((989, 1013), 'torch.reshape', 'torch.reshape', (['norm', 'shp'], {}), '(norm, shp)\n', (1002, 1013), False, 'import torch\n'), ((3903, 3915), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3913, 3915), False, 'import gc\n'), ((5477, 5502), 'os.path.dirname', 'os.path.dirname', (['emb_path'], {}), '(emb_path)\n', (5492, 5502), False, 'import os\n'), ((5525, 5550), 'os.path.dirname', 'os.path.dirname', (['emb_path'], {}), '(emb_path)\n', (5540, 5550), False, 'import os\n'), ((6381, 6424), 'torch.save', 'torch.save', (['spectral_emb', 'spectral_emb_path'], {}), '(spectral_emb, spectral_emb_path)\n', (6391, 6424), False, 'import torch\n'), ((6872, 6894), 'utils.entropy', 'entropy', (['teacher_probs'], {}), '(teacher_probs)\n', (6879, 6894), False, 'from utils import compute_spectral_emb, entropy\n'), ((7752, 7775), 'torch.ones_like', 'torch.ones_like', (['labels'], {}), '(labels)\n', (7767, 7775), False, 'import torch\n'), ((7905, 7929), 'torch.zeros_like', 'torch.zeros_like', (['labels'], {}), '(labels)\n', (7921, 7929), False, 'import torch\n'), ((10207, 10270), 'os.path.join', 'os.path.join', (['"""../subgraphs"""', 'args.dataset', '"""subgraph_train.pt"""'], {}), "('../subgraphs', args.dataset, 'subgraph_train.pt')\n", (10219, 10270), False, 'import os\n'), ((11003, 11027), 'os.path.exists', 'os.path.exists', (['emb_path'], {}), '(emb_path)\n', (11017, 11027), False, 'import os\n'), ((11974, 12004), 'os.path.exists', 'os.path.exists', (['label_emb_path'], {}), 
'(label_emb_path)\n', (11988, 12004), False, 'import os\n'), ((13160, 13190), 'os.path.exists', 'os.path.exists', (['label_emb_path'], {}), '(label_emb_path)\n', (13174, 13190), False, 'import os\n'), ((13852, 13876), 'os.path.exists', 'os.path.exists', (['emb_path'], {}), '(emb_path)\n', (13866, 13876), False, 'import os\n'), ((15265, 15305), 'torch.LongTensor', 'torch.LongTensor', (['train_nid_with_pseudos'], {}), '(train_nid_with_pseudos)\n', (15281, 15305), False, 'import torch\n'), ((2372, 2444), 'torch.zeros', 'torch.zeros', (['((num_target,) + x.shape[1:])'], {'dtype': 'x.dtype', 'device': 'x.device'}), '((num_target,) + x.shape[1:], dtype=x.dtype, device=x.device)\n', (2383, 2444), False, 'import torch\n'), ((2828, 2849), 'torch.pow', 'torch.pow', (['degs', '(-0.5)'], {}), '(degs, -0.5)\n', (2837, 2849), False, 'import torch\n'), ((2924, 2948), 'torch.reshape', 'torch.reshape', (['norm', 'shp'], {}), '(norm, shp)\n', (2937, 2948), False, 'import torch\n'), ((4239, 4325), 'torch.zeros', 'torch.zeros', (['((num_target,) + feat.shape[1:])'], {'dtype': 'feat.dtype', 'device': 'feat.device'}), '((num_target,) + feat.shape[1:], dtype=feat.dtype, device=feat.\n device)\n', (4250, 4325), False, 'import torch\n'), ((6505, 6527), 'torch.load', 'torch.load', (['probs_path'], {}), '(probs_path)\n', (6515, 6527), False, 'import torch\n'), ((10725, 10788), 'os.path.join', 'os.path.join', (['"""../subgraphs"""', 'args.dataset', '"""subgraph_train.pt"""'], {}), "('../subgraphs', args.dataset, 'subgraph_train.pt')\n", (10737, 10788), False, 'import os\n'), ((11795, 11807), 'gc.collect', 'gc.collect', ([], {}), '()\n', (11805, 11807), False, 'import gc\n'), ((12944, 12956), 'gc.collect', 'gc.collect', ([], {}), '()\n', (12954, 12956), False, 'import gc\n'), ((13720, 13732), 'gc.collect', 'gc.collect', ([], {}), '()\n', (13730, 13732), False, 'import gc\n'), ((14243, 14255), 'gc.collect', 'gc.collect', ([], {}), '()\n', (14253, 14255), False, 'import gc\n'), ((1321, 1362), 
'dgl.function.copy_src', 'fn.copy_src', ([], {'src': 'f"""feat_{hop}"""', 'out': '"""msg"""'}), "(src=f'feat_{hop}', out='msg')\n", (1332, 1362), True, 'import dgl.function as fn\n'), ((1392, 1428), 'dgl.function.sum', 'fn.sum', ([], {'msg': '"""msg"""', 'out': 'f"""feat_{hop}"""'}), "(msg='msg', out=f'feat_{hop}')\n", (1398, 1428), True, 'import dgl.function as fn\n'), ((1548, 1589), 'dgl.function.copy_src', 'fn.copy_src', ([], {'src': 'f"""feat_{hop}"""', 'out': '"""msg"""'}), "(src=f'feat_{hop}', out='msg')\n", (1559, 1589), True, 'import dgl.function as fn\n'), ((1619, 1656), 'dgl.function.mean', 'fn.mean', ([], {'msg': '"""msg"""', 'out': 'f"""feat_{hop}"""'}), "(msg='msg', out=f'feat_{hop}')\n", (1626, 1656), True, 'import dgl.function as fn\n'), ((6078, 6107), 'torch.load', 'torch.load', (['spectral_emb_path'], {}), '(spectral_emb_path)\n', (6088, 6107), False, 'import torch\n'), ((6263, 6297), 'os.path.dirname', 'os.path.dirname', (['spectral_emb_path'], {}), '(spectral_emb_path)\n', (6278, 6297), False, 'import os\n'), ((6333, 6367), 'os.path.dirname', 'os.path.dirname', (['spectral_emb_path'], {}), '(spectral_emb_path)\n', (6348, 6367), False, 'import os\n'), ((6766, 6788), 'numpy.log', 'np.log', (['args.threshold'], {}), '(args.threshold)\n', (6772, 6788), True, 'import numpy as np\n'), ((6812, 6838), 'numpy.log', 'np.log', (['(1 - args.threshold)'], {}), '(1 - args.threshold)\n', (6818, 6838), True, 'import numpy as np\n'), ((7818, 7852), 'torch.argmax', 'torch.argmax', (['teacher_probs'], {'dim': '(1)'}), '(teacher_probs, dim=1)\n', (7830, 7852), False, 'import torch\n'), ((9033, 9072), 'torch.zeros', 'torch.zeros', (['[feat.shape[0], n_classes]'], {}), '([feat.shape[0], n_classes])\n', (9044, 9072), False, 'import torch\n'), ((10578, 10620), 'os.path.join', 'os.path.join', (['"""../subgraphs"""', 'args.dataset'], {}), "('../subgraphs', args.dataset)\n", (10590, 10620), False, 'import os\n'), ((10650, 10692), 'os.path.join', 'os.path.join', 
(['"""../subgraphs"""', 'args.dataset'], {}), "('../subgraphs', args.dataset)\n", (10662, 10692), False, 'import os\n'), ((11599, 11623), 'os.path.exists', 'os.path.exists', (['emb_path'], {}), '(emb_path)\n', (11613, 11623), False, 'import os\n'), ((11719, 11746), 'torch.save', 'torch.save', (['feats', 'emb_path'], {}), '(feats, emb_path)\n', (11729, 11746), False, 'import torch\n'), ((11829, 11854), 'torch.cuda.device', 'torch.cuda.device', (['device'], {}), '(device)\n', (11846, 11854), False, 'import torch\n'), ((11876, 11900), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (11898, 11900), False, 'import torch\n'), ((12726, 12756), 'os.path.exists', 'os.path.exists', (['label_emb_path'], {}), '(label_emb_path)\n', (12740, 12756), False, 'import os\n'), ((12860, 12897), 'torch.save', 'torch.save', (['label_emb', 'label_emb_path'], {}), '(label_emb, label_emb_path)\n', (12870, 12897), False, 'import torch\n'), ((12978, 13003), 'torch.cuda.device', 'torch.cuda.device', (['device'], {}), '(device)\n', (12995, 13003), False, 'import torch\n'), ((13025, 13049), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13047, 13049), False, 'import torch\n'), ((13498, 13528), 'os.path.exists', 'os.path.exists', (['label_emb_path'], {}), '(label_emb_path)\n', (13512, 13528), False, 'import os\n'), ((13633, 13670), 'torch.save', 'torch.save', (['label_emb', 'label_emb_path'], {}), '(label_emb, label_emb_path)\n', (13643, 13670), False, 'import torch\n'), ((13754, 13779), 'torch.cuda.device', 'torch.cuda.device', (['device'], {}), '(device)\n', (13771, 13779), False, 'import torch\n'), ((13801, 13825), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (13823, 13825), False, 'import torch\n'), ((14047, 14071), 'os.path.exists', 'os.path.exists', (['emb_path'], {}), '(emb_path)\n', (14061, 14071), False, 'import os\n'), ((14167, 14194), 'torch.save', 'torch.save', (['feats', 'emb_path'], {}), '(feats, emb_path)\n', 
(14177, 14194), False, 'import torch\n'), ((14277, 14302), 'torch.cuda.device', 'torch.cuda.device', (['device'], {}), '(device)\n', (14294, 14302), False, 'import torch\n'), ((14324, 14348), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (14346, 14348), False, 'import torch\n'), ((3175, 3206), 'dgl.function.copy_src', 'fn.copy_src', ([], {'src': '"""f"""', 'out': '"""msg"""'}), "(src='f', out='msg')\n", (3186, 3206), True, 'import dgl.function as fn\n'), ((3236, 3262), 'dgl.function.sum', 'fn.sum', ([], {'msg': '"""msg"""', 'out': '"""f"""'}), "(msg='msg', out='f')\n", (3242, 3262), True, 'import dgl.function as fn\n'), ((3649, 3680), 'dgl.function.copy_src', 'fn.copy_src', ([], {'src': '"""f"""', 'out': '"""msg"""'}), "(src='f', out='msg')\n", (3660, 3680), True, 'import dgl.function as fn\n'), ((3710, 3737), 'dgl.function.mean', 'fn.mean', ([], {'msg': '"""msg"""', 'out': '"""f"""'}), "(msg='msg', out='f')\n", (3717, 3737), True, 'import dgl.function as fn\n'), ((8756, 8794), 'torch.ones', 'torch.ones', (['[feat.shape[0], n_classes]'], {}), '([feat.shape[0], n_classes])\n', (8766, 8794), False, 'import torch\n'), ((10346, 10409), 'os.path.join', 'os.path.join', (['"""../subgraphs"""', 'args.dataset', '"""subgraph_train.pt"""'], {}), "('../subgraphs', args.dataset, 'subgraph_train.pt')\n", (10358, 10409), False, 'import os\n'), ((9234, 9311), 'torch.nn.functional.one_hot', 'F.one_hot', (['labels_with_pseudos[train_nid_with_pseudos]'], {'num_classes': 'n_classes'}), '(labels_with_pseudos[train_nid_with_pseudos], num_classes=n_classes)\n', (9243, 9311), True, 'import torch.nn.functional as F\n')] |
"""Get counterfactual prediction for 2000 US dollars tuition subsidy.
Get counterfactual prediction for 2000 US dollars tuition subsidy for
different parametrization of the model with hyperbolic discounting and choice
restrictions based on Keane and Wolpin (1994) :cite:`KeaneWolpin1994`.
Looking at the bivariate distribution of the time preference parameters, it
seems that many combinations of beta (present bias) and delta (discount factor)
are compatible with the empirical data.
Therefore, I study the extent to which the counteractual predictions of these
competing parametrizations differ.
"""
import numpy as np
import pandas as pd
import respy as rp
from bld.project_paths import project_paths_join as ppj
from src.library.housekeeping import _save_to_pickle
from src.library.housekeeping import _temporary_working_directory
from tqdm import tqdm
def simulate_life_cycle_data(params, options):
    """Simulate 100 replications of the life-cycle model.

    Args:
        params (pd.DataFrame): Model parameters.
        options (dict): Model options.

    Returns:
        List of pd.DataFrames, one per (simulation seed, solution seed) pair.
    """
    params_copy = params.copy()
    options_copy = options.copy()
    num_replications = 100
    simulation_seeds = np.linspace(0, 99, num_replications)
    solution_seeds = np.linspace(1000, 1099, num_replications)
    # Columns from which choice and wage moments are computed downstream.
    moment_columns = [
        "Experience_A",
        "Experience_B",
        "Experience_Edu",
        "Present_Bias",
        "Discount_Rate",
        "Choice",
        "Wage",
    ]
    results = []
    for sim_seed, sol_seed in tqdm(zip(simulation_seeds, solution_seeds)):
        results.append(
            simulate_life_cycle_df(
                params_copy, options_copy, sim_seed, sol_seed, moment_columns
            )
        )
    return results
def simulate_life_cycle_df(params, options, sim_seed, sol_seed, col_to_keep):
    """Simulate one dataset and reduce it to choice shares and wage moments.

    Args:
        params (pd.DataFrame): Model parameters.
        options (dict): Model options (mutated: seeds are written into it).
        sim_seed (int): Simulation seed.
        sol_seed (int): Solution seed.
        col_to_keep (list): Columns from which the moments are computed.

    Returns:
        pd.DataFrame with per-period wage moments (mean, std) and choice shares.
    """
    # Run the simulation inside a scratch directory so respy's on-disk
    # artifacts from parallel runs do not collide.
    with _temporary_working_directory(snippet=f"{sim_seed}_{sol_seed}"):
        options["simulation_seed"] = int(sim_seed)
        options["solution_seed"] = int(sol_seed)
        simulate = rp.get_simulate_func(params, options)
        df = simulate(params)
    # Per-period choice shares.
    choice_shares = df.groupby("Period").Choice.value_counts(normalize=True).unstack()
    # Per-period mean and standard deviation of the tracked columns.
    wage_moments = df[col_to_keep].groupby("Period").describe().loc[:, (slice(None), ["mean", "std"])]
    return pd.concat([wage_moments, choice_shares], axis=1)
if __name__ == "__main__":
    # Load the Keane-Wolpin (1994) example model without its empirical data.
    params, options = rp.get_example_model("kw_94_three", with_data=False)
    options["simulation_agents"] = 10_000
    # Competing (delta, beta) time-preference parametrizations: the "true"
    # hyperbolic one and two observationally similar alternatives.
    params_dict = {
        "true": {"delta": 0.95, "beta": 0.8},
        "miss_exp": {"delta": 0.938, "beta": 1},
        "miss_1": {"delta": 0.948, "beta": 0.83},
    }
    for model, time_params in params_dict.items():
        # Baseline: no tuition subsidy.
        params.loc[("delta", "delta"), "value"] = time_params["delta"]
        params.loc[("beta", "beta"), "value"] = time_params["beta"]
        data = simulate_life_cycle_data(params, options)
        _save_to_pickle(data, ppj("OUT_DATA", "counterfactual_data", f"data_{model}.pickle"))
        # delete saved data to free up memory
        del data
        # Counterfactual: 2000 USD tuition subsidy on the education payoff.
        params_ = params.copy()
        params_.loc[("nonpec_edu", "at_least_twelve_exp_edu"), "value"] += 2_000
        data_subsidy = simulate_life_cycle_data(params_, options)
        _save_to_pickle(
            data_subsidy,
            ppj("OUT_DATA", "counterfactual_data", f"data_{model}_subsidy.pickle"),
        )
        # delete saved data to free up memory
        del data_subsidy
| [
"bld.project_paths.project_paths_join",
"respy.get_simulate_func",
"src.library.housekeeping._temporary_working_directory",
"numpy.linspace",
"respy.get_example_model",
"pandas.concat"
] | [((1260, 1290), 'numpy.linspace', 'np.linspace', (['(0)', '(99)', 'n_datasets'], {}), '(0, 99, n_datasets)\n', (1271, 1290), True, 'import numpy as np\n'), ((1307, 1342), 'numpy.linspace', 'np.linspace', (['(1000)', '(1099)', 'n_datasets'], {}), '(1000, 1099, n_datasets)\n', (1318, 1342), True, 'import numpy as np\n'), ((2966, 3018), 'respy.get_example_model', 'rp.get_example_model', (['"""kw_94_three"""'], {'with_data': '(False)'}), "('kw_94_three', with_data=False)\n", (2986, 3018), True, 'import respy as rp\n'), ((2327, 2389), 'src.library.housekeeping._temporary_working_directory', '_temporary_working_directory', ([], {'snippet': 'f"""{sim_seed}_{sol_seed}"""'}), "(snippet=f'{sim_seed}_{sol_seed}')\n", (2355, 2389), False, 'from src.library.housekeeping import _temporary_working_directory\n'), ((2510, 2547), 'respy.get_simulate_func', 'rp.get_simulate_func', (['params', 'options'], {}), '(params, options)\n', (2530, 2547), True, 'import respy as rp\n'), ((2844, 2879), 'pandas.concat', 'pd.concat', (['[wages, choices]'], {'axis': '(1)'}), '([wages, choices], axis=1)\n', (2853, 2879), True, 'import pandas as pd\n'), ((3542, 3604), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_DATA"""', '"""counterfactual_data"""', 'f"""data_{model}.pickle"""'], {}), "('OUT_DATA', 'counterfactual_data', f'data_{model}.pickle')\n", (3545, 3604), True, 'from bld.project_paths import project_paths_join as ppj\n'), ((3945, 4015), 'bld.project_paths.project_paths_join', 'ppj', (['"""OUT_DATA"""', '"""counterfactual_data"""', 'f"""data_{model}_subsidy.pickle"""'], {}), "('OUT_DATA', 'counterfactual_data', f'data_{model}_subsidy.pickle')\n", (3948, 4015), True, 'from bld.project_paths import project_paths_join as ppj\n')] |
#Filename : plankton.py
#written by <NAME>
#on
import csv
import numpy as np
from scipy import interpolate
def reader(filename):
    """Read a .csv file and return normalised header labels and numeric rows.

    The first row is treated as the header: each label is lower-cased and
    stripped of spaces.  Every remaining cell is converted to float.

    Args:
        filename: Path of the .csv file to read.

    Returns:
        Tuple (tblock, data): the list of normalised header labels and the
        list of data rows as floats.
    """
    with open(filename, 'r') as fl:
        rows = list(csv.reader(fl, dialect='excel'))
    tblock = rows[0]
    data = rows[1:]
    # Normalise header labels: lower-case, spaces removed.
    for k in range(len(tblock)):
        tblock[k] = tblock[k].lower().replace(" ", "")
    # Bug fix: the original took the column count from data[1], which raised
    # IndexError on files with exactly one data row.  Use the first row.
    n_cols = len(data[0]) if data else 0
    for i in range(len(data)):
        for j in range(n_cols):
            data[i][j] = float(data[i][j])
    return tblock, data
def xyext(tblock, data):
    """Split flat tracking rows into per-individual trajectories.

    Locates the time/x/y/id/velocity columns via the header labels, then
    groups consecutive rows sharing the same id into one trial.

    Args:
        tblock: Normalised header labels (as produced by ``reader``).
        data: Rows of floats; rows of one individual must be contiguous.

    Returns:
        List of trials, each a list of
        [time, x, y, x_velocity, y_velocity, id] rows.
    """
    # Map the needed columns from the header labels.
    for i in range(len(tblock)):
        if tblock[i] in ['time']:
            tloc = i
        elif tblock[i] in ['x']:
            xloc = i
        elif tblock[i] in ['y']:
            yloc = i
        elif tblock[i] in ['id']:
            iloc = i
        elif tblock[i] in ['xvelocity']:
            xvloc = i
        elif tblock[i] in ['yvelocity']:
            yvloc = i
    coord = []
    if not data:
        return coord
    iv = data[0][iloc]
    crd = []
    for i in range(len(data)):
        if iv != data[i][iloc]:
            # A new individual starts: store the finished trial.
            coord.append(crd)
            crd = []
            iv = data[i][iloc]
        cod = [data[i][tloc], data[i][xloc], data[i][yloc],
               data[i][xvloc], data[i][yvloc], data[i][iloc]]
        crd.append(cod)
    # Bug fix: the original never appended the last individual's trial after
    # the loop, silently dropping the final track.
    coord.append(crd)
    return coord
def exporter(sortdata):
    """Flatten per-trial rows into one table, write it to out.csv, return it.

    Args:
        sortdata: List of trials, each a list of row lists.

    Returns:
        The flattened table (header row followed by all data rows).
    """
    header = ['Time', 'x', 'y', 'x Velocity', 'y Velocity', 'ID', 'Turn ang.', 'Speaker ang.', 'spl']
    table = [header]
    for trial in sortdata:
        for row in trial:
            table.append(row)
    np.savetxt("out.csv", table, delimiter=",", fmt='%s')
    return table
def angl(p1, p2):
    """Return the angle of the segment p1 -> p2 measured from the +y axis.

    Args:
        p1, p2: (x, y) tuples.

    Returns:
        Angle in radians in (-pi, pi], or 2*pi as a sentinel when the two
        points coincide.
    """
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    if dy > 0:
        return np.arctan(dx / dy)
    if dy < 0:
        base = np.arctan(dx / dy)
        # Shift into the correct half-plane depending on the sign of dx.
        return np.pi + base if dx >= 0 else base - np.pi
    # dy == 0: the segment is horizontal (or degenerate).
    if dx > 0:
        return np.pi / 2
    if dx < 0:
        return -np.pi / 2
    return 2 * np.pi
def angler(data):
    """Append turning angles and speaker angles to every trajectory row.

    For each trial in ``data`` two passes are made: first the turning angle
    between consecutive segments (appended as column 7), then the angle to a
    fixed speaker position at (0, -20) (appended as column 8).  Rows whose
    angle cannot be computed get 'na' or a fallback value.  ``data`` is
    mutated in place and also returned.
    """
    for i in range(len(data)):
        j = 0
        k = 0
        ### a123 calculation
        for j in range(len(data[i]) - 2):
            # p assignment: choose three consecutive (x, y) points; when the
            # track's y changes sign, one point is shifted by +-60 —
            # presumably unwrapping a periodic arena of height 60 (TODO confirm).
            if data[i][j][2] * data[i][j + 1][2] >= 0 and data[i][j + 1][2] * data[i][j + 2][2] >= 0:
                p1 = (data[i][j][1], data[i][j][2])
                p2 = (data[i][j + 1][1],data[i][j + 1][2])
                p3 = (data[i][j + 2][1],data[i][j + 2][2])
            elif data[i][j][2] * data[i][j + 1][2] < 0 and data[i][j + 1][2] * data[i][j + 2][2] >= 0:
                p2 = (data[i][j + 1][1],data[i][j + 1][2])
                p3 = (data[i][j + 2][1],data[i][j + 2][2])
                if p2[1] >= 0:
                    p1 = (data[i][j][1], data[i][j][2] - 60)
                else:
                    p1 = (data[i][j][1], data[i][j][2] + 60)
            elif data[i][j][2] * data[i][j + 1][2] >= 0 and data[i][j + 1][2] * data[i][j + 2][2] < 0:
                p1 = (data[i][j][1], data[i][j][2])
                p2 = (data[i][j + 1][1],data[i][j + 1][2])
                if p2[1] >= 0:
                    p3 = (data[i][j + 2][1],data[i][j + 2][2] - 60)
                else:
                    p3 = (data[i][j + 2][1],data[i][j + 2][2] + 60)
            # NOTE(review): when both sign products are negative, none of the
            # branches above runs and p1/p2/p3 keep their values from the
            # previous iteration (or are undefined on the first) — confirm.
            #a123 calculation
            a12 = angl(p1, p2)
            a23 = angl(p2, p3)
            # angl returns 2*pi as a "degenerate segment" sentinel; reuse the
            # previous angle a0 when the first segment is degenerate.
            if a12 != 2 * np.pi and a23 != 2 * np.pi:
                angle = a23 - a12
                data[i][j+1].append(angle)
                a0 = a23
            elif a12 == 2 * np.pi and a23 != 2 * np.pi and j >= 1:
                angle = a23 - a0
                data[i][j+1].append(angle)
                a0 = a23
            else:
                data[i][j+1].append(0)
        # First and last rows have no turning angle.
        data[i][0].append('na')
        data[i][-1].append('na')
        ### a12s calculation
        for k in range(len(data[i]) - 1):
            if data[i][k][2] * data[i][k + 1][2] >= 0:
                p1 = (data[i][k][1], data[i][k][2])
                p2 = (data[i][k + 1][1],data[i][k + 1][2])
                # NOTE(review): both branches assign the same speaker position.
                if p2[1] > 0:
                    ps = (0, -20)
                else:
                    ps = (0, -20)
                a12 = angl(p1, p2)
                a2s = angl(p2, ps)
                if a12 != 2 * np.pi:
                    angle = a2s - a12
                    data[i][k + 1].append(angle)
                    sa0 = angle
                else:
                    # NOTE(review): this indexes with j (leftover from the
                    # first loop) instead of k — looks like a bug; confirm.
                    data[i][j+1].append(sa0)
            else:
                p2 = (data[i][k + 1][1],data[i][k + 1][2])
                # Sign change in y: shift p1 by +-60 as in the first pass.
                if p2[1] > 0:
                    p1 = (data[i][k][1], data[i][k][2] - 60)
                    ps = (0, -20)
                else:
                    p1 = (data[i][k][1], data[i][k][2] + 60)
                    ps = (0, -20)
                a12 = angl(p1, p2)
                a2s = angl(p2, ps)
                angle = a2s - a12
                data[i][k + 1].append(angle)
                sa0 = angle
        # First row has no speaker angle.
        data[i][0].append('na')
    return data
def gillnetmaker(filename):
    """Load the gillnet coordinate columns from a .csv file.

    Args:
        filename: Path of the gillnet .csv file.

    Returns:
        Tuple (y, x, z) of coordinate lists.  Note the swapped order — the
        caller (``interpol``) unpacks them as x, y, z.
    """
    header, net_rows = reader(filename)
    # Locate the coordinate columns in the normalised header.
    for idx in range(len(header)):  # check this bit with Nick
        if header[idx] in ['x']:
            xloc = idx
        elif header[idx] in ['y']:
            yloc = idx
        elif header[idx] in ['z']:
            zloc = idx
    xs = [row[xloc] for row in net_rows]
    ys = [row[yloc] for row in net_rows]
    zs = [row[zloc] for row in net_rows]
    return ys, xs, zs
def interpol(coord, gillnet):
    """Append an interpolated gillnet value to every tracked point.

    Builds a cubic 2-D interpolant of the gillnet surface and evaluates it
    at each point's (x, y) position, appending the truncated integer result
    to the row in place.

    Args:
        coord: List of trials of [time, x, y, ...] rows (mutated in place).
        gillnet: Path of the gillnet .csv file.

    Returns:
        The same ``coord`` list with the interpolated value appended.
    """
    gx, gy, gz = gillnetmaker(gillnet)
    net = interpolate.interp2d(gx, gy, gz, kind='cubic')
    for trial in coord:
        for point in trial:
            value = int(net(point[1], point[2]))
            point.append(value)
    return coord
#PROGRAM FISH
# Pipeline: read the tracking csv, split rows into per-individual trials,
# compute turning/speaker angles, attach interpolated gillnet values,
# then flatten everything to out.csv.
tblock, data = reader('in.csv')
coord = xyext(tblock, data)
angles = angler(coord)
fishboat = interpol(angles, 'gillnet.csv')
out = exporter(fishboat)
#END PROGRAM FISH
| [
"csv.reader",
"scipy.interpolate.interp2d",
"numpy.savetxt",
"numpy.arctan"
] | [((1998, 2052), 'numpy.savetxt', 'np.savetxt', (['"""out.csv"""', 'output'], {'delimiter': '""","""', 'fmt': '"""%s"""'}), "('out.csv', output, delimiter=',', fmt='%s')\n", (2008, 2052), True, 'import numpy as np\n'), ((6268, 6311), 'scipy.interpolate.interp2d', 'interpolate.interp2d', (['x', 'y', 'z'], {'kind': '"""cubic"""'}), "(x, y, z, kind='cubic')\n", (6288, 6311), False, 'from scipy import interpolate\n'), ((252, 283), 'csv.reader', 'csv.reader', (['fl'], {'dialect': '"""excel"""'}), "(fl, dialect='excel')\n", (262, 283), False, 'import csv\n'), ((2211, 2255), 'numpy.arctan', 'np.arctan', (['((p2[0] - p1[0]) / (p2[1] - p1[1]))'], {}), '((p2[0] - p1[0]) / (p2[1] - p1[1]))\n', (2220, 2255), True, 'import numpy as np\n'), ((2341, 2385), 'numpy.arctan', 'np.arctan', (['((p2[0] - p1[0]) / (p2[1] - p1[1]))'], {}), '((p2[0] - p1[0]) / (p2[1] - p1[1]))\n', (2350, 2385), True, 'import numpy as np\n'), ((2428, 2472), 'numpy.arctan', 'np.arctan', (['((p2[0] - p1[0]) / (p2[1] - p1[1]))'], {}), '((p2[0] - p1[0]) / (p2[1] - p1[1]))\n', (2437, 2472), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from linear_regression import Linear_regression
def Call_myLRmodel(data):
    """Fit the hand-rolled gradient-descent linear regression.

    NOTE: inserts an all-ones intercept column into `data` *in place*;
    the caller (and the subsequent sklearn fit) relies on this.

    Returns (theta, cost_history, final_cost).
    """
    # prepend the intercept ("Ones") column
    data.insert(0, 'Ones', 1)
    # split features (all but last column) from the target (last column)
    n_cols = data.shape[1]
    features = np.array(data.iloc[:, 0:n_cols - 1].values)
    target = np.array(data.iloc[:, n_cols - 1:n_cols].values)
    # initialise the parameter vector and optimiser settings
    theta = np.zeros([1, n_cols - 1])
    learning_rate = 0.01
    n_iters = 1000
    model = Linear_regression(theta, learning_rate)
    # run gradient descent on the data set
    theta_fit, cost_history = model.gradientDescent(features, target, n_iters)
    # evaluate the cost (error) of the fitted model
    final_cost = model.computeCost(features, target, theta_fit)
    # To inspect convergence, plot cost_history vs. iteration number
    # (the original kept a disabled matplotlib snippet here).
    return theta_fit, cost_history, final_cost
def Call_SklearnLR(data):
    """Fit sklearn's LinearRegression on the same frame.

    Expects `data` to already contain the intercept column (inserted by
    Call_myLRmodel); the last column is the target.

    Returns (coef_, R**2 score).
    """
    # imported locally so the rest of the script has no sklearn dependency
    from sklearn import linear_model
    n_cols = data.shape[1]
    features = np.array(data.iloc[:, 0:n_cols - 1].values)
    target = np.array(data.iloc[:, n_cols - 1:n_cols].values)
    reg = linear_model.LinearRegression()
    reg.fit(features, target)
    return reg.coef_, reg.score(features, target)
if __name__ == "__main__":
    path = 'ex1data2.txt'
    data = pd.read_csv(path, header=None, names=['Size', 'Bedrooms', 'Price'])
    print(data.head())
    # Feature scaling: standardise each column so gradient descent
    # converges faster (both fits see the same scaled inputs).
    data = (data - data.mean()) / data.std()
    print(data.head())
    Para_1, _, score_1 = Call_myLRmodel(data)
    Para_2, score_2 = Call_SklearnLR(data)
    # Collect both fits, dropping the intercept term (column 0) from the
    # reported parameters. FIX: renamed from `dict`, which shadowed the
    # builtin of the same name.
    records = [{'Parameter': Para_1[:, 1:], 'Score': score_1},
               {'Parameter': Para_2[:, 1:], 'Score': score_2}]
    df = pd.DataFrame(records)
    print(df)
print(df) | [
"pandas.read_csv",
"numpy.array",
"numpy.zeros",
"pandas.DataFrame",
"linear_regression.Linear_regression",
"sklearn.linear_model.LinearRegression"
] | [((396, 414), 'numpy.array', 'np.array', (['X.values'], {}), '(X.values)\n', (404, 414), True, 'import numpy as np\n'), ((423, 441), 'numpy.array', 'np.array', (['y.values'], {}), '(y.values)\n', (431, 441), True, 'import numpy as np\n'), ((454, 477), 'numpy.zeros', 'np.zeros', (['[1, cols - 1]'], {}), '([1, cols - 1])\n', (462, 477), True, 'import numpy as np\n'), ((527, 558), 'linear_regression.Linear_regression', 'Linear_regression', (['theta', 'alpha'], {}), '(theta, alpha)\n', (544, 558), False, 'from linear_regression import Linear_regression\n'), ((1238, 1259), 'numpy.array', 'np.array', (['X_sk.values'], {}), '(X_sk.values)\n', (1246, 1259), True, 'import numpy as np\n'), ((1271, 1292), 'numpy.array', 'np.array', (['y_sk.values'], {}), '(y_sk.values)\n', (1279, 1292), True, 'import numpy as np\n'), ((1309, 1340), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (1338, 1340), False, 'from sklearn import linear_model\n'), ((1490, 1557), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': 'None', 'names': "['Size', 'Bedrooms', 'Price']"}), "(path, header=None, names=['Size', 'Bedrooms', 'Price'])\n", (1501, 1557), True, 'import pandas as pd\n'), ((1959, 1977), 'pandas.DataFrame', 'pd.DataFrame', (['dict'], {}), '(dict)\n', (1971, 1977), True, 'import pandas as pd\n')] |
""" This file does the calculatoin, it saves the files and deletes a file from the do list.
This scripts loads ERA5 hourly atmospheric data and tries to close the AM budget.
This is pyhton 3
"""
import os
import sys
root_path= 'path_to_project'
sys.path.append(root_path)
import tools_AM_budget as M
from tools_AM_budget import write_log as logger
#
import imp
import glob
import xarray as xr
import time
import shutil
import datetime
import numpy as np
print('python version sys.version')
print('xr modules loaded')
print(xr.__version__)
# set varibables
date_str = '1980-01-01'
workernumber= 250
ttotstart = time.time()
# %%
load_path_plev = root_path+'/data/'
save_path = load_path_plev
plot_path = root_path
conf_path = root_path
log_path = root_path
log_name = 'worker_log_num'+str(workernumber)+'_'+date_str
key_name = 'ERA5_AM_eq_'
plot_cont = False
seg_dict = { 'africa_europa':slice(-17.9, 60), 'asia':slice(60.1, 180), 'americas':slice(-180.1, -18) }
mconfig = M.json_load( 'config', root_path)
# define chunk sizes:
time_chunk_size = 3
lat_chunk_size = 60
#flist =os.listdir(load_path)
date_str_short=date_str.replace('-', '')
D= xr.open_dataset(load_path_plev+'ERA5_subdaily_plev_.25deg_'+date_str_short+'.nc', chunks={'time':time_chunk_size, 'latitude':lat_chunk_size})#.isel(level=range(-12, 0))
D['level'] = D.level*100.0 # to get Pascal
print(D.chunks)
len(D.chunks)
Dsurface = xr.open_dataset(load_path_plev+'ERA5_subdaily_plev_.25deg_gph_'+date_str_short+'.nc',
chunks={'time':time_chunk_size, 'latitude':lat_chunk_size})#.sel(latitude=slice(0, -90))
# define varibables
Phi = D.z
data = D.drop('z')
ps = Dsurface.sp
fluxes = Dsurface['iews']
gravity_drag = Dsurface['megwss'] # These are the mean eastward gravity wave stresses,
#eventhough the ERA5 documentation claims that this should have units of Pa = N m**-2,
# they likely miss a factor of
# %%
def rho_beta(levels, ps, check=False):
    """
    Boer rho-beta field: surface pressure masked to levels above ground.

    levels: pressure levels (same units as ps)
    ps:     surface pressure field
    check:  if True, print the fraction of points lying above the surface
    returns:
        array shaped like (levels < ps): ps where the level is above
        the surface, 0 below ground (i.e. H_beta * ps)
    """
    above_ground = levels < ps
    if check is True:
        print('ratio ' + str(above_ground.sum() / float(above_ground.size)))
    return above_ground * 1 * ps
def rho_beta_heaviside(levels, ps, check=False):
    """
    Boer H_beta (Heaviside) field: 1 above ground, 0 below.

    levels: pressure levels (same units as ps)
    ps:     surface pressure field
    check:  if True, print the fraction of points lying above the surface
    returns:
        0/1 integer array shaped like (levels < ps)
    """
    above_ground = levels < ps
    if check is True:
        print('ratio ' + str(above_ground.sum() / float(above_ground.size)))
    return above_ground * 1
def ddphi_spherical_zm(dd, ps_zm, r_e, lat, time_chunk=None):
    """
    Meridional (phi) gradient of a zonal-mean field on the sphere.

    Computes -d/dphi( w * r_e * cos^2(phi) * dd ) / ( w * r_e^2 * cos^2(phi) )
    where the weight w is ps_zm if given, else 1 (built from a level slice
    of dd). Takes and returns xarray.DataArrays.

    inputs:
        dd          xr.DataArray with (latitude, time, level) or
                    (latitude, time), or combinations thereof
        ps_zm       xr.DataArray surface pressure in dimensions matching dd,
                    or None for an unweighted gradient
        r_e         earth radius used in the spherical gradient
        lat         latitude values in degrees, same size as dd.latitude
        time_chunk  optional chunk size for the 'time' dimension
    returns:
        xr.DataArray with the same dimensions as dd
    """
    # rechunk dd so each dimension is a single chunk (except optionally time)
    rechunk_dic = dict()
    for k in dd.dims:
        rechunk_dic[k] = dd[k].size
    if time_chunk is not None:
        rechunk_dic['time'] = time_chunk
    dd = dd.chunk(rechunk_dic)
    lat_radiens = lat * np.pi / 180.0
    cos_phi = np.cos(lat_radiens)
    if ps_zm is None:
        print('no ps weight lat gradient')
        # dummy weight of ones on a single level
        ps_dummy = dd.isel(level=1) * 0 + 1
        grad_matrix = ps_dummy * r_e * cos_phi**2 * dd
    else:
        print('ps weight lat gradient')
        rechunk_dic = dict()
        for k in ps_zm.dims:
            # BUGFIX: this previously read chunk sizes from the unrelated
            # module-level variable `uzm_vzm_rep`; use ps_zm's own sizes.
            rechunk_dic[k] = ps_zm[k].size
        if time_chunk is not None:
            rechunk_dic['time'] = time_chunk
        ps_zm = ps_zm.chunk(rechunk_dic)
        grad_matrix = ps_zm * r_e * cos_phi**2 * dd
    # make sure latitude sits on the 2nd axis before differentiating
    if lat.size != grad_matrix.shape[1]:
        grad_matrix = grad_matrix.T
    if lat.size != grad_matrix.shape[1]:
        raise ValueError('the 2nd dimension it not the same size as the latitude. make sure the input arrays as the cooriantes like (time, latitude, level) or (time, latitude)')
    # differentiate w.r.t. latitude (degrees); the 4*dphi factor converts to
    # radians following the original derivation -- TODO confirm the scaling
    grad_matrix_dphi = - grad_matrix.differentiate('latitude', edge_order=2)/(4.0*lat_radiens.diff('latitude').mean())
    # normalise by the metric (and pressure weight, if any)
    if ps_zm is None:
        factor = r_e**2 * cos_phi**2
    else:
        factor = ps_zm * r_e**2 * cos_phi**2
    dd_return = grad_matrix_dphi / factor
    return dd_return
def ps_weight_timemean(field, ps):
    """
    Surface-pressure-weighted time mean of atmospheric fields.

    input:
        field   xr.DataArray or xr.Dataset
        ps      surface pressure field matching field's dimensions (no
                vertical coordinate needed)
    return:
        same structure as field, averaged over 'time' with ps weighting
    """
    weighted_mean = (field * ps).mean('time')
    weight = ps.mean('time')
    return weighted_mean / weight
def ddlambda_spherical(dd, ps, r_e, lon, lat ):
    """
    Zonal (lambda) gradient of a field on the sphere.

    Wraps two longitude columns around each boundary before
    differentiating, so the gradient is continuous at the dateline; the
    padded columns are sliced off again before returning.
    inputs:
        dd      xr.DataArray with (time, latitude, longitude) or similar;
                longitude is expected on the 3rd axis (transposed once if not)
        ps      optional pressure weight; if given, (dd*ps) is
                differentiated and the result divided by ps
        r_e     earth radius used in the spherical gradient
        lon     longitude values in degrees, same size as dd.longitude
        lat     latitude values in degrees, same size as dd.latitude
    returns:
        xr.DataArray (via .to_array(), so a 'variable' dim is added --
        callers .sel(variable=...) it away)
    """
    import xarray as xr
    lon_radiens =lon * np.pi/180.0
    lat_radiens =lat * np.pi/180.0
    cos_phi= np.cos(lat_radiens)
    #dd.shape
    if lon.size != dd.shape[2]:
        dd= dd.T
    # (size re-check after transpose was disabled in the original)
    # wrap longitudes: copy the first two columns past the right edge ...
    if dd.name is None:
        dd.name='temp_name_ddy'
    dlon=lon.diff('longitude').mean()
    da = dd.isel(longitude=slice(0, 2) )
    da['longitude'] = np.array([dd.longitude[-1].data + dlon, dd.longitude[-1].data + 2* dlon])
    # ... and the last two columns before the left edge
    de = dd.isel(longitude=[-2, -1] )
    de['longitude'] = np.array([dd.longitude[0].data - 2 * dlon, dd.longitude[0].data - 1* dlon])
    dd2 =xr.merge([da, dd, de]).to_array()
    if ps is None:
        sp_dx = dd2.differentiate('longitude', edge_order=2)/(4*lon_radiens.diff('longitude').mean())
        #sp_dx = np.gradient(dd, lon_radiens, axis=dd.shape.index(dd.longitude.size))
        print('np pressure weighted lon gradient')
    else:
        print('pressure weighted lon gradient')
        sp_dx = (dd2 * ps).differentiate('longitude', edge_order=2)/(4*lon_radiens.diff('longitude').mean())
        #sp_dx = np.gradient(dd * ps, lon_radiens, axis=dd.shape.index(dd.longitude.size))
    #dd2_new = dd_new.differentiate('longitude', edge_order=2)/(4*lon_radiens.diff('longitude').mean())
    # drop the wrapped padding columns again
    sp_dx= sp_dx.isel( longitude= (sp_dx.longitude >= lon[0]) & (sp_dx.longitude <= lon[-1]) )
    # normalise by the metric r_e*cos(phi) (and ps, if weighted)
    if ps is None:
        factor = xr.DataArray(r_e*np.cos(lat*np.pi/180.0), dims='latitude', coords=[dd.coords['latitude']])
    else:
        factor = ps*xr.DataArray(r_e*np.cos(lat*np.pi/180.0), dims='latitude', coords=[dd.coords['latitude']])
    # non xarray version
    #sp_dx_adjust = xr.DataArray(data=np.transpose(sp_dx, trans_list), dims=dd.dims, coords=dd.coords) / factor
    # xarray version
    sp_dx_adjust = sp_dx/factor
    return sp_dx_adjust
def vertical_integal(dset):
    """
    Mass-weighted vertical integral of every level-dependent variable.

    Integrates dset over its 'level' coordinate and divides by g; for
    variables that carry no 'level' coordinate the division by g is
    undone again (they pass through the integration unchanged).
    """
    g = 9.81
    column = dset.integrate('level') / g
    for name in column.keys():
        if 'level' not in dset[name].coords.keys():
            column[name] = column[name] * g
            print(name)
    return column
def vertical_integal_Hbeta(dset, Hb):
    """
    Mass-weighted vertical integral with the H_beta (above-ground) mask.

    Integrates (dset * Hb) over 'level' and divides by g; variables that
    carry no 'level' coordinate are restored to their original (surface)
    values from dset instead of being integrated.
    """
    g = 9.81
    column = (dset * Hb).integrate('level') / g
    for name in column.keys():
        if 'level' not in dset[name].coords.keys():
            column[name] = dset[name]
            print(name)
    return column
def plot_continent_seperation(all_dict, Gbudget):
    """Sanity plot: per-continent torque/drag terms vs. the global budget.

    all_dict maps segment name -> vertically integrated per-continent
    Dataset; Gbudget is the matching global Dataset. The three segment
    curves should sum to the global one. Uses the module-level `lat`
    coordinate. Returns the figure object from M.figure_axis_xy.
    """
    # FIX: matplotlib was never imported at module level, so `plt` raised
    # a NameError whenever plot_cont was switched on; import it locally.
    import matplotlib.pyplot as plt
    F = M.figure_axis_xy(6, 8)
    plt.suptitle('budget closure for continental seperation' , y=1.025)
    plt.subplot(3,1, 1)
    #all_CD_rep.keys()
    key='F_gwave_zm'
    plt.title('1 hour exmpl | '+ key)
    plt.plot(lat, all_dict['africa_europa'][key].isel(time=1), 'r-', label='africa_europa')
    plt.plot(lat, all_dict['asia'][key].isel(time=1), 'g-', label='asia')
    plt.plot(lat, all_dict['americas'][key].isel(time=1), 'b-', label='americas')
    plt.plot(lat, (all_dict['africa_europa'][key]+ all_dict['asia'][key]+ all_dict['americas'][key]).isel(time=1), 'k+' , label='sum')
    plt.plot(lat,Gbudget[key].isel(time=1), '-k', label='budget')
    plt.xlim(-90, -70)
    plt.legend()
    plt.subplot(3,1, 2)
    key='torque_lev_zm'
    plt.title('1 day mean | '+ key)
    plt.plot(lat, all_dict['africa_europa'][key].mean('time'), 'r-')
    plt.plot(lat, all_dict['asia'][key].mean('time'), 'g-')
    plt.plot(lat, all_dict['americas'][key].mean('time'), 'b-')
    plt.plot(lat, (all_dict['africa_europa'][key]+ all_dict['asia'][key]+ all_dict['americas'][key]).mean('time'), 'k-+')
    plt.plot(lat,Gbudget[key].mean('time'), '-k')
    plt.xlim(-78, -0)
    plt.ylim(-0.7, .7)
    #plt.ylim(-1*1e5, 1*1e5)
    plt.subplot(3,1, 3)
    key='torque_srf_zm'
    plt.title('1 hour exmpl | '+ key)
    plt.plot(lat, all_dict['africa_europa'][key].isel(time=1), 'r-')
    plt.plot(lat, all_dict['asia'][key].isel(time=1), 'g-')
    plt.plot(lat, all_dict['americas'][key].isel(time=1), 'b-')
    plt.plot(lat, (all_dict['africa_europa'][key]+ all_dict['asia'][key]+ all_dict['americas'][key]).isel(time=1), 'k+')
    plt.plot(lat,Gbudget[key].isel(time=1), '-k')
    plt.xlim(-78, 0)
    return F
# %%
# --- Build the Boer beta fields --------------------------------------------
# BATA = H_beta * ps (rho_beta) and BATA_01 = H_beta (0/1 above/below
# ground); their zonal means are the weights of the representative mean.
hist = 'Start Processing bata'
tstart=tstart_start=time.time()
BATA = rho_beta(data.level, ps, check=False)
BATA.name='bata'
BATA_zm=BATA.mean('longitude').compute()
BATA_01 = rho_beta_heaviside(data.level, ps, check=False)
BATA_01.name='bata_01'
BATA_01_zm=BATA_01.mean('longitude').compute()
#data['bata']=bata
# repres/budget collect the representative-mean and budget-mean terms
repres=dict()
budget=dict()
BATAD=dict()
BATAD['BATA_zm'] = BATA_zm
BATAD['BATA_zm_01'] = BATA_01_zm
tend=time.time()
hist = logger(hist, 'bata time: ' + str(tend-tstart) )
tstart=time.time()
print('Start Processing')
# 1. zonal mean terms
hist = logger(hist, '1. zonal mean terms', verbose = False)
print('1. zonal mean terms')
# %% 2. a) mountain torque
ps_zm= ps.mean('longitude').compute()
r_e = float(mconfig['constants']['radius'])
omega = float(mconfig['constants']['omega'])
lon=BATA.longitude
lat=BATA.latitude
# Coriolis parameter f = 2*omega*sin(phi)
f = 2 * omega * np.sin(lat *np.pi/180)
# 2. a) 1. surface torque: z * d(ps)/d(lambda) / g
sp_dlambda = ddlambda_spherical(Dsurface.sp, None, r_e, lon, lat ).sel(variable= 'sp').drop('variable')
gph_sp_dlambda= (Dsurface.z * sp_dlambda / 9.81).compute()
# take zonal mean, at this point treat it as a surface variable
gph_sp_dlambda_zm_rep = (gph_sp_dlambda).mean('longitude')
gph_sp_dlambda_zm_rep.name='zonal mean surface mountain torque'
gph_sp_dlambda_zm_rep.attrs['units']='N m**-2'
gph_sp_dlambda_zm_budget = gph_sp_dlambda_zm_rep * ps_zm
gph_sp_dlambda_zm_budget.attrs['units']='N**2 m**-4'
# %% 2. a) 2. gph level torque: zonal divergence of H_beta * Phi
gph_bata_div = ddlambda_spherical(BATA_01 * Phi , None, r_e, lon, lat ).compute().sel(variable='temp_name_ddy').drop('variable')
gph_bata_div_zm_rep = (gph_bata_div * BATA).mean('longitude') / BATA_zm
gph_bata_div_zm_conventional = gph_bata_div.mean('longitude')
# apply definition of representative mean (fall back to the conventional
# mean where the whole latitude circle is below ground, BATA_zm == 0)
gph_bata_div_zm_rep = gph_bata_div_zm_rep.where(BATA_zm != 0, gph_bata_div_zm_conventional)
gph_bata_div_zm_budget = gph_bata_div_zm_rep * BATA_zm #(data * BATA).mean('longitude')
tend=time.time()
hist = logger(hist, 'defining mountain tourque part I: ' + str(tend-tstart) )
tstart=time.time()
# %% 2. b) continenal excurse
""" start of continental excurse """
# %% split out torque terms per continent separately and save them.
# Each segment's contribution is weighted by its share of longitudes so
# the three segments sum to the global zonal mean.
Nlon=dict()
N_tot= Nlon['N_total'] =float(D.longitude.size)
all_CD_rep=dict()
all_CD_bud=dict()
for kk in seg_dict.keys():
    CD_storage_rep=dict()
    CD_storage_bud=dict()
    lon_seg=seg_dict[kk]
    print(kk)
    Dsurface_segment = Dsurface.sel(longitude=lon_seg)
    #ps_zm_seg = ps.sel(longitude=lon_seg).mean('longitude').compute()
    BATA_seg = BATA.sel(longitude=lon_seg)
    #BATA_zm_seg = BATA_seg.mean('longitude').compute()
    BATA_01_zm_seg = BATA_01.sel(longitude=lon_seg).mean('longitude').compute()
    Nlon[kk] = float(Dsurface_segment.longitude.size)
    N_weight = Nlon[kk]/N_tot
    # a) surface torque -- reuse the global field and select the segment
    #sp_dlambda_seg = ddlambda_spherical(Dsurface_segment.sp, None, r_e, lon, lat )
    #gph_sp_dlambda_seg= Dsurface_segment.z * sp_dlambda_seg / 9.81
    #better copy from above and select
    gph_sp_dlambda_seg= gph_sp_dlambda.sel(longitude=lon_seg)
    # take zonal mean, at this point treat it as a surface variable
    gph_sp_dlambda_zm_rep_seg = (gph_sp_dlambda_seg).mean('longitude')
    gph_sp_dlambda_zm_rep_seg.name='zonal mean surface mountain torque'
    gph_sp_dlambda_zm_rep_seg.attrs['units']='N m**-2'
    gph_sp_dlambda_zm_budget_seg = gph_sp_dlambda_zm_rep_seg * ps_zm
    gph_sp_dlambda_zm_budget_seg.attrs['units']='N**2 m**-4'
    CD_storage_rep['torque_srf_zm']= gph_sp_dlambda_zm_rep_seg * N_weight
    CD_storage_bud['torque_srf_zm']= gph_sp_dlambda_zm_budget_seg * N_weight
    # b) GPh level
    a =gph_bata_div.sel(longitude=lon_seg)
    a_zm_rep = (a * BATA_seg).mean('longitude') / BATA_zm
    a_zm_conventional = a.mean('longitude')
    # apply definition of representative mean
    a_zm_rep = a_zm_rep.where(BATA_zm != 0, a_zm_conventional)
    a_zm_budget = a_zm_rep * BATA_zm #(data * BATA).mean('longitude')
    CD_storage_rep['torque_lev_zm']= (a_zm_rep * N_weight)
    CD_storage_bud['torque_lev_zm']= (a_zm_budget * N_weight)
    # c) gravity wave drag
    F_gravity_zm_data_seg =(Dsurface_segment['megwss']* Dsurface_segment.sp).mean('longitude')/ ps_zm
    F_gravity_zm_rep_seg = xr.DataArray(data=F_gravity_zm_data_seg, name='Zonal mean zonal gravity wave stress', attrs= gravity_drag.attrs)
    F_gravity_zm_rep_seg.attrs['units']='N m**-2'
    F_gravity_zm_budget_seg = F_gravity_zm_rep_seg * ps_zm
    F_gravity_zm_budget_seg.attrs['units']='N**2 m**-4'
    CD_storage_rep['F_gwave_zm']= (F_gravity_zm_rep_seg * N_weight)
    CD_storage_bud['F_gwave_zm']= (F_gravity_zm_budget_seg * N_weight)
    # d) save
    CD_storage_rep = xr.Dataset(CD_storage_rep)
    G_CD_int =vertical_integal_Hbeta(CD_storage_rep,BATA_01_zm_seg ).compute()
    G_CD_int.attrs['long_name'], G_CD_int.attrs['units'] = 'Continental Surface Drag as Representetive Mean', 'Pa'
    CD_storage_bud = xr.Dataset(CD_storage_bud)
    GB_CD_int =vertical_integal_Hbeta(CD_storage_bud, BATA_01_zm_seg).compute()
    GB_CD_int.attrs['long_name'], GB_CD_int.attrs['units'] = 'Continental Surface Drag as Budget Mean', 'Pa**2'
    save_path_local = save_path + '/drag_on_continents_zm/repres/'
    M.mkdirs_r(save_path_local)
    G_CD_int.to_netcdf(save_path_local + key_name + 'repres_'+ kk+ '_'+ date_str + '.nc')
    save_path_local = save_path + '/drag_on_continents_zm/budget/'
    M.mkdirs_r(save_path_local)
    GB_CD_int.to_netcdf(save_path_local + key_name + 'budget_'+ kk+ '_'+ date_str + '.nc')
    all_CD_rep[kk]=G_CD_int
    #all_CD_bud[kk]=GB_CD_int
    if plot_cont is False:
        # Free memory when the plotting check is off.
        # BUGFIX: the original did `del all_CD_rep` / `del all_CD_bud`,
        # deleting the accumulator dicts themselves, which raised a
        # NameError on the second loop iteration. Drop only this
        # segment's entry and the per-segment temporaries instead
        # (all_CD_bud is never filled inside the loop, so nothing to drop).
        all_CD_rep.pop(kk, None)
        del GB_CD_int
        del G_CD_int
        del Dsurface_segment
        del BATA_seg
        del BATA_01_zm_seg
        del CD_storage_rep
        del CD_storage_bud
        del a_zm_rep
        del a_zm_budget
        del a_zm_conventional
tend=time.time()
hist = logger(hist, 'continental sepeparatio time: ' + str(tend-tstart) )
tstart=time.time()
""" end of continental excurse """
# %% 2. c) finish up global mean of surface vars. compute and store them
# 2. c) 1 surface tourque
repres['torque_srf_zm']= gph_sp_dlambda_zm_rep.compute()
budget['torque_srf_zm']= gph_sp_dlambda_zm_budget.compute()
del gph_sp_dlambda_zm_rep
del gph_sp_dlambda_zm_budget
del sp_dlambda
del gph_sp_dlambda
# 2. c) 2. gph level
repres['torque_lev_zm']= gph_bata_div_zm_rep.compute()
budget['torque_lev_zm']= gph_bata_div_zm_budget.compute()
del gph_bata_div
del gph_bata_div_zm_conventional
del gph_bata_div_zm_rep
del gph_bata_div_zm_budget
tend=time.time()
hist = logger(hist, 'store and compute global surface tourque part II : ' + str(tend-tstart) )
tstart=time.time()
# %% 3 . global surface drag terms
# %% a) surface var: 1. turbulent stress (ps-weighted zonal mean)
F_srf_zm_data =(fluxes* ps).mean('longitude')/ ps_zm
F_srf_zm_rep = xr.DataArray(data=F_srf_zm_data, name='Zonal mean zonal Surface Stress', attrs= fluxes.attrs)
F_srf_zm_rep.attrs['units']='N m**-2'
F_srf_zm_budget = F_srf_zm_rep * ps_zm
F_srf_zm_budget.attrs['units']='N**2 m**-4'
repres['F_tur_zm'] = F_srf_zm_rep
budget['F_tur_zm'] = F_srf_zm_budget
BATAD['ps_zm'] = ps_zm
# %% b) surface var: 2. gravity drag (same weighting as above)
F_gravity_zm_data =(gravity_drag* ps).mean('longitude')/ ps_zm
F_gravity_zm_rep = xr.DataArray(data=F_gravity_zm_data, name='Zonal mean zonal gravity wave stress', attrs= gravity_drag.attrs)
F_gravity_zm_rep.attrs['units']='N m**-2'
F_gravity_zm_budget = F_gravity_zm_rep * ps_zm
F_gravity_zm_budget.attrs['units']='N**2 m**-4'
repres['F_gwave_zm'] = F_gravity_zm_rep
budget['F_gwave_zm'] = F_gravity_zm_budget
# %% 4 . representative average of the 3-D fields (Boer weighting)
data_zm_rep = (data * BATA).mean('longitude') / BATA_zm
data_zm_conventional = data.mean('longitude')
#levmask =data.level < 0.1e5
#data_zm_conventional = data.sel(level=~levmask).mean('longitude')
#data.sel(level=levmask)*np.nan
# apply definition of representative mean (conventional mean where the
# whole latitude circle is below ground)
data_zm_rep = data_zm_rep.where(BATA_zm != 0, data_zm_conventional).compute()
data_zm_budget = (data_zm_rep * BATA_zm).compute() #(data * BATA).mean('longitude')
# store in dict
repres['data_zm'] = data_zm_rep
budget['data_zm'] = data_zm_budget
tend=time.time()
hist = logger(hist, 'Surface stresses and representative means: ' + str(tend-tstart) )
tstart=time.time()
# %% eddy terms
# a) mean flow: momentum flux of the zonal-mean circulation
uzm_vzm_rep = data_zm_rep.u * data_zm_rep.v
uzm_vzm_budget = BATA_zm * uzm_vzm_rep
# b) eddies: deviations from the representative zonal mean
data_p =data - data_zm_rep
# test if primes are 0 , see Boer eq. sec. 4b.
#(data_p * BATA).mean()
upvp_zm_budget = (data_p.u * data_p.v * BATA_zm).mean('longitude').compute()
upvp_zm_conventional = (data_p.u * data_p.v).mean('longitude')
# apply definition of representative mean
upvp_zm_rep = (upvp_zm_budget / BATA_zm)
upvp_zm_rep =upvp_zm_rep.where(BATA_zm != 0 , upvp_zm_conventional).compute()
#upvp_zm_rep
# store in dict
repres['uzm_vzm'] = uzm_vzm_rep
repres['uprime_vprime_zm'] = upvp_zm_rep
budget['uzm_vzm'] = uzm_vzm_budget
budget['uprime_vprime_zm'] = upvp_zm_budget
tend=time.time()
hist = logger(hist, 'eddy terms compute: ' + str(tend-tstart) )
tstart=time.time()
# %%
# 2. meridional flux divergences; poles are masked to 0 since the
# spherical metric is singular at +-90 deg
repres['uzm_vzm_div'] = ddphi_spherical_zm(uzm_vzm_rep ,ps_zm, r_e, lat ).where(lat !=-90, 0).where(lat !=90, 0).compute()
repres['uprime_vprime_zm_div'] = ddphi_spherical_zm(upvp_zm_rep ,ps_zm, r_e, lat ).where(lat !=-90, 0).where(lat !=90, 0).compute()
budget['uzm_vzm_div'] = ddphi_spherical_zm(uzm_vzm_budget ,ps_zm, r_e, lat ).where(lat !=-90, 0).where(lat !=90, 0).compute()
budget['uprime_vprime_zm_div'] = ddphi_spherical_zm(upvp_zm_budget ,ps_zm, r_e, lat ).where(lat !=-90, 0).where(lat !=90, 0).compute()
tend=time.time()
hist = logger(hist, 'phi gradients compute: ' + str(tend-tstart) )
tstart=time.time()
# 3. tendency term du/dt (per second)
repres['dudt'] = data_zm_rep.u.differentiate('time', edge_order=2, datetime_unit='s').compute()
budget['dudt'] = data_zm_budget.u.differentiate('time', edge_order=2, datetime_unit='s').compute()
del data_zm_rep
del data_zm_budget
del data
tend=time.time()
hist = logger(hist, 'tendency term compute: ' + str(tend-tstart) )
tstart=time.time()
# %% 4. also process gph
Phi_zm_rep = (Phi * BATA).mean('longitude') / BATA_zm
repres['phi_zm'] = Phi_zm_rep.where(BATA_zm != 0, Phi.mean('longitude'))
repres['phi_zm'].attrs =Phi.attrs
budget['phi_zm'] = Phi_zm_rep * BATA_zm #(data * BATA).mean('longitude')
budget['phi_zm'].attrs =Phi.attrs
tend=time.time()
hist = logger(hist, 'Phi single var compute time: ' + str(tend-tstart) )
tstart=time.time()
# %% 4. merge data to xr.DataSets
print('Repack data and Cal data')
# a) representative means: collect all terms into Dataset G and set
# names/long_names/units on each variable
G = repres['dudt']
G.name , G.attrs['long_name'], G.attrs['units'] = 'dudt' , 'Zonal Mean Tendency', '(m s**-2)'
G =repres['dudt'].to_dataset()
G.attrs['long_name'], G.attrs['units'] = 'Terms for Representative Zonal Mean Momentum Budget', 'm s**-2 (fields var) or N m**-2 (surface var)'
key= 'uzm_vzm'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'uzm_vzm' , 'Zonal Mean mean-momentum flux', '(m**2 s**-2)'
G['uzm_vzm']=repres[key]
key= 'uprime_vprime_zm'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'uprime_vprime_zm' , 'Zonal Mean eddy-moemntum flux', '(m**2 s**-2)'
G['uprime_vprime_zm']=repres[key]
# BUGFIX: this key was a '<KEY>' redaction artifact; the attrs set below
# and G['uzm_vzm_div'] pin it to 'uzm_vzm_div'.
key= 'uzm_vzm_div'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'uzm_vzm_div' , 'Zonal Mean mean-flux divergence', '(m s**-2)'
G['uzm_vzm_div']=repres[key]
key= 'uprime_vprime_zm_div'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'uprime_vprime_zm_div' , 'Zonal Mean eddy-flux divergence', '(m s**-2)'
G['uprime_vprime_zm_div']=repres[key]
key= 'torque_lev_zm'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'torque_lev_zm' , 'Zonal Mean Geopotential Height Torque', '(m s**-2)'
G['torque_lev_zm']=repres[key].T
key= 'torque_srf_zm'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'torque_srf_zm' , 'Zonal Mean Surface Torque', '(m s**-2)'
G['torque_srf_zm']=repres[key]
key= 'F_tur_zm'
repres[key].name , repres[key].attrs['long_name'] = 'F_tur_zm' , 'Zonal Mean Turbulent Surface Stress'
G['F_tur_zm']=repres[key]
key= 'F_gwave_zm'
repres[key].name , repres[key].attrs['long_name'] = 'F_gwave_zm' , 'Zonal Mean Zonal Gravity Wave Stress'
G['F_gwave_zm']=repres[key]
key= 'data_zm'
# planetary-momentum advection term f*v
tempv =repres[key].v * f
tempv.name , tempv.attrs['long_name'], tempv.attrs['units'] = 'Zonal Mean Advection of Planetary Momentum' , 'Zonal Mean Advection of Planetary Momentum', '(m s**-2)'
G['v_f_zm']=tempv
# save also zonal mean winds and GPH
G_others = repres['data_zm'].u
G_others.name , G_others.attrs['long_name'], G_others.attrs['units'] = 'u_repres' , 'Representative Zonal Mean Zonal Wind' , '(m s**-1)'
G_others = G_others.to_dataset()
key='v_repres'
G_others[key]=repres['data_zm'].v
G_others[key].name , G_others[key].attrs['long_name'], G_others[key].attrs['units'] = key , 'Representative Zonal Mean Meridional Wind' , '(m s**-1)'
key= 'phi_repres'
G_others[key]=repres['phi_zm'].compute()
G_others[key].name , G_others[key].attrs['long_name'], G_others[key].attrs['units'] = key , 'Representative Zonal Mean Geopotential Height' , '(m**2 s**-2)'
# b) budget means: same repacking for the ps/beta-weighted (budget) terms
GB = budget['dudt']
GB.name , GB.attrs['long_name'], GB.attrs['units'] = 'dudt' , 'Zonal Mean Tendency', '(Pa m * s**-2)'
GB =budget['dudt'].to_dataset()
GB.attrs['long_name'], GB.attrs['units'] = 'Terms for Zonal Mean Momentum Budget', '(Pa m * s**-2)'
key= 'uzm_vzm'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'uzm_vzm' , 'Zonal Mean mean-flux', '(Pa m**2 s**-2)'
GB['uzm_vzm']=budget[key]
key= 'uprime_vprime_zm'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'uprime_vprime_zm' , 'Zonal Mean eddy-flux', '(Pa m**2 s**-2)'
GB['uprime_vprime_zm']=budget[key]
# BUGFIX: this key was a '<KEY>' redaction artifact; the attrs set below
# and GB['uzm_vzm_div'] pin it to 'uzm_vzm_div'.
key= 'uzm_vzm_div'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'uzm_vzm_div' , 'Zonal Mean mean-flux divergence', '(Pa m s**-2)'
GB['uzm_vzm_div']=budget[key]
key= 'uprime_vprime_zm_div'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'uprime_vprime_zm_div' , 'Zonal Mean Eddy-flux divergence', '(Pa m s**-2)'
GB['uprime_vprime_zm_div']=budget[key]
key= 'torque_lev_zm'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'torque_lev_zm' , 'Zonal Mean Geopotential Height Torque', '(Pa m s**-2)'
GB['torque_lev_zm']=budget[key].T
key= 'torque_srf_zm'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'torque_srf_zm' , 'Zonal Mean Surface Torque', '(Pa m s**-2)'
GB['torque_srf_zm']=budget[key]
key= 'F_tur_zm'
budget[key].name , budget[key].attrs['long_name'] = 'F_tur_zm' , 'Zonal Mean Turbulent Surface Stress'
GB['F_tur_zm']=budget[key]
key= 'F_gwave_zm'
budget[key].name , budget[key].attrs['long_name'] = 'F_gwave_zm' , 'Zonal Mean Zonal Gravity Wave Stress'
GB['F_gwave_zm']=budget[key]
key= 'data_zm'
# planetary-momentum advection term f*v (budget weighting)
tempv =budget[key].v * f
tempv.name , tempv.attrs['long_name'], tempv.attrs['units'] = 'Zonal Mean Advection of Planetary Momentum' , 'Zonal Mean Advection of Planetary Momentum', '(Pa m s**-2)'
GB['v_f_zm']=tempv
# save also zonal mean winds and GPH
key='u_budget'
G_others[key]=budget['data_zm'].u
G_others[key].name , G_others[key].attrs['long_name'], G_others[key].attrs['units'] = key , 'Budget Zonal Mean Zonal Wind' , '(Pa m s**-1)'
key='v_budget'
G_others[key]=budget['data_zm'].v
G_others[key].name , G_others[key].attrs['long_name'], G_others[key].attrs['units'] = key , 'Budget Zonal Mean Meridional Wind' , '(Pa m s**-1)'
key= 'phi_budget'
G_others[key]=budget['phi_zm'].compute()
G_others[key].name , G_others[key].attrs['long_name'], G_others[key].attrs['units'] = key , 'Budget Zonal Mean Geopotential Height' , '(Pa m**2 s**-2)'
# close original files
Dsurface.close()
D.close()
tend=time.time()
print('define and zm process time :' + str(tend- tstart))
hist = logger(hist, 'define and zm process time :'+ str(tend- tstart))
tstart=time.time()
# %% 5. Vertical integrals
print('5. Vertical integrals')
G_int =vertical_integal_Hbeta(G, BATA_01_zm) # bata not computed jet.
G_int.attrs['long_name'], G_int.attrs['units'] = 'Momentum Budget in the Representetive Mean', 'Pa'
GB_int =vertical_integal_Hbeta(GB, BATA_01_zm)
GB_int.attrs['long_name'], GB_int.attrs['units'] = 'Momentum Budget in the Budget Mean', 'Pa**2'
# Fix units metadata after integration: the un-divided fluxes keep an
# extra metre relative to the divergence/tendency terms.
level_vars = list(G.keys())
flux_vars=['uprime_vprime_zm','uzm_vzm' ]
#level_vars.remove('F_tur_zm')
#level_vars.remove('F_gwave_zm')
for k in flux_vars:
    level_vars.remove(k)
for k in level_vars:
    G_int[k].attrs = G[k].attrs
    G_int[k].attrs['units'] = 'Pa'
    GB_int[k].attrs = GB[k].attrs
    GB_int[k].attrs['units'] = 'Pa**2'
for k in flux_vars:
    G_int[k].attrs = G[k].attrs
    G_int[k].attrs['units'] = 'Pa m'
    GB_int[k].attrs = GB[k].attrs
    GB_int[k].attrs['units'] = 'Pa**2 m'
# %% 5.b optional plotting of the continenal seperation
if plot_cont:
    F =plot_continent_seperation(all_CD_rep, G_int)
    F.save(name='exmpl_repres_'+ date_str, path=plot_path+'contitent_separ/')
    del all_CD_rep
    del all_CD_bud
    del GB_CD_int
    del G_CD_int
# %% 5.b save zonal mean data
# (bare expression below is a no-op left over from debugging)
date_str
save_path_local = save_path + '/instantanious_zm/repres/'
os.makedirs(save_path_local, exist_ok = True)
G.to_netcdf(save_path_local + key_name +'repres_zm_'+ date_str + '.nc')
save_path_local = save_path + '/instantanious_zm/budget/'
os.makedirs(save_path_local, exist_ok = True)
GB.to_netcdf(save_path_local + key_name +'budget_zm_'+ date_str + '.nc')
save_path_local = save_path + '/instantanious_eulerian_zm/'
os.makedirs(save_path_local, exist_ok = True)
G_others.to_netcdf(save_path_local + key_name +'zm_others_'+ date_str + '.nc')
# save the beta/weighting fields as well
G_bata = xr.Dataset(BATAD)
key='BATA_zm'
G_bata[key].name , G_bata[key].attrs['long_name'], G_bata[key].attrs['units'] = 'BATA_zm' , 'Zonal mean rho_beta', 'Pa'
key='BATA_zm_01'
G_bata[key].name , G_bata[key].attrs['long_name'], G_bata[key].attrs['units'] = 'BATA_zm_01' , 'Zonal mean H_beta', 'binary'
key='ps_zm'
G_bata[key].name , G_bata[key].attrs['long_name'], G_bata[key].attrs['units'] = 'ps_zm' , 'Zonal mean surface pressure', 'Pa'
save_path_local = save_path + '/instantanious_bata/'
os.makedirs(save_path_local, exist_ok = True)
G_bata.to_netcdf(save_path_local + key_name + 'bata_'+ date_str + '.nc')
# %%
# Old LHS without the torque/gravity-wave terms, kept for reference:
#G_int['LHS'] = G_int['dudt'] - G_int['v_f_zm'] + G_int['uprime_vprime_zm_div'] + G_int['uzm_vzm_div']
#G_int['LHS'].name , G_int['LHS'].attrs['long_name'], G_int['LHS'].attrs['units'] = 'LHS' , 'Left Hand Side of the Butget', 'Pa'
#GB_int['LHS'] = GB_int['dudt'] - GB_int['v_f_zm'] + GB_int['uprime_vprime_zm_div'] + GB_int['uzm_vzm_div']
#GB_int['LHS'].name , GB_int['LHS'].attrs['long_name'], GB_int['LHS'].attrs['units'] = 'LHS' , 'Left Hand Side of the Butget', 'Pa**2'
#G_int['torque_lev_zm'].plot()
#G_int['torque_srf_zm'].plot()
#(G_int['torque_lev_zm'] - G_int['torque_srf_zm']).plot()
# Residual of the momentum budget; should vanish if the budget closes.
G_int['LHS'] = G_int['dudt'] - G_int['v_f_zm'] + G_int['uprime_vprime_zm_div'] + G_int['uzm_vzm_div'] + G_int['torque_lev_zm'] - G_int['torque_srf_zm'] - G_int['F_gwave_zm']
G_int['LHS'].name , G_int['LHS'].attrs['long_name'], G_int['LHS'].attrs['units'] = 'LHS' , 'Left Hand Side of the Butget', 'Pa'
# BUGFIX: the budget-mean LHS previously subtracted G_int['F_gwave_zm']
# (representative mean, units Pa) instead of GB_int['F_gwave_zm']
# (budget mean, Pa**2), mixing the two averaging conventions.
GB_int['LHS'] = GB_int['dudt'] - GB_int['v_f_zm'] + GB_int['uprime_vprime_zm_div'] + GB_int['uzm_vzm_div'] + GB_int['torque_lev_zm'] - GB_int['torque_srf_zm'] - GB_int['F_gwave_zm']
GB_int['LHS'].name , GB_int['LHS'].attrs['long_name'], GB_int['LHS'].attrs['units'] = 'LHS' , 'Left Hand Side of the Butget', 'Pa**2'
# %% delete old variables
del BATAD, repres, budget
del G, GB
del G_others, G_bata
del tempv
del uzm_vzm_rep, upvp_zm_rep, uzm_vzm_budget , upvp_zm_budget
# %% 6. time average
print('6. time average')
# Pressure-weighted daily mean of the representative-mean budget (units stay 'Pa').
G_int_tmean =ps_weight_timemean(G_int, ps_zm )
G_int_tmean.attrs['long_name'], G_int_tmean.attrs['units'] = 'Daily Mean Momentum Budget of Representetive Means', 'Pa'
# Stamp the daily mean with the mid-day timestamp (index 12 of the time axis,
# per the original "set to mid-da time" note) and re-add a length-1 time
# dimension so the daily files can be concatenated later.
G_int_tmean.coords['time'] = G_int.time[12].data.astype('M8[h]') #G_int.time.mean().data.astype('M8[D]') # set to mid-da time
G_int_tmean =G_int_tmean.expand_dims('time')
# Same treatment for the budget-mean dataset (units 'Pa**2').
GB_int_tmean =ps_weight_timemean(GB_int, ps_zm )
GB_int_tmean.attrs['long_name'], GB_int_tmean.attrs['units'] = 'Daily Mean Momentum Budget of Budget Means', 'Pa**2'
GB_int_tmean.coords['time'] = GB_int.time[12].data.astype('M8[h]')# set to mid-da time
GB_int_tmean =GB_int_tmean.expand_dims('time')
# Timing bookkeeping for the vertical-integration/budget stage.
tend=time.time()
print('vert int and budget time :' + str(tend- tstart))
tstart=time.time()
# %% 6. b) save budgets
# Instantaneous vertically integrated budgets.
save_path_local = save_path + '/instantanious_vert_int/repres/'
os.makedirs(save_path_local, exist_ok = True)
G_int.to_netcdf(save_path_local + key_name +'repres_int_'+ date_str + '.nc')
save_path_local = save_path + '/instantanious_vert_int/budget/'
os.makedirs(save_path_local, exist_ok = True)
GB_int.to_netcdf(save_path_local + key_name +'budget_int_'+ date_str + '.nc')
# Daily means: truncate the time coordinate to day resolution before writing.
save_path_local = save_path + '/daily/repres/'
os.makedirs(save_path_local, exist_ok = True)
G_int_tmean['time']=G_int_tmean.time.data.astype('M8[D]')
G_int_tmean.to_netcdf(save_path_local + key_name +'repres_tmean_int_'+ date_str + '.nc')
save_path_local = save_path + '/daily/budget/'
os.makedirs(save_path_local, exist_ok = True)
GB_int_tmean['time']=GB_int_tmean.time.data.astype('M8[D]')
GB_int_tmean.to_netcdf(save_path_local + key_name +'budget_tmean_int_'+ date_str + '.nc')
tend=time.time()
print('save time :' + str(tend- tstart))
hist = logger(hist, 'save time :'+ str(tend- tstart))
tstart=time.time()
# simple test
# %%
import matplotlib.pyplot as plt
import os
# Reload the local tools module so interactive edits to it are picked up.
imp.reload(M)
# %% vertical integrals
#LHS = G_int['dudt'] - G_int['v_f_zm'] + G_int['uprime_vprime_zm_div'] + G_int['uzm_vzm_div']
# Diagnostic figure 1: budget-mean closure. Top panel shows the weighted 1-day
# mean, bottom panel a single hourly time step, each term plotted against latitude.
F = M.figure_axis_xy()
plt.suptitle('Budget Mean | Veritcal integral | weighted time mean over 1 Day', y= 1.03)
plt.subplot(2,1,1)
pdata= GB_int_tmean.isel(time=0)
# x-axis limits from the data's latitude range (reused by the other figure too).
xlims=(pdata['LHS'].latitude.min().data , pdata['LHS'].latitude.max().data)
plt.plot(lat, pdata['LHS'],c='k', label=' LHS')
plt.plot(lat, pdata['dudt'], label=' dudt')
plt.plot(lat, - pdata['v_f_zm'], label=' -f v')
plt.plot(lat, + pdata['uzm_vzm_div'], label=' div mean')
plt.plot(lat, + pdata['uprime_vprime_zm_div'], label=' div eddy')
plt.plot(lat, + pdata['torque_lev_zm'] - pdata['torque_srf_zm'], label='Torque Lev - Srf')
# Dashed reference: -F_tur_zm should balance the LHS if the budget closes.
plt.plot(lat, -1*pdata['F_tur_zm'],'k--', linewidth= 2, label=' F')
plt.legend(ncol=2)
plt.ylabel(pdata.units)
plt.title('LHS')
plt.grid()
plt.xlim(xlims)
# Bottom panel: the same terms at a single hourly step (tt = 10).
plt.subplot(2,1,2)
plt.title('1 1hour time step')
tt=10
pdata= GB_int
plt.plot(lat, pdata['LHS'].isel(time=tt),c='k', label=' LHS')
plt.plot(lat, pdata['dudt'].isel(time=tt), label=' dudt')
plt.plot(lat, - pdata['v_f_zm'].isel(time=tt), label=' -f v')
plt.plot(lat, + pdata['uzm_vzm_div'].isel(time=tt), label=' div mean')
plt.plot(lat, + pdata['uprime_vprime_zm_div'].isel(time=tt), label=' div eddy')
plt.plot(lat, + pdata['torque_lev_zm'].isel(time=tt) - pdata['torque_srf_zm'].isel(time=tt), label='Torque Lev - Srf')
plt.plot(lat, -1*pdata['F_tur_zm'].isel(time=tt),'k--', linewidth= 2, label=' F')
plt.ylabel(pdata.units)
plt.title('F')
plt.grid()
plt.xlim(xlims)
#date_str =str(pdata.time[0].data.astype('M8[D]'))
F.save(name='exmpl_budget_dmean_'+ date_str, path=plot_path)
# %%
# Diagnostic figure 2: five-panel overview of the representative-mean budget.
# cos(latitude) weighting is applied in panels 4-5 to express terms as AM/r.
lat_radiens =lat *np.pi/180.0
cos_phi= np.cos(lat_radiens)
F = M.figure_axis_xy(6.5, 11)
plt.suptitle('Veritcal integral', y= 1.03)
# Panel 1: every budget term of the weighted 1-day mean.
plt.subplot(5,1,1)
plt.title('Representative Mean | Vertical Integral | weighed 1 Day Time mean')
pdata= G_int_tmean.isel(time=0)
plt.plot(lat, pdata['LHS'],c='k', label=' LHS')
plt.plot(lat, pdata['dudt'], label=' dudt')
plt.plot(lat, - pdata['v_f_zm'], label=' -f v')
plt.plot(lat, + pdata['uzm_vzm_div'], label=' div mean')
plt.plot(lat, + pdata['uprime_vprime_zm_div'], label=' div eddy')
plt.plot(lat, + pdata['torque_lev_zm'] - pdata['torque_srf_zm'], label='Torque Lev - Srf')
plt.plot(lat, -1*pdata['F_tur_zm'],'k--', linewidth= 2, label=' F')
plt.ylabel(pdata.units)
plt.legend(ncol=2)#loc='upper right',
plt.xlim(xlims)
plt.grid()
# Panel 2: same data with the terms grouped into two composite curves.
plt.subplot(5,1,2)
plt.title('Grouped | Vertical Integral | weighed 1 Day Time mean')
pdata= G_int_tmean.isel(time=0)
plt.plot(lat, pdata['LHS'],c='k', label=' LHS')
plt.plot(lat, pdata['dudt'] + pdata['uprime_vprime_zm_div'] + pdata['uzm_vzm_div'], label='dudt - eddy div ')
#plt.plot(lat, + pdata['uzm_vzm_div'], label=' div mean')
#plt.plot(lat, , label=' div eddy')
plt.plot(lat, - pdata['v_f_zm'] + pdata['torque_lev_zm'] - pdata['torque_srf_zm'], label='- fv + Torque Lev - Srf')
plt.plot(lat, -1*pdata['F_tur_zm'],'k--', linewidth= 2, label=' F')
plt.ylabel(pdata.units)
plt.legend(ncol=2)#loc='upper right',
plt.xlim(xlims)
plt.grid()
# Panel 3: all terms at a single hourly step (tt = 1) of the raw G_int data.
plt.subplot(5,1,3)
plt.title('single 1 hour time step ')
pdata= G_int
tt=1
plt.plot(lat, pdata['LHS'].isel(time=tt),c='k', label=' LHS')
plt.plot(lat, pdata['dudt'].isel(time=tt), label=' dudt')
plt.plot(lat, - pdata['v_f_zm'].isel(time=tt), label=' -f v')
plt.plot(lat, + pdata['uzm_vzm_div'].isel(time=tt), label=' div mean')
plt.plot(lat, + pdata['uprime_vprime_zm_div'].isel(time=tt), label=' div eddy')
plt.plot(lat, -1*pdata['F_tur_zm'].isel(time=tt),'k--', linewidth= 2, label=' F')
plt.plot(lat, + pdata['torque_lev_zm'].isel(time=tt) - pdata['torque_srf_zm'].isel(time=tt), label='Torque (Lev - Srf)')
plt.ylabel(pdata.units)
plt.xlim(xlims)
plt.grid()
# Panel 4: daily-mean balance (LHS vs. -F) weighted by cos(latitude), plus the
# residual LHS + F which should vanish if the budget closes.
plt.subplot(5,1,4)
plt.title('Balance for 1 day mean, expressed as AM/r ')
# plt.plot(lat, pdata['LHS'].isel(time=tt)* cos_phi,c='k', label=' LHS')
# plt.plot(lat, - pdata['F_tur_zm'].isel(time=tt)* cos_phi ,'k--', linewidth= 2, label='F')
plt.plot(lat, pdata['LHS'].mean('time')* cos_phi,c='k', label='LHS')
plt.plot(lat, - pdata['F_tur_zm'].mean('time')* cos_phi ,'k--', linewidth= 2, label='F')
plt.plot(lat, (pdata['LHS'] + pdata['F_tur_zm'] ).mean('time')* cos_phi,'r-', linewidth= 0.8, label='residual 1d mean')
# plt.plot(lat, + torge_zm* cos_phi,'g-', linewidth= 1, label='M tourque')
# plt.plot(lat, (pdata['LHS'].isel(time=tt) + pdata['F_tur_zm'].isel(time=tt) - torge_zm )* cos_phi,'g--', linewidth= 1.3, label='residual of 1 timestep')
#plt.plot(lat, (pdata['LHS'].isel(time=tt) + pdata['F_tur_zm'].isel(time=tt) )* cos_phi,'r--', linewidth= 1.3, label='residual of 1 timestep')
plt.legend(loc='upper right', ncol=2)
plt.ylabel('$N /m^2$')
plt.xlim(xlims)
plt.grid()
# Panel 5: northward momentum fluxes by the mean flow vs. the eddies.
plt.subplot(5,1,5)
plt.title('Eddy Fluxes')
# plt.plot(lat, pdata['LHS'].isel(time=tt)* cos_phi,c='k', label=' LHS')
# plt.plot(lat, - pdata['F_tur_zm'].isel(time=tt)* cos_phi ,'k--', linewidth= 2, label='F')
#plt.plot(lat, (pdata['LHS'].isel(time=tt) + pdata['F_tur_zm'].isel(time=tt) )* cos_phi,'r', linewidth= 1.3, label='residual')
plt.plot(lat, pdata['uzm_vzm'].mean('time')* cos_phi,c='g', label='Northward Fluxes by mean')
plt.plot(lat, pdata['uprime_vprime_zm'].mean('time')* cos_phi ,'b', linewidth= 2, label='Northward Eddy-Flux')
plt.legend(loc='upper right', ncol=2)
plt.ylabel(pdata['uprime_vprime_zm'].units)
plt.xlim(xlims)
plt.grid()
#date_str =str(pdata.time[0].data.astype('M8[D]'))
F.save(name='exmpl_repres_dmean_ps_iews_'+ date_str, path=plot_path)
# Final bookkeeping: log the timings and persist the run log and config record.
print(' all done and saved')
tend=time.time()
print('plot time :' + str(tend- tstart))
hist = logger(hist, 'plot time :' + str(tend- tstart))
hist = logger(hist, 'total time :' + str(tend-tstart_start) )
M.save_log_txt(log_name ,log_path, hist , verbose=True )
#shutil.move(conf_path+'../active/'+date_str+'.json' , conf_path+'../processed/'+date_str+'.json', )
data_conf= dict()
data_conf['processed_tstamp']= datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
data_conf['processed_time']= 'total time :' + str(tend-tstart_start)
# NOTE(review): data_conf was created two lines above with only the two
# 'processed_*' keys, so these `del`s always raise KeyError and the bare
# `except:` (too broad — prefer `except KeyError:`) swallows it; this block is
# effectively dead code here. Also note the try/except bodies sit at column 0 —
# indentation appears lost in this copy.
try:
del data_conf['conf_surface']
del data_conf['conf_level']
except:
pass
M.json_save(name= date_str, path=conf_path+'/data/' , data=data_conf)
#exit()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"tools_AM_budget.save_log_txt",
"tools_AM_budget.json_load",
"numpy.array",
"tools_AM_budget.write_log",
"numpy.sin",
"sys.path.append",
"xarray.merge",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"tools_AM_budget.json_save",
"too... | [((249, 275), 'sys.path.append', 'sys.path.append', (['root_path'], {}), '(root_path)\n', (264, 275), False, 'import sys\n'), ((627, 638), 'time.time', 'time.time', ([], {}), '()\n', (636, 638), False, 'import time\n'), ((1018, 1050), 'tools_AM_budget.json_load', 'M.json_load', (['"""config"""', 'root_path'], {}), "('config', root_path)\n", (1029, 1050), True, 'import tools_AM_budget as M\n'), ((1191, 1348), 'xarray.open_dataset', 'xr.open_dataset', (["(load_path_plev + 'ERA5_subdaily_plev_.25deg_' + date_str_short + '.nc')"], {'chunks': "{'time': time_chunk_size, 'latitude': lat_chunk_size}"}), "(load_path_plev + 'ERA5_subdaily_plev_.25deg_' +\n date_str_short + '.nc', chunks={'time': time_chunk_size, 'latitude':\n lat_chunk_size})\n", (1206, 1348), True, 'import xarray as xr\n'), ((1449, 1610), 'xarray.open_dataset', 'xr.open_dataset', (["(load_path_plev + 'ERA5_subdaily_plev_.25deg_gph_' + date_str_short + '.nc')"], {'chunks': "{'time': time_chunk_size, 'latitude': lat_chunk_size}"}), "(load_path_plev + 'ERA5_subdaily_plev_.25deg_gph_' +\n date_str_short + '.nc', chunks={'time': time_chunk_size, 'latitude':\n lat_chunk_size})\n", (1464, 1610), True, 'import xarray as xr\n'), ((11362, 11373), 'time.time', 'time.time', ([], {}), '()\n', (11371, 11373), False, 'import time\n'), ((11744, 11755), 'time.time', 'time.time', ([], {}), '()\n', (11753, 11755), False, 'import time\n'), ((11819, 11830), 'time.time', 'time.time', ([], {}), '()\n', (11828, 11830), False, 'import time\n'), ((11886, 11936), 'tools_AM_budget.write_log', 'logger', (['hist', '"""1. zonal mean terms"""'], {'verbose': '(False)'}), "(hist, '1. 
zonal mean terms', verbose=False)\n", (11892, 11936), True, 'from tools_AM_budget import write_log as logger\n'), ((13298, 13309), 'time.time', 'time.time', ([], {}), '()\n', (13307, 13309), False, 'import time\n'), ((13396, 13407), 'time.time', 'time.time', ([], {}), '()\n', (13405, 13407), False, 'import time\n'), ((17283, 17294), 'time.time', 'time.time', ([], {}), '()\n', (17292, 17294), False, 'import time\n'), ((17377, 17388), 'time.time', 'time.time', ([], {}), '()\n', (17386, 17388), False, 'import time\n'), ((17978, 17989), 'time.time', 'time.time', ([], {}), '()\n', (17987, 17989), False, 'import time\n'), ((18093, 18104), 'time.time', 'time.time', ([], {}), '()\n', (18102, 18104), False, 'import time\n'), ((18251, 18347), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'F_srf_zm_data', 'name': '"""Zonal mean zonal Surface Stress"""', 'attrs': 'fluxes.attrs'}), "(data=F_srf_zm_data, name='Zonal mean zonal Surface Stress',\n attrs=fluxes.attrs)\n", (18263, 18347), True, 'import xarray as xr\n'), ((18689, 18801), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'F_gravity_zm_data', 'name': '"""Zonal mean zonal gravity wave stress"""', 'attrs': 'gravity_drag.attrs'}), "(data=F_gravity_zm_data, name=\n 'Zonal mean zonal gravity wave stress', attrs=gravity_drag.attrs)\n", (18701, 18801), True, 'import xarray as xr\n'), ((19595, 19606), 'time.time', 'time.time', ([], {}), '()\n', (19604, 19606), False, 'import time\n'), ((19702, 19713), 'time.time', 'time.time', ([], {}), '()\n', (19711, 19713), False, 'import time\n'), ((20465, 20476), 'time.time', 'time.time', ([], {}), '()\n', (20474, 20476), False, 'import time\n'), ((20549, 20560), 'time.time', 'time.time', ([], {}), '()\n', (20558, 20560), False, 'import time\n'), ((21160, 21171), 'time.time', 'time.time', ([], {}), '()\n', (21169, 21171), False, 'import time\n'), ((21247, 21258), 'time.time', 'time.time', ([], {}), '()\n', (21256, 21258), False, 'import time\n'), ((21528, 21539), 'time.time', 
'time.time', ([], {}), '()\n', (21537, 21539), False, 'import time\n'), ((21615, 21626), 'time.time', 'time.time', ([], {}), '()\n', (21624, 21626), False, 'import time\n'), ((21939, 21950), 'time.time', 'time.time', ([], {}), '()\n', (21948, 21950), False, 'import time\n'), ((22032, 22043), 'time.time', 'time.time', ([], {}), '()\n', (22041, 22043), False, 'import time\n'), ((27498, 27509), 'time.time', 'time.time', ([], {}), '()\n', (27507, 27509), False, 'import time\n'), ((27646, 27657), 'time.time', 'time.time', ([], {}), '()\n', (27655, 27657), False, 'import time\n'), ((28921, 28964), 'os.makedirs', 'os.makedirs', (['save_path_local'], {'exist_ok': '(True)'}), '(save_path_local, exist_ok=True)\n', (28932, 28964), False, 'import os\n'), ((29099, 29142), 'os.makedirs', 'os.makedirs', (['save_path_local'], {'exist_ok': '(True)'}), '(save_path_local, exist_ok=True)\n', (29110, 29142), False, 'import os\n'), ((29280, 29323), 'os.makedirs', 'os.makedirs', (['save_path_local'], {'exist_ok': '(True)'}), '(save_path_local, exist_ok=True)\n', (29291, 29323), False, 'import os\n'), ((29417, 29434), 'xarray.Dataset', 'xr.Dataset', (['BATAD'], {}), '(BATAD)\n', (29427, 29434), True, 'import xarray as xr\n'), ((29905, 29948), 'os.makedirs', 'os.makedirs', (['save_path_local'], {'exist_ok': '(True)'}), '(save_path_local, exist_ok=True)\n', (29916, 29948), False, 'import os\n'), ((32096, 32107), 'time.time', 'time.time', ([], {}), '()\n', (32105, 32107), False, 'import time\n'), ((32171, 32182), 'time.time', 'time.time', ([], {}), '()\n', (32180, 32182), False, 'import time\n'), ((32273, 32316), 'os.makedirs', 'os.makedirs', (['save_path_local'], {'exist_ok': '(True)'}), '(save_path_local, exist_ok=True)\n', (32284, 32316), False, 'import os\n'), ((32462, 32505), 'os.makedirs', 'os.makedirs', (['save_path_local'], {'exist_ok': '(True)'}), '(save_path_local, exist_ok=True)\n', (32473, 32505), False, 'import os\n'), ((32635, 32678), 'os.makedirs', 'os.makedirs', 
(['save_path_local'], {'exist_ok': '(True)'}), '(save_path_local, exist_ok=True)\n', (32646, 32678), False, 'import os\n'), ((32877, 32920), 'os.makedirs', 'os.makedirs', (['save_path_local'], {'exist_ok': '(True)'}), '(save_path_local, exist_ok=True)\n', (32888, 32920), False, 'import os\n'), ((33081, 33092), 'time.time', 'time.time', ([], {}), '()\n', (33090, 33092), False, 'import time\n'), ((33195, 33206), 'time.time', 'time.time', ([], {}), '()\n', (33204, 33206), False, 'import time\n'), ((33270, 33283), 'imp.reload', 'imp.reload', (['M'], {}), '(M)\n', (33280, 33283), False, 'import imp\n'), ((33407, 33425), 'tools_AM_budget.figure_axis_xy', 'M.figure_axis_xy', ([], {}), '()\n', (33423, 33425), True, 'import tools_AM_budget as M\n'), ((33426, 33517), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Budget Mean | Veritcal integral | weighted time mean over 1 Day"""'], {'y': '(1.03)'}), "('Budget Mean | Veritcal integral | weighted time mean over 1 Day',\n y=1.03)\n", (33438, 33517), True, 'import matplotlib.pyplot as plt\n'), ((33515, 33535), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (33526, 33535), True, 'import matplotlib.pyplot as plt\n'), ((33644, 33692), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "pdata['LHS']"], {'c': '"""k"""', 'label': '""" LHS"""'}), "(lat, pdata['LHS'], c='k', label=' LHS')\n", (33652, 33692), True, 'import matplotlib.pyplot as plt\n'), ((33694, 33737), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "pdata['dudt']"], {'label': '""" dudt"""'}), "(lat, pdata['dudt'], label=' dudt')\n", (33702, 33737), True, 'import matplotlib.pyplot as plt\n'), ((33738, 33784), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(-pdata['v_f_zm'])"], {'label': '""" -f v"""'}), "(lat, -pdata['v_f_zm'], label=' -f v')\n", (33746, 33784), True, 'import matplotlib.pyplot as plt\n'), ((33786, 33841), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(+pdata['uzm_vzm_div'])"], {'label': '""" div mean"""'}), 
"(lat, +pdata['uzm_vzm_div'], label=' div mean')\n", (33794, 33841), True, 'import matplotlib.pyplot as plt\n'), ((33843, 33907), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(+pdata['uprime_vprime_zm_div'])"], {'label': '""" div eddy"""'}), "(lat, +pdata['uprime_vprime_zm_div'], label=' div eddy')\n", (33851, 33907), True, 'import matplotlib.pyplot as plt\n'), ((33910, 34004), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(+pdata['torque_lev_zm'] - pdata['torque_srf_zm'])"], {'label': '"""Torque Lev - Srf"""'}), "(lat, +pdata['torque_lev_zm'] - pdata['torque_srf_zm'], label=\n 'Torque Lev - Srf')\n", (33918, 34004), True, 'import matplotlib.pyplot as plt\n'), ((34002, 34071), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(-1 * pdata['F_tur_zm'])", '"""k--"""'], {'linewidth': '(2)', 'label': '""" F"""'}), "(lat, -1 * pdata['F_tur_zm'], 'k--', linewidth=2, label=' F')\n", (34010, 34071), True, 'import matplotlib.pyplot as plt\n'), ((34072, 34090), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(2)'}), '(ncol=2)\n', (34082, 34090), True, 'import matplotlib.pyplot as plt\n'), ((34091, 34114), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['pdata.units'], {}), '(pdata.units)\n', (34101, 34114), True, 'import matplotlib.pyplot as plt\n'), ((34116, 34132), 'matplotlib.pyplot.title', 'plt.title', (['"""LHS"""'], {}), "('LHS')\n", (34125, 34132), True, 'import matplotlib.pyplot as plt\n'), ((34133, 34143), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (34141, 34143), True, 'import matplotlib.pyplot as plt\n'), ((34144, 34159), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlims'], {}), '(xlims)\n', (34152, 34159), True, 'import matplotlib.pyplot as plt\n'), ((34161, 34181), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (34172, 34181), True, 'import matplotlib.pyplot as plt\n'), ((34180, 34210), 'matplotlib.pyplot.title', 'plt.title', (['"""1 1hour time step"""'], {}), "('1 1hour time step')\n", (34189, 34210), 
True, 'import matplotlib.pyplot as plt\n'), ((34771, 34794), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['pdata.units'], {}), '(pdata.units)\n', (34781, 34794), True, 'import matplotlib.pyplot as plt\n'), ((34796, 34810), 'matplotlib.pyplot.title', 'plt.title', (['"""F"""'], {}), "('F')\n", (34805, 34810), True, 'import matplotlib.pyplot as plt\n'), ((34811, 34821), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (34819, 34821), True, 'import matplotlib.pyplot as plt\n'), ((34822, 34837), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlims'], {}), '(xlims)\n', (34830, 34837), True, 'import matplotlib.pyplot as plt\n'), ((34996, 35015), 'numpy.cos', 'np.cos', (['lat_radiens'], {}), '(lat_radiens)\n', (35002, 35015), True, 'import numpy as np\n'), ((35021, 35046), 'tools_AM_budget.figure_axis_xy', 'M.figure_axis_xy', (['(6.5)', '(11)'], {}), '(6.5, 11)\n', (35037, 35046), True, 'import tools_AM_budget as M\n'), ((35047, 35088), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Veritcal integral"""'], {'y': '(1.03)'}), "('Veritcal integral', y=1.03)\n", (35059, 35088), True, 'import matplotlib.pyplot as plt\n'), ((35090, 35110), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(1)', '(1)'], {}), '(5, 1, 1)\n', (35101, 35110), True, 'import matplotlib.pyplot as plt\n'), ((35110, 35188), 'matplotlib.pyplot.title', 'plt.title', (['"""Representative Mean | Vertical Integral | weighed 1 Day Time mean"""'], {}), "('Representative Mean | Vertical Integral | weighed 1 Day Time mean')\n", (35119, 35188), True, 'import matplotlib.pyplot as plt\n'), ((35222, 35270), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "pdata['LHS']"], {'c': '"""k"""', 'label': '""" LHS"""'}), "(lat, pdata['LHS'], c='k', label=' LHS')\n", (35230, 35270), True, 'import matplotlib.pyplot as plt\n'), ((35272, 35315), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "pdata['dudt']"], {'label': '""" dudt"""'}), "(lat, pdata['dudt'], label=' dudt')\n", (35280, 35315), True, 'import matplotlib.pyplot 
as plt\n'), ((35316, 35362), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(-pdata['v_f_zm'])"], {'label': '""" -f v"""'}), "(lat, -pdata['v_f_zm'], label=' -f v')\n", (35324, 35362), True, 'import matplotlib.pyplot as plt\n'), ((35364, 35419), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(+pdata['uzm_vzm_div'])"], {'label': '""" div mean"""'}), "(lat, +pdata['uzm_vzm_div'], label=' div mean')\n", (35372, 35419), True, 'import matplotlib.pyplot as plt\n'), ((35421, 35485), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(+pdata['uprime_vprime_zm_div'])"], {'label': '""" div eddy"""'}), "(lat, +pdata['uprime_vprime_zm_div'], label=' div eddy')\n", (35429, 35485), True, 'import matplotlib.pyplot as plt\n'), ((35488, 35582), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(+pdata['torque_lev_zm'] - pdata['torque_srf_zm'])"], {'label': '"""Torque Lev - Srf"""'}), "(lat, +pdata['torque_lev_zm'] - pdata['torque_srf_zm'], label=\n 'Torque Lev - Srf')\n", (35496, 35582), True, 'import matplotlib.pyplot as plt\n'), ((35580, 35649), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(-1 * pdata['F_tur_zm'])", '"""k--"""'], {'linewidth': '(2)', 'label': '""" F"""'}), "(lat, -1 * pdata['F_tur_zm'], 'k--', linewidth=2, label=' F')\n", (35588, 35649), True, 'import matplotlib.pyplot as plt\n'), ((35651, 35674), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['pdata.units'], {}), '(pdata.units)\n', (35661, 35674), True, 'import matplotlib.pyplot as plt\n'), ((35675, 35693), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(2)'}), '(ncol=2)\n', (35685, 35693), True, 'import matplotlib.pyplot as plt\n'), ((35714, 35729), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlims'], {}), '(xlims)\n', (35722, 35729), True, 'import matplotlib.pyplot as plt\n'), ((35730, 35740), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (35738, 35740), True, 'import matplotlib.pyplot as plt\n'), ((35743, 35763), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(1)', '(2)'], {}), '(5, 1, 
2)\n', (35754, 35763), True, 'import matplotlib.pyplot as plt\n'), ((35763, 35829), 'matplotlib.pyplot.title', 'plt.title', (['"""Grouped | Vertical Integral | weighed 1 Day Time mean"""'], {}), "('Grouped | Vertical Integral | weighed 1 Day Time mean')\n", (35772, 35829), True, 'import matplotlib.pyplot as plt\n'), ((35863, 35911), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "pdata['LHS']"], {'c': '"""k"""', 'label': '""" LHS"""'}), "(lat, pdata['LHS'], c='k', label=' LHS')\n", (35871, 35911), True, 'import matplotlib.pyplot as plt\n'), ((35913, 36027), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(pdata['dudt'] + pdata['uprime_vprime_zm_div'] + pdata['uzm_vzm_div'])"], {'label': '"""dudt - eddy div """'}), "(lat, pdata['dudt'] + pdata['uprime_vprime_zm_div'] + pdata[\n 'uzm_vzm_div'], label='dudt - eddy div ')\n", (35921, 36027), True, 'import matplotlib.pyplot as plt\n'), ((36118, 36237), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(-pdata['v_f_zm'] + pdata['torque_lev_zm'] - pdata['torque_srf_zm'])"], {'label': '"""- fv + Torque Lev - Srf"""'}), "(lat, -pdata['v_f_zm'] + pdata['torque_lev_zm'] - pdata[\n 'torque_srf_zm'], label='- fv + Torque Lev - Srf')\n", (36126, 36237), True, 'import matplotlib.pyplot as plt\n'), ((36235, 36304), 'matplotlib.pyplot.plot', 'plt.plot', (['lat', "(-1 * pdata['F_tur_zm'])", '"""k--"""'], {'linewidth': '(2)', 'label': '""" F"""'}), "(lat, -1 * pdata['F_tur_zm'], 'k--', linewidth=2, label=' F')\n", (36243, 36304), True, 'import matplotlib.pyplot as plt\n'), ((36306, 36329), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['pdata.units'], {}), '(pdata.units)\n', (36316, 36329), True, 'import matplotlib.pyplot as plt\n'), ((36330, 36348), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(2)'}), '(ncol=2)\n', (36340, 36348), True, 'import matplotlib.pyplot as plt\n'), ((36369, 36384), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlims'], {}), '(xlims)\n', (36377, 36384), True, 'import matplotlib.pyplot as plt\n'), ((36385, 
36395), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (36393, 36395), True, 'import matplotlib.pyplot as plt\n'), ((36398, 36418), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(1)', '(3)'], {}), '(5, 1, 3)\n', (36409, 36418), True, 'import matplotlib.pyplot as plt\n'), ((36418, 36455), 'matplotlib.pyplot.title', 'plt.title', (['"""single 1 hour time step """'], {}), "('single 1 hour time step ')\n", (36427, 36455), True, 'import matplotlib.pyplot as plt\n'), ((37018, 37041), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['pdata.units'], {}), '(pdata.units)\n', (37028, 37041), True, 'import matplotlib.pyplot as plt\n'), ((37042, 37057), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlims'], {}), '(xlims)\n', (37050, 37057), True, 'import matplotlib.pyplot as plt\n'), ((37058, 37068), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (37066, 37068), True, 'import matplotlib.pyplot as plt\n'), ((37070, 37090), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(1)', '(4)'], {}), '(5, 1, 4)\n', (37081, 37090), True, 'import matplotlib.pyplot as plt\n'), ((37090, 37145), 'matplotlib.pyplot.title', 'plt.title', (['"""Balance for 1 day mean, expressed as AM/r """'], {}), "('Balance for 1 day mean, expressed as AM/r ')\n", (37099, 37145), True, 'import matplotlib.pyplot as plt\n'), ((37979, 38016), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'ncol': '(2)'}), "(loc='upper right', ncol=2)\n", (37989, 38016), True, 'import matplotlib.pyplot as plt\n'), ((38017, 38039), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$N /m^2$"""'], {}), "('$N /m^2$')\n", (38027, 38039), True, 'import matplotlib.pyplot as plt\n'), ((38040, 38055), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlims'], {}), '(xlims)\n', (38048, 38055), True, 'import matplotlib.pyplot as plt\n'), ((38056, 38066), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (38064, 38066), True, 'import matplotlib.pyplot as plt\n'), ((38068, 38088), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(1)', '(5)'], {}), '(5, 1, 5)\n', (38079, 38088), True, 'import matplotlib.pyplot as plt\n'), ((38088, 38112), 'matplotlib.pyplot.title', 'plt.title', (['"""Eddy Fluxes"""'], {}), "('Eddy Fluxes')\n", (38097, 38112), True, 'import matplotlib.pyplot as plt\n'), ((38620, 38657), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'ncol': '(2)'}), "(loc='upper right', ncol=2)\n", (38630, 38657), True, 'import matplotlib.pyplot as plt\n'), ((38658, 38701), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["pdata['uprime_vprime_zm'].units"], {}), "(pdata['uprime_vprime_zm'].units)\n", (38668, 38701), True, 'import matplotlib.pyplot as plt\n'), ((38703, 38718), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlims'], {}), '(xlims)\n', (38711, 38718), True, 'import matplotlib.pyplot as plt\n'), ((38719, 38729), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (38727, 38729), True, 'import matplotlib.pyplot as plt\n'), ((38886, 38897), 'time.time', 'time.time', ([], {}), '()\n', (38895, 38897), False, 'import time\n'), ((39057, 39111), 'tools_AM_budget.save_log_txt', 'M.save_log_txt', (['log_name', 'log_path', 'hist'], {'verbose': '(True)'}), '(log_name, log_path, hist, verbose=True)\n', (39071, 39111), True, 'import tools_AM_budget as M\n'), ((39478, 39547), 'tools_AM_budget.json_save', 'M.json_save', ([], {'name': 'date_str', 'path': "(conf_path + '/data/')", 'data': 'data_conf'}), "(name=date_str, path=conf_path + '/data/', data=data_conf)\n", (39489, 39547), True, 'import tools_AM_budget as M\n'), ((3723, 3742), 'numpy.cos', 'np.cos', (['lat_radiens'], {}), '(lat_radiens)\n', (3729, 3742), True, 'import numpy as np\n'), ((6843, 6862), 'numpy.cos', 'np.cos', (['lat_radiens'], {}), '(lat_radiens)\n', (6849, 6862), True, 'import numpy as np\n'), ((7319, 7393), 'numpy.array', 'np.array', (['[dd.longitude[-1].data + dlon, dd.longitude[-1].data + 2 * dlon]'], {}), '([dd.longitude[-1].data + dlon, 
dd.longitude[-1].data + 2 * dlon])\n', (7327, 7393), True, 'import numpy as np\n'), ((7454, 7530), 'numpy.array', 'np.array', (['[dd.longitude[0].data - 2 * dlon, dd.longitude[0].data - 1 * dlon]'], {}), '([dd.longitude[0].data - 2 * dlon, dd.longitude[0].data - 1 * dlon])\n', (7462, 7530), True, 'import numpy as np\n'), ((9594, 9616), 'tools_AM_budget.figure_axis_xy', 'M.figure_axis_xy', (['(6)', '(8)'], {}), '(6, 8)\n', (9610, 9616), True, 'import tools_AM_budget as M\n'), ((9621, 9687), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""budget closure for continental seperation"""'], {'y': '(1.025)'}), "('budget closure for continental seperation', y=1.025)\n", (9633, 9687), True, 'import matplotlib.pyplot as plt\n'), ((9693, 9713), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (9704, 9713), True, 'import matplotlib.pyplot as plt\n'), ((9761, 9795), 'matplotlib.pyplot.title', 'plt.title', (["('1 hour exmpl | ' + key)"], {}), "('1 hour exmpl | ' + key)\n", (9770, 9795), True, 'import matplotlib.pyplot as plt\n'), ((10250, 10268), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-90)', '(-70)'], {}), '(-90, -70)\n', (10258, 10268), True, 'import matplotlib.pyplot as plt\n'), ((10274, 10286), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10284, 10286), True, 'import matplotlib.pyplot as plt\n'), ((10291, 10311), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (10302, 10311), True, 'import matplotlib.pyplot as plt\n'), ((10341, 10373), 'matplotlib.pyplot.title', 'plt.title', (["('1 day mean | ' + key)"], {}), "('1 day mean | ' + key)\n", (10350, 10373), True, 'import matplotlib.pyplot as plt\n'), ((10744, 10761), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-78)', '(-0)'], {}), '(-78, -0)\n', (10752, 10761), True, 'import matplotlib.pyplot as plt\n'), ((10766, 10785), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-0.7)', '(0.7)'], {}), '(-0.7, 0.7)\n', (10774, 10785), True, 'import 
matplotlib.pyplot as plt\n'), ((10820, 10840), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (10831, 10840), True, 'import matplotlib.pyplot as plt\n'), ((10868, 10902), 'matplotlib.pyplot.title', 'plt.title', (["('1 hour exmpl | ' + key)"], {}), "('1 hour exmpl | ' + key)\n", (10877, 10902), True, 'import matplotlib.pyplot as plt\n'), ((11273, 11289), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-78)', '(0)'], {}), '(-78, 0)\n', (11281, 11289), True, 'import matplotlib.pyplot as plt\n'), ((12177, 12202), 'numpy.sin', 'np.sin', (['(lat * np.pi / 180)'], {}), '(lat * np.pi / 180)\n', (12183, 12202), True, 'import numpy as np\n'), ((15677, 15793), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'F_gravity_zm_data_seg', 'name': '"""Zonal mean zonal gravity wave stress"""', 'attrs': 'gravity_drag.attrs'}), "(data=F_gravity_zm_data_seg, name=\n 'Zonal mean zonal gravity wave stress', attrs=gravity_drag.attrs)\n", (15689, 15793), True, 'import xarray as xr\n'), ((16133, 16159), 'xarray.Dataset', 'xr.Dataset', (['CD_storage_rep'], {}), '(CD_storage_rep)\n', (16143, 16159), True, 'import xarray as xr\n'), ((16376, 16402), 'xarray.Dataset', 'xr.Dataset', (['CD_storage_bud'], {}), '(CD_storage_bud)\n', (16386, 16402), True, 'import xarray as xr\n'), ((16668, 16695), 'tools_AM_budget.mkdirs_r', 'M.mkdirs_r', (['save_path_local'], {}), '(save_path_local)\n', (16678, 16695), True, 'import tools_AM_budget as M\n'), ((16857, 16884), 'tools_AM_budget.mkdirs_r', 'M.mkdirs_r', (['save_path_local'], {}), '(save_path_local)\n', (16867, 16884), True, 'import tools_AM_budget as M\n'), ((39265, 39288), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (39286, 39288), False, 'import datetime\n'), ((7539, 7561), 'xarray.merge', 'xr.merge', (['[da, dd, de]'], {}), '([da, dd, de])\n', (7547, 7561), True, 'import xarray as xr\n'), ((8624, 8651), 'numpy.cos', 'np.cos', (['(lat * np.pi / 180.0)'], {}), '(lat * np.pi / 180.0)\n', (8630, 
8651), True, 'import numpy as np\n'), ((8746, 8773), 'numpy.cos', 'np.cos', (['(lat * np.pi / 180.0)'], {}), '(lat * np.pi / 180.0)\n', (8752, 8773), True, 'import numpy as np\n')] |
import codecs
import gzip
import logging
import numpy
from ..layers.time_distributed_embedding import TimeDistributedEmbedding
from .data_indexer import DataIndexer
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class PretrainedEmbeddings:
    """Utilities for loading pre-trained word embeddings into a Keras embedding layer."""

    @staticmethod
    def initialize_random_matrix(shape, seed=1337):
        """Return an array of the given ``shape`` drawn uniformly from [-0.05, 0.05).

        Parameters
        ----------
        shape : tuple of int
            Shape of the matrix to create (typically (vocab_size, embedding_dim)).
        seed : int, optional (default=1337)
            Seed for a private RandomState, so that initialization is reproducible.
        """
        # TODO(matt): we now already set the random seed, in run_solver.py. This should be
        # changed.
        numpy_rng = numpy.random.RandomState(seed)
        # Bug fix: the original call passed low=0.05, high=-0.05.  NumPy documents the
        # result of uniform() as undefined when high < low, so the bounds are given in
        # the correct order here; the distribution (uniform over +/-0.05) is unchanged.
        return numpy_rng.uniform(size=shape, low=-0.05, high=0.05)

    @staticmethod
    def get_embedding_layer(embeddings_filename: str,
                            data_indexer: DataIndexer,
                            trainable=False,
                            log_misses=False,
                            name="pretrained_embedding"):
        """
        Reads a pre-trained embedding file and generates a Keras Embedding layer that has weights
        initialized to the pre-trained embeddings.  The Embedding layer can either be trainable or
        not.

        We use the DataIndexer to map from the word strings in the embeddings file to the indices
        that we need, and to know which words from the embeddings file we can safely ignore.  If we
        come across a word in DataIndexer that does not show up in the embeddings file, we give it
        a random vector (words missing a pre-trained vector keep their random initialization).

        The embeddings file is assumed to be gzipped, formatted as [word] [dim 1] [dim 2] ...
        """
        words_to_keep = set(data_indexer.words_in_index())
        vocab_size = data_indexer.get_vocab_size()
        embeddings = {}
        embedding_dim = None
        # TODO(matt): make this a parameter
        embedding_misses_filename = 'embedding_misses.txt'
        # First we read the embeddings from the file, only keeping vectors for the words we need.
        logger.info("Reading embeddings from file")
        with gzip.open(embeddings_filename, 'rb') as embeddings_file:
            for line in embeddings_file:
                fields = line.decode('utf-8').strip().split(' ')
                if embedding_dim is None:
                    # The first line fixes the expected dimensionality.
                    embedding_dim = len(fields) - 1
                    assert embedding_dim > 1, "Found embedding size of 1; do you have a header?"
                else:
                    if len(fields) - 1 != embedding_dim:
                        # Sometimes there are funny unicode parsing problems that lead to different
                        # fields lengths (e.g., a word with a unicode space character that splits
                        # into more than one column).  We skip those lines.  Note that if you have
                        # some kind of long header, this could result in all of your lines getting
                        # skipped.  It's hard to check for that here; you just have to look in the
                        # embedding_misses_file and at the model summary to make sure things look
                        # like they are supposed to.
                        continue
                word = fields[0]
                if word in words_to_keep:
                    vector = numpy.asarray(fields[1:], dtype='float32')
                    embeddings[word] = vector
        # Now we initialize the weight matrix for an embedding layer, starting with random vectors,
        # then filling in the word vectors we just read.
        logger.info("Initializing pre-trained embedding layer")
        if log_misses:
            logger.info("Logging embedding misses to %s", embedding_misses_filename)
            embedding_misses_file = codecs.open(embedding_misses_filename, 'w', 'utf-8')
        embedding_matrix = PretrainedEmbeddings.initialize_random_matrix((vocab_size, embedding_dim))
        # The 2 here is because we know too much about the DataIndexer.  Index 0 is the padding
        # index, and the vector for that dimension is going to be 0.  Index 1 is the OOV token, and
        # we can't really set a vector for the OOV token.
        for i in range(2, vocab_size):
            word = data_indexer.get_word_from_index(i)
            # If we don't have a pre-trained vector for this word, we'll just leave this row alone,
            # so the word has a random initialization.
            if word in embeddings:
                embedding_matrix[i] = embeddings[word]
            elif log_misses:
                print(word, file=embedding_misses_file)
        if log_misses:
            embedding_misses_file.close()
        # The weight matrix is initialized, so we construct and return the actual Embedding layer.
        return TimeDistributedEmbedding(input_dim=vocab_size,
                                        output_dim=embedding_dim,
                                        mask_zero=True,
                                        weights=[embedding_matrix],
                                        trainable=trainable,
                                        name=name)
| [
"logging.getLogger",
"gzip.open",
"numpy.asarray",
"codecs.open",
"numpy.random.RandomState"
] | [((177, 204), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (194, 204), False, 'import logging\n'), ((468, 498), 'numpy.random.RandomState', 'numpy.random.RandomState', (['seed'], {}), '(seed)\n', (492, 498), False, 'import numpy\n'), ((1926, 1962), 'gzip.open', 'gzip.open', (['embeddings_filename', '"""rb"""'], {}), "(embeddings_filename, 'rb')\n", (1935, 1962), False, 'import gzip\n'), ((3598, 3650), 'codecs.open', 'codecs.open', (['embedding_misses_filename', '"""w"""', '"""utf-8"""'], {}), "(embedding_misses_filename, 'w', 'utf-8')\n", (3609, 3650), False, 'import codecs\n'), ((3142, 3184), 'numpy.asarray', 'numpy.asarray', (['fields[1:]'], {'dtype': '"""float32"""'}), "(fields[1:], dtype='float32')\n", (3155, 3184), False, 'import numpy\n')] |
# Copyright (c) 2015, <NAME>
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
import math
import numpy as np
import quaternion
import scri
import spherical_functions as sf
import warnings
def modes_constructor(constructor_statement, data_functor, **kwargs):
    """Build a WaveformModes object whose data comes from `data_functor`.

    Any unused keyword arguments trigger a warning.  Most keyword arguments are
    forwarded to the WaveformModes initializer, with sensible defaults supplied.

    Parameters
    ----------
    constructor_statement : str
        String form of the function call used to create the object; stored on the
        WaveformBase under the parameter of the same name.
    data_functor : function
        Takes a 1-d array of time values and an array of (ell, m) values and returns
        the complex array of data.
    t : float array, optional
        Time values of the data.  Default is `np.linspace(-10., 100., num=1101)`.
    ell_min, ell_max : int, optional
        Smallest and largest ell value present in the data.  Defaults are 2 and 8.
    """
    times = np.array(kwargs.pop("t", np.linspace(-10.0, 100.0, num=1101)), dtype=float)
    frame_data = np.array(kwargs.pop("frame", []), dtype=np.quaternion)
    frame_type = int(kwargs.pop("frameType", scri.Inertial))
    data_type = int(kwargs.pop("dataType", scri.h))
    r_scaled = bool(kwargs.pop("r_is_scaled_out", True))
    m_scaled = bool(kwargs.pop("m_is_scaled_out", True))
    ell_min = int(kwargs.pop("ell_min", abs(scri.SpinWeights[data_type])))
    ell_max = int(kwargs.pop("ell_max", 8))
    if kwargs:
        import pprint

        warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
    mode_data = data_functor(times, sf.LM_range(ell_min, ell_max))
    return scri.WaveformModes(
        t=times,
        frame=frame_data,
        data=mode_data,
        history=["# Called from constant_waveform"],
        frameType=frame_type,
        dataType=data_type,
        r_is_scaled_out=r_scaled,
        m_is_scaled_out=m_scaled,
        constructor_statement=constructor_statement,
        ell_min=ell_min,
        ell_max=ell_max,
    )
def constant_waveform(**kwargs):
    """Return a WaveformModes object whose modes are constant in time.

    Each (ell, m) mode holds the constant value m - 1j*m at every time step.
    Remaining keyword arguments are forwarded to `modes_constructor`.
    """
    if kwargs:
        import pprint

        warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")

    def data_functor(t, LM):
        # Broadcast the per-mode constant m - 1j*m across every time step.
        m_values = LM[:, 1]
        row = m_values - 1j * m_values
        return np.ascontiguousarray(np.broadcast_to(row, (t.shape[0], LM.shape[0])))

    return modes_constructor(f"constant_waveform(**{kwargs})", data_functor)
def single_mode(ell, m, **kwargs):
    """Return a WaveformModes object that is 1 in one mode and 0 elsewhere.

    Parameters
    ----------
    ell, m : int
        The (ell, m) value of the single nonzero mode.

    Remaining keyword arguments are forwarded to `modes_constructor`.
    """
    if kwargs:
        import pprint

        warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")

    def data_functor(t, LM):
        modes = np.zeros((t.shape[0], LM.shape[0]), dtype=complex)
        # Locate the requested (ell, m) column relative to the smallest ell present.
        column = sf.LM_index(ell, m, min(LM[:, 0]))
        modes[:, column] = 1.0 + 0.0j
        return modes

    return modes_constructor(f"single_mode({ell}, {m}, **{kwargs})", data_functor)
def random_waveform(**kwargs):
    """WaveformModes object filled with random data at each time step

    Additional keyword arguments are passed to `modes_constructor`.

    Parameters
    ----------
    uniform_time : bool, optional
        Use uniform, rather than random, time steps.  Default is False.
    begin, end : float, optional
        Initial and final times of the time data.  Default values are -10., 100.
    n_times : int, optional
        Number of time steps in the time data.  Default is 1101
    rotating : bool, optional
        If True, use a `Corotating` frame, with random orientation at each instant.  Default is True.
    """
    # Bug fix: this originally seeded with `hash("random_waveform")`, but `hash()` of a
    # str is salted per interpreter process (PYTHONHASHSEED), so the "reproducible"
    # random data actually changed on every run.  Derive a deterministic seed from the
    # same name instead; the mod keeps it in np.random.seed's accepted range.
    np.random.seed(int.from_bytes(b"random_waveform", "little") % 4294967294)
    begin = float(kwargs.pop("begin", -10.0))
    end = float(kwargs.pop("end", 100.0))
    n_times = int(kwargs.pop("n_times", 1101))
    if kwargs.pop("uniform_time", False):
        t = np.array(kwargs.pop("t", np.linspace(begin, end, num=n_times)), dtype=float)
    else:
        t = np.sort(np.random.uniform(begin, end, size=n_times))
    rotating = bool(kwargs.pop("rotating", True))
    if kwargs:
        import pprint

        warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")

    def data_functor(tin, LM):
        # Complex Gaussian data: view adjacent pairs of normals as complex numbers.
        data = np.random.normal(size=(tin.shape[0], LM.shape[0], 2)).view(complex)[:, :, 0]
        return data

    if rotating:
        # Random (normalized) orientation at each time step.
        frame = np.array([np.quaternion(*np.random.uniform(-1, 1, 4)).normalized() for t_i in t])
        return modes_constructor(
            f"random_waveform(**{kwargs})", data_functor, t=t, frame=frame, frameType=scri.Corotating
        )
    else:
        return modes_constructor(f"random_waveform(**{kwargs})", data_functor, t=t)
def random_waveform_proportional_to_time(**kwargs):
    """WaveformModes object filled with random data times the time coordinate

    Additional keyword arguments are passed to `modes_constructor`.

    Parameters
    ----------
    uniform_time : bool, optional
        Use uniform, rather than random, time steps.  Default is False.
    begin, end : float, optional
        Initial and final times of the time data.  Default values are -10., 100.
    n_times : int, optional
        Number of time steps in the time data.  Default is 1101
    rotating : bool, optional
        If True, use a `Corotating` frame, with random orientation at each instant.  Default is True.
    """
    # Bug fix: this originally seeded with `hash("random_waveform_proportional_to_time")`,
    # but `hash()` of a str is salted per interpreter process (PYTHONHASHSEED), so the
    # output was not actually reproducible across runs.  Use a deterministic digest of
    # the same name; the mod keeps the seed in np.random.seed's accepted range.
    np.random.seed(int.from_bytes(b"random_waveform_proportional_to_time", "little") % 4294967294)
    begin = float(kwargs.pop("begin", -10.0))
    end = float(kwargs.pop("end", 100.0))
    n_times = int(kwargs.pop("n_times", 1101))
    if kwargs.pop("uniform_time", False):
        t = np.array(kwargs.pop("t", np.linspace(begin, end, num=n_times)), dtype=float)
    else:
        t = np.sort(np.random.uniform(begin, end, size=n_times))
    rotating = bool(kwargs.pop("rotating", True))
    if kwargs:
        import pprint

        warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")

    def data_functor(tin, LM):
        # One random complex constant per mode, scaled linearly by the time coordinate.
        return np.outer(tin, np.random.normal(size=(LM.shape[0], 2)).view(complex)[:, 0])

    if rotating:
        axis = np.quaternion(0.0, *np.random.uniform(-1, 1, size=3)).normalized()
        omega = 2 * np.pi * 4 / (t[-1] - t[0])
        frame = np.array([np.exp(axis * (omega * t_i / 2)) for t_i in t])
        # Bug fix: the constructor_statement previously recorded "random_waveform",
        # copy-pasted from that function; record the correct function name.
        return modes_constructor(
            f"random_waveform_proportional_to_time(**{kwargs})", data_functor, t=t, frame=frame, frameType=scri.Corotating
        )
    else:
        return modes_constructor(f"random_waveform_proportional_to_time(**{kwargs})", data_functor, t=t)
def single_mode_constant_rotation(**kwargs):
    """Return a WaveformModes object whose single nonzero mode is exp(i*omega*t).

    Only one (ell, m) mode is nonzero, and it evolves as exp(1j*omega*t); a complex
    `omega` therefore produces exponential damping (or growth).

    Parameters
    ----------
    s : int, optional
        Spin weight of the waveform field.  Default is -2.
    ell, m : int, optional
        The (ell, m) values of the nonzero mode.  Default value is (abs(s), -abs(s)).
    ell_min, ell_max : int, optional
        Smallest and largest ell values present in the output.  Default values are abs(s) and 8.
    data_type : int, optional
        Default value is whichever psi_n corresponds to the input spin.  It is important to
        choose these, rather than `h` or `sigma`, for the analytical solution to translations,
        which doesn't account for the direct contribution of supertranslations.
    t_0, t_1 : float, optional
        Beginning and end of time.  Default values are -20. and 20.
    dt : float, optional
        Time step.  Default value is 0.1.
    omega : complex, optional
        Constant of proportionality such that the nonzero mode is exp(i*omega*t).  Note that
        this can be complex, which implies damping.  Default is 0.5.
    """
    spin = kwargs.pop("s", -2)
    ell = kwargs.pop("ell", abs(spin))
    m = kwargs.pop("m", -ell)
    ell_min = kwargs.pop("ell_min", abs(spin))
    ell_max = kwargs.pop("ell_max", 8)
    data_type = kwargs.pop("data_type", scri.DataType[scri.SpinWeights.index(spin)])
    t_initial = kwargs.pop("t_0", -20.0)
    t_final = kwargs.pop("t_1", 20.0)
    step = kwargs.pop("dt", 1.0 / 10.0)
    times = np.arange(t_initial, t_final + step, step)
    omega = complex(kwargs.pop("omega", 0.5))
    mode_data = np.zeros((times.size, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
    mode_data[:, sf.LM_index(ell, m, ell_min)] = np.exp(1j * omega * times)
    if kwargs:
        import pprint

        warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
    return scri.WaveformModes(
        t=times,
        data=mode_data,
        ell_min=ell_min,
        ell_max=ell_max,
        frameType=scri.Inertial,
        dataType=data_type,
        r_is_scaled_out=True,
        m_is_scaled_out=True,
    )
def single_mode_proportional_to_time(**kwargs):
    """Return a WaveformModes object whose single nonzero mode grows linearly in time.

    Only one (ell, m) mode is nonzero, and its value at time t is simply beta*t.

    Parameters
    ----------
    s : int, optional
        Spin weight of the waveform field.  Default is -2.
    ell, m : int, optional
        The (ell, m) values of the nonzero mode.  Default value is (abs(s), -abs(s)).
    ell_min, ell_max : int, optional
        Smallest and largest ell values present in the output.  Default values are abs(s) and 8.
    data_type : int, optional
        Default value is whichever psi_n corresponds to the input spin.  It is important to
        choose these, rather than `h` or `sigma`, for the analytical solution to translations,
        which doesn't account for the direct contribution of supertranslations.
    t_0, t_1 : float, optional
        Beginning and end of time.  Default values are -20. and 20.
    dt : float, optional
        Time step.  Default value is 0.1.
    beta : complex, optional
        Constant of proportionality such that the nonzero mode is beta*t.  Default is 1.
    """
    spin = kwargs.pop("s", -2)
    ell = kwargs.pop("ell", abs(spin))
    m = kwargs.pop("m", -ell)
    ell_min = kwargs.pop("ell_min", abs(spin))
    ell_max = kwargs.pop("ell_max", 8)
    data_type = kwargs.pop("data_type", scri.DataType[scri.SpinWeights.index(spin)])
    t_initial = kwargs.pop("t_0", -20.0)
    t_final = kwargs.pop("t_1", 20.0)
    step = kwargs.pop("dt", 1.0 / 10.0)
    times = np.arange(t_initial, t_final + step, step)
    beta = kwargs.pop("beta", 1.0)
    mode_data = np.zeros((times.size, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
    mode_data[:, sf.LM_index(ell, m, ell_min)] = beta * times
    if kwargs:
        import pprint

        warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
    return scri.WaveformModes(
        t=times,
        data=mode_data,
        ell_min=ell_min,
        ell_max=ell_max,
        frameType=scri.Inertial,
        dataType=data_type,
        r_is_scaled_out=True,
        m_is_scaled_out=True,
    )
def single_mode_proportional_to_time_supertranslated(**kwargs):
    """Return WaveformModes as in single_mode_proportional_to_time, with analytical supertranslation

    This function constructs the same basic object as `single_mode_proportional_to_time`, but then
    applies an analytical supertranslation.  The arguments to this function are the same as to the
    other, with two additions:

    Additional parameters
    ---------------------
    supertranslation : complex array, optional
        Spherical-harmonic modes of the supertranslation to apply to the waveform.  This is
        overwritten by `space_translation` if present.  Default value is `None`.
    space_translation : float array of length 3, optional
        This is just the 3-vector representing the displacement to apply to the waveform.  Note
        that if `supertranslation` is also given, this parameter overwrites its ell=1 part.
        Default value is [1.0, 0.0, 0.0].
    """
    s = kwargs.pop("s", -2)
    ell = kwargs.pop("ell", abs(s))
    m = kwargs.pop("m", -ell)
    ell_min = kwargs.pop("ell_min", abs(s))
    ell_max = kwargs.pop("ell_max", 8)
    data_type = kwargs.pop("data_type", scri.DataType[scri.SpinWeights.index(s)])
    t_0 = kwargs.pop("t_0", -20.0)
    t_1 = kwargs.pop("t_1", 20.0)
    dt = kwargs.pop("dt", 1.0 / 10.0)
    t = np.arange(t_0, t_1 + dt, dt)
    n_times = t.size
    beta = kwargs.pop("beta", 1.0)
    # Base waveform: a single (ell, m) mode equal to beta*t, as in
    # single_mode_proportional_to_time.
    data = np.zeros((n_times, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
    data[:, sf.LM_index(ell, m, ell_min)] = beta * t
    # Parse the supertranslation modes; a pure spatial translation lives in the ell=1 modes.
    supertranslation = np.array(kwargs.pop("supertranslation", np.array([], dtype=complex)), dtype=complex)
    if "space_translation" in kwargs:
        if supertranslation.size < 4:
            supertranslation.resize((4,))
        supertranslation[1:4] = -sf.vector_as_ell_1_modes(kwargs.pop("space_translation"))
    # A valid mode array through ell_max L has L*(L+2)+1 = (L+1)**2 elements.
    supertranslation_ell_max = int(math.sqrt(supertranslation.size) - 1)
    if supertranslation_ell_max * (supertranslation_ell_max + 2) + 1 != supertranslation.size:
        raise ValueError(f"Bad number of elements in supertranslation: {supertranslation.size}")
    # Mix the base mode (ell, m) into each output mode (ellp, mp) reachable from each
    # nonzero supertranslation mode (ellpp, mpp), weighted by products of Wigner 3-j
    # symbols.  Triangle inequality limits ellp to at most ell + ellpp.
    for i, (ellpp, mpp) in enumerate(sf.LM_range(0, supertranslation_ell_max)):
        if supertranslation[i] != 0.0:
            mp = m + mpp
            for ellp in range(ell_min, min(ell_max, (ell + ellpp)) + 1):
                if ellp >= abs(mp):
                    addition = (
                        beta
                        * supertranslation[i]
                        * math.sqrt(((2 * ellpp + 1) * (2 * ell + 1) * (2 * ellp + 1)) / (4 * math.pi))
                        * sf.Wigner3j(ellpp, ell, ellp, 0, -s, s)
                        * sf.Wigner3j(ellpp, ell, ellp, mpp, m, -mp)
                    )
                    # Equivalent to multiplying by (-1)**(s + mp).
                    if (s + mp) % 2 == 1:
                        addition *= -1
                    data[:, sf.LM_index(ellp, mp, ell_min)] += addition
    if kwargs:
        import pprint
        warnings.warn(f"\nUnused kwargs passed to this function:\n{pprint.pformat(kwargs, width=1)}")
    return scri.WaveformModes(
        t=t,
        data=data,
        ell_min=ell_min,
        ell_max=ell_max,
        frameType=scri.Inertial,
        dataType=data_type,
        r_is_scaled_out=True,
        m_is_scaled_out=True,
    )
def fake_precessing_waveform(
    t_0=-20.0,
    t_1=20_000.0,
    dt=0.1,
    ell_max=8,
    mass_ratio=2.0,
    precession_opening_angle=np.pi / 6.0,
    precession_opening_angle_dot=None,
    precession_relative_rate=0.1,
    precession_nutation_angle=None,
    inertial=True,
):
    """Construct a strain waveform with realistic precession effects.

    This model is intended to be weird enough that it breaks any overly simplistic assumptions
    about waveform symmetries while still being (mostly) realistic.

    This waveform uses only the very lowest-order terms from PN theory to evolve the orbital
    frequency up to a typical constant value (with a smooth transition), and to construct modes
    that have very roughly the correct amplitudes as a function of the orbital frequency.  Modes
    with equal ell values but opposite m values are modulated antisymmetrically, though this
    modulation decays quickly after merger -- roughly as it would behave in a precessing system.
    The modes are then smoothly transitioned to an exponential decay after merger.  The frame is a
    simulated precession involving the basic orbital rotation precessing about a cone of
    increasing opening angle and nutating about that cone on the orbital time scale, but settling
    down to a constant direction shortly after merger.  (Though there is little precise physical
    content, these features are all found in real waveforms.)  If the input argument `inertial`
    is `True` (the default), the waveform is transformed back to the inertial frame before
    returning.

    Parameters
    ==========
    t_0: float [defaults to -20.0]
    t_1: float [defaults to 20_000.0]
        The initial and final times in the output waveform.  Note that the merger is placed 100.0
        time units before `t_1`, and several transitions are made before this, so `t_0` must be
        that far ahead of `t_1`.
    dt: float [defaults to 0.1]
        Spacing of output time series.
    ell_max: int [defaults to 8]
        Largest ell value in the output modes.
    mass_ratio: float [defaults to 2.0]
        Ratio of BH masses to use as input to rough approximations for orbital evolution and mode
        amplitudes.
    precession_opening_angle: float [defaults to pi/6]
        Opening angle of the precession cone.
    precession_opening_angle_dot: float [defaults to 2*precession_opening_angle/(t_merger-t_0)]
        Rate at which precession cone opens.
    precession_relative_rate: float [defaults to 0.1]
        Fraction of the magnitude of the orbital angular velocity at which it precesses.
    precession_nutation_angle: float [defaults to precession_opening_angle/10]
        Angle (relative to precession_opening_angle) by which the orbital angular velocity
        nutates.
    """
    import warnings
    import numpy as np
    import quaternion
    from quaternion.calculus import indefinite_integral
    from .utilities import transition_function, transition_to_constant
    if mass_ratio < 1.0:
        mass_ratio = 1.0 / mass_ratio
    s = -2
    ell_min = abs(s)
    data_type = scri.h
    # nu is the symmetric mass ratio, used in the PN-style phasing below.
    nu = mass_ratio / (1 + mass_ratio) ** 2
    t = np.arange(t_0, t_1 + 0.99 * dt, dt)
    t_merger = t_1 - 100.0
    i_merger = np.argmin(abs(t - t_merger))
    if i_merger < 20:
        raise ValueError(f"Insufficient space between initial time (t={t_merger}) and merger (t={t_0}).")
    n_times = t.size
    data = np.zeros((n_times, sf.LM_total_size(ell_min, ell_max)), dtype=complex)
    # Get a rough (leading-order) approximation to the phasing through merger
    tau = nu * (t_merger - t) / 5
    with warnings.catch_warnings():  # phi and omega will have NaNs after t_merger for now
        warnings.simplefilter("ignore")
        phi = -4 * tau ** (5 / 8)
        omega = (nu / 2) * tau ** (-3 / 8)
    # Now, transition omega smoothly up to a constant value of 0.25
    omega_transition_width = 5.0
    i1 = np.argmin(np.abs(omega[~np.isnan(omega)] - 0.25))
    i0 = np.argmin(np.abs(t - (t[i1] - omega_transition_width)))
    transition = transition_function(t, t[i0], t[i1])
    zero_point_two_five = 0.25 * np.ones_like(t)
    omega[:i1] = omega[:i1] * (1 - transition[:i1]) + zero_point_two_five[:i1] * transition[:i1]
    omega[i1:] = 0.25
    # Integrate phi after i0 to agree with the new omega
    phi[i0:] = phi[i0] + indefinite_integral(omega[i0:], t[i0:])
    # Construct ringdown-transition function; note that i0/i1 are re-used here for the
    # merger-to-ringdown window, not the omega window above.
    ringdown_transition_width = 20
    t0 = t_merger
    i0 = np.argmin(np.abs(t - t_merger))
    i1 = np.argmin(np.abs(t - (t[i0] + ringdown_transition_width)))
    t0 = t[i0]
    t1 = t[i1]
    transition = transition_function(t, t0, t1)
    ringdown = np.ones_like(t)
    ringdown[i0:] = ringdown[i0:] * (1 - transition[i0:]) + 2.25 * np.exp(-(t[i0:] - t_merger) / 11.5) * transition[i0:]
    # Construct frame
    if precession_opening_angle_dot is None:
        precession_opening_angle_dot = 2.0 * precession_opening_angle / (t[i1] - t[0])
    if precession_nutation_angle is None:
        precession_nutation_angle = precession_opening_angle / 10.0
    # Basic orbital rotation, then cone opening, slow precession, and fast nutation
    # are composed below into the full corotating frame.
    R_orbital = np.exp(phi * quaternion.z / 2)
    R_opening = np.exp(
        transition_to_constant(precession_opening_angle + precession_opening_angle_dot * t, t, t0, t1)
        * quaternion.x
        / 2
    )
    R_precession = np.exp(transition_to_constant(phi / precession_relative_rate, t, t0, t1) * quaternion.z / 2)
    R_nutation = np.exp(precession_nutation_angle * transition * quaternion.x / 2)
    frame = (
        R_orbital * R_nutation * R_orbital.conjugate() * R_precession * R_opening * R_precession.conjugate() * R_orbital
    )
    frame = frame[0].sqrt().conjugate() * frame  # Just give the initial angle a weird little tweak to screw things up
    # Construct the modes
    x = omega ** (2 / 3)
    modulation = transition_function(t, t[i0], t[i1], 1, 0) * np.cos(phi) / 40.0
    for ell in range(ell_min, ell_max + 1):
        for m in range(-ell, ell + 1):
            data[:, sf.LM_index(ell, m, ell_min)] = pn_leading_order_amplitude(ell, m, x, mass_ratio=mass_ratio) * (
                1 + np.sign(m) * modulation
            )
    # Apply ringdown (mode amplitudes are constant after t_merger)
    data *= ringdown[:, np.newaxis]
    h_corot = scri.WaveformModes(
        t=t,
        frame=frame,
        data=data,
        ell_min=ell_min,
        ell_max=ell_max,
        frameType=scri.Corotating,
        dataType=data_type,
        r_is_scaled_out=True,
        m_is_scaled_out=True,
    )
    if inertial:
        return h_corot.to_inertial_frame()
    else:
        return h_corot
def pn_leading_order_amplitude(ell, m, x, mass_ratio=1.0):
    """Return the leading-order amplitude of r*h/M in PN theory

    These expressions are from Eqs. (330) of Blanchet's Living Review (2014).
    Note that `x` is just the orbital angular velocity to the (2/3) power.
    """
    from scipy.special import factorial, factorial2

    # Negative-m modes follow from the positive-m modes by conjugation.
    if m < 0:
        return (-1) ** ell * np.conjugate(pn_leading_order_amplitude(ell, -m, x, mass_ratio=mass_ratio))

    if mass_ratio < 1.0:
        mass_ratio = 1.0 / mass_ratio
    nu = mass_ratio / (1 + mass_ratio) ** 2
    X1 = mass_ratio / (mass_ratio + 1)
    X2 = 1 / (mass_ratio + 1)

    def sigma(ell):
        # Mass-asymmetry factor; vanishes for odd ell when the masses are equal.
        return X2 ** (ell - 1) + (-1) ** ell * X1 ** (ell - 1)

    if (ell + m) % 2 == 0:
        # ell + m even
        sign = (-1) ** ((ell - m + 2) / 2)
        denom = 2 ** (ell + 1) * factorial((ell + m) // 2) * factorial((ell - m) // 2) * factorial2(2 * ell - 1)
        root = np.sqrt(
            (5 * (ell + 1) * (ell + 2) * factorial(ell + m) * factorial(ell - m))
            / (ell * (ell - 1) * (2 * ell + 1))
        )
        amplitude = (sign / denom) * root * sigma(ell) * (1j * m) ** ell * x ** (ell / 2 - 1)
    else:
        # ell + m odd
        sign = (-1) ** ((ell - m - 1) / 2)
        denom = (
            2 ** (ell - 1)
            * factorial((ell + m - 1) // 2)
            * factorial((ell - m - 1) // 2)
            * factorial2(2 * ell + 1)
        )
        root = np.sqrt(
            (5 * (ell + 2) * (2 * ell + 1) * factorial(ell + m) * factorial(ell - m))
            / (ell * (ell - 1) * (ell + 1))
        )
        amplitude = (sign / denom) * root * sigma(ell + 1) * 1j * (1j * m) ** ell * x ** ((ell - 1) / 2)
    return 8 * np.sqrt(np.pi / 5) * nu * x * amplitude
def create_fake_finite_radius_strain_h5file(
    output_file_path="./rh_FiniteRadii_CodeUnits.h5",
    n_subleading=3,
    amp=1.0,
    t_0=0.0,
    t_1=3000.0,
    dt=0.1,
    r_min=100.0,
    r_max=600.0,
    n_radii=24,
    ell_max=8,
    initial_adm_energy=0.99,
    avg_lapse=0.99,
    avg_areal_radius_diff=1.0,
    mass_ratio=1.0,
    precession_opening_angle=0.0,
    **kwargs,
):
    """
    Create an HDF5 file with fake finite-radius GW strain data in the NRAR format, as
    used by the Spectral Einstein Code (SpEC).  The asymptotic waveform is created by
    the function scri.sample_waveforms.fake_precessing_waveform and then radius-dependent
    terms are added to it.  These subleading artificial "near-zone" effects are simple
    sinusoids.  The finite-radius waveform, scaled by a factor of the radius, is then,

        r*h(r) = h_0 + h_1*r**-1 + h_2*r**-2 + ... + h_n*r**-n

    where h_0 is the waveform from scri.sample_waveforms.fake_precessing_waveform() and n is
    chosen by the user.

    Finally, the finite-radius waveforms as output by SpEC use simulation coordinates for
    radius and time, which do not perfectly parametrize outgoing null rays.  This function
    approximates these simulation coordinates so that the data in the resulting HDF5 file
    most nearly mimics that of a SpEC HDF5 output file.

    Parameters
    ==========
    output_file_path: str
        The name and path of the output file.  The filename must be "rh_FiniteRadius_CodeUnits.h5"
        if you wish to be able to run scri.extrapolation.extrapolate on it.
    n_subleading: int [defaults to 3]
        The number of subleading, radius-dependent terms to add to the asymptotic waveform.
    amp: float [defaults to 1.0]
        The amplitude of the subleading, radius-dependent terms.
    t_0: float [defaults to 0.0]
    t_1: float [defaults to 3000.0]
        The initial and final times in the output waveform.  Note that the merger is placed
        100.0 time units before `t_1`, and several transitions are made before this, so `t_0`
        must be that far ahead of `t_1`.
    dt: float [defaults to 0.1]
        Spacing of output time series.
    r_min: float [defaults to 100.0]
    r_max: float [defaults to 600.0]
    n_radii: float [defaults to 24]
        The minimum and maximum radius, and the number of radii between these values, at which
        to produce a finite-radius waveform.  These will be equally spaced in inverse radius.
    ell_max: int [defaults to 8]
        Largest ell value in the output modes.
    initial_adm_energy: float [defaults to 0.99]
        The initial ADM energy as would be computed by the SpEC initial data solver.
    avg_lapse: float [defaults to 0.99]
        The value of the lapse averaged over a sphere at the extraction radius.
    avg_areal_radius_diff: float [defaults to 1.0]
        How much on average the areal radius is larger than the coord radius; may be negative.
    mass_ratio: float [defaults to 1.0]
        Ratio of BH masses to use as input to rough approximations for orbital evolution
        and mode amplitudes.
    precession_opening_angle: float [defaults to 0.0]
        Opening angle of the precession cone.  The default results in no precession.  If
        this value is non-zero, then the following options from fake_precessing_waveform
        may be supplied as kwargs:
          * precession_opening_angle_dot
          * precession_relative_rate
          * precession_nutation_angle
        See the help text of fake_precessing_waveform for documentation.
    """
    import h5py
    from scipy.interpolate import CubicSpline
    with h5py.File(output_file_path, "x") as h5file:
        # Set up an H5 group for each radius (equally spaced in 1/r)
        coord_radii = (1 / np.linspace(1 / r_min, 1 / r_max, n_radii)).astype(int)
        groups = [f"R{R:04}.dir" for R in coord_radii]
        for group in groups:
            h5file.create_group(group)
        # Create version history dataset
        # We mimic how SpEC encodes the strings in VersionHist.ver to ensure we
        # don't hit any errors due to the encoding.
        version_hist = [["ef51849550a1d8a5bbdd810c7debf0dd839e86dd", "Overall sign change in complex strain h."]]
        reencoded_version_hist = [
            [entry[0].encode("ascii", "ignore"), entry[1].encode("ascii", "ignore")] for entry in version_hist
        ]
        dset = h5file.create_dataset(
            "VersionHist.ver",
            (len(version_hist), 2),
            maxshape=(None, 2),
            data=reencoded_version_hist,
            dtype=h5py.special_dtype(vlen=bytes),
        )
        dset.attrs.create("Version", len(version_hist), shape=(1,), dtype=np.uint64)
        # Generate the asymptotic waveform
        h0 = scri.sample_waveforms.fake_precessing_waveform(
            t_0=t_0, t_1=t_1, dt=dt, ell_max=ell_max, precession_opening_angle=precession_opening_angle, **kwargs
        )
        # Extend the time axis so the most distant radius still fits its retarded data.
        n_times = int(h0.t.shape[0] + r_max / dt)
        t_1 += r_max
        all_times = np.linspace(0, t_1, n_times)
        # Set auxiliary datasets
        for i in range(len(groups)):
            # Set coordinate radius dataset
            coord_radius = np.vstack((all_times, [coord_radii[i]] * n_times)).T
            dset = h5file.create_dataset(f"{groups[i]}/CoordRadius.dat", data=coord_radius)
            dset.attrs.create("Legend", ["time", "CoordRadius"])
            # Set areal radius dataset
            # NOTE(review): this is an alias of `coord_radius`, not a copy, so the += below
            # modifies both names in place -- appears intentional since coord_radius has
            # already been written to the file and is not reused afterwards.
            areal_radius = coord_radius
            areal_radius[:, 1] += avg_areal_radius_diff
            dset = h5file.create_dataset(f"{groups[i]}/ArealRadius.dat", data=areal_radius)
            dset.attrs.create("Legend", ["time", "ArealRadius"])
            # Set initial ADM energy dataset
            seg_start_times = h0.t[:: int(n_times / 20)]
            adm_energy_dset = np.vstack((seg_start_times, [initial_adm_energy] * len(seg_start_times))).T
            dset = h5file.create_dataset(f"{groups[i]}/InitialAdmEnergy.dat", data=adm_energy_dset)
            dset.attrs.create("Legend", ["time", "InitialAdmEnergy"])
            # Set average lapse dataset
            avg_lapse_dset = np.vstack((all_times, [avg_lapse] * n_times)).T
            dset = h5file.create_dataset(f"{groups[i]}/AverageLapse.dat", data=avg_lapse_dset)
            dset.attrs.create("Legend", ["time", "AverageLapse"])
            # Set finite radius data
            R = areal_radius[0, 1]
            # Tortoise coordinate for a Schwarzschild-like background of mass initial_adm_energy
            tortoise_coord = R + 2 * initial_adm_energy * np.log(R / (2 * initial_adm_energy) - 1)
            # Compute the approximate time in SpEC simulation coordinates
            simulation_time = (h0.t + tortoise_coord) * np.sqrt(1 - 2 * initial_adm_energy / R) / avg_lapse
            for l in range(2, ell_max + 1):
                for m in range(-l, l + 1):
                    index = sf.LM_index(l, m, 2)
                    new_data = h0.data[:, index]
                    # Add sinusoidal "near-zone" terms, each suppressed by another power of R.
                    for n in range(1, n_subleading + 1):
                        new_data += amp * R ** -n * np.exp((1j * n * 50 * np.pi / h0.n_times) * h0.t) * h0.abs[:, index]
                    # Resample onto the uniform output grid; zero outside the valid range.
                    new_data = CubicSpline(simulation_time, new_data)(all_times)
                    new_data[(all_times < simulation_time[0]) | (all_times > simulation_time[-1])] = 0.0
                    new_data += (1 + 1j) * 1e-14 * all_times
                    new_dset = np.vstack((all_times, new_data.real, new_data.imag)).T
                    dset = h5file.create_dataset(f"{groups[i]}/Y_l{l}_m{m}.dat", data=new_dset)
                    dset.attrs.create(
                        "Legend",
                        [
                            "time",
                            f"Re[rh]_l{l}_m{m}(R={coord_radii[i]}.00)",
                            f"Im[rh]_l{l}_m{m}(R={coord_radii[i]}.00)",
                        ],
                    )
| [
"numpy.sqrt",
"scipy.special.factorial",
"numpy.log",
"math.sqrt",
"spherical_functions.Wigner3j",
"numpy.array",
"scri.SpinWeights.index",
"spherical_functions.LM_index",
"numpy.arange",
"scri.sample_waveforms.fake_precessing_waveform",
"scipy.interpolate.CubicSpline",
"numpy.exp",
"numpy.l... | [((1874, 2171), 'scri.WaveformModes', 'scri.WaveformModes', ([], {'t': 't', 'frame': 'frame', 'data': 'data', 'history': "['# Called from constant_waveform']", 'frameType': 'frameType', 'dataType': 'dataType', 'r_is_scaled_out': 'r_is_scaled_out', 'm_is_scaled_out': 'm_is_scaled_out', 'constructor_statement': 'constructor_statement', 'ell_min': 'ell_min', 'ell_max': 'ell_max'}), "(t=t, frame=frame, data=data, history=[\n '# Called from constant_waveform'], frameType=frameType, dataType=\n dataType, r_is_scaled_out=r_is_scaled_out, m_is_scaled_out=\n m_is_scaled_out, constructor_statement=constructor_statement, ell_min=\n ell_min, ell_max=ell_max)\n", (1892, 2171), False, 'import scri\n'), ((9045, 9073), 'numpy.arange', 'np.arange', (['t_0', '(t_1 + dt)', 'dt'], {}), '(t_0, t_1 + dt, dt)\n', (9054, 9073), True, 'import numpy as np\n'), ((9267, 9291), 'numpy.exp', 'np.exp', (['(1.0j * omega * t)'], {}), '(1.0j * omega * t)\n', (9273, 9291), True, 'import numpy as np\n'), ((9443, 9608), 'scri.WaveformModes', 'scri.WaveformModes', ([], {'t': 't', 'data': 'data', 'ell_min': 'ell_min', 'ell_max': 'ell_max', 'frameType': 'scri.Inertial', 'dataType': 'data_type', 'r_is_scaled_out': '(True)', 'm_is_scaled_out': '(True)'}), '(t=t, data=data, ell_min=ell_min, ell_max=ell_max,\n frameType=scri.Inertial, dataType=data_type, r_is_scaled_out=True,\n m_is_scaled_out=True)\n', (9461, 9608), False, 'import scri\n'), ((11396, 11424), 'numpy.arange', 'np.arange', (['t_0', '(t_1 + dt)', 'dt'], {}), '(t_0, t_1 + dt, dt)\n', (11405, 11424), True, 'import numpy as np\n'), ((11769, 11934), 'scri.WaveformModes', 'scri.WaveformModes', ([], {'t': 't', 'data': 'data', 'ell_min': 'ell_min', 'ell_max': 'ell_max', 'frameType': 'scri.Inertial', 'dataType': 'data_type', 'r_is_scaled_out': '(True)', 'm_is_scaled_out': '(True)'}), '(t=t, data=data, ell_min=ell_min, ell_max=ell_max,\n frameType=scri.Inertial, dataType=data_type, r_is_scaled_out=True,\n m_is_scaled_out=True)\n', (11787, 
11934), False, 'import scri\n'), ((13307, 13335), 'numpy.arange', 'np.arange', (['t_0', '(t_1 + dt)', 'dt'], {}), '(t_0, t_1 + dt, dt)\n', (13316, 13335), True, 'import numpy as np\n'), ((15037, 15202), 'scri.WaveformModes', 'scri.WaveformModes', ([], {'t': 't', 'data': 'data', 'ell_min': 'ell_min', 'ell_max': 'ell_max', 'frameType': 'scri.Inertial', 'dataType': 'data_type', 'r_is_scaled_out': '(True)', 'm_is_scaled_out': '(True)'}), '(t=t, data=data, ell_min=ell_min, ell_max=ell_max,\n frameType=scri.Inertial, dataType=data_type, r_is_scaled_out=True,\n m_is_scaled_out=True)\n', (15055, 15202), False, 'import scri\n'), ((18422, 18457), 'numpy.arange', 'np.arange', (['t_0', '(t_1 + 0.99 * dt)', 'dt'], {}), '(t_0, t_1 + 0.99 * dt, dt)\n', (18431, 18457), True, 'import numpy as np\n'), ((19937, 19952), 'numpy.ones_like', 'np.ones_like', (['t'], {}), '(t)\n', (19949, 19952), True, 'import numpy as np\n'), ((20355, 20385), 'numpy.exp', 'np.exp', (['(phi * quaternion.z / 2)'], {}), '(phi * quaternion.z / 2)\n', (20361, 20385), True, 'import numpy as np\n'), ((20683, 20748), 'numpy.exp', 'np.exp', (['(precession_nutation_angle * transition * quaternion.x / 2)'], {}), '(precession_nutation_angle * transition * quaternion.x / 2)\n', (20689, 20748), True, 'import numpy as np\n'), ((21519, 21701), 'scri.WaveformModes', 'scri.WaveformModes', ([], {'t': 't', 'frame': 'frame', 'data': 'data', 'ell_min': 'ell_min', 'ell_max': 'ell_max', 'frameType': 'scri.Corotating', 'dataType': 'data_type', 'r_is_scaled_out': '(True)', 'm_is_scaled_out': '(True)'}), '(t=t, frame=frame, data=data, ell_min=ell_min, ell_max=\n ell_max, frameType=scri.Corotating, dataType=data_type, r_is_scaled_out\n =True, m_is_scaled_out=True)\n', (21537, 21701), False, 'import scri\n'), ((1835, 1864), 'spherical_functions.LM_range', 'sf.LM_range', (['ell_min', 'ell_max'], {}), '(ell_min, ell_max)\n', (1846, 1864), True, 'import spherical_functions as sf\n'), ((2620, 2670), 'numpy.zeros', 'np.zeros', 
(['(t.shape[0], LM.shape[0])'], {'dtype': 'complex'}), '((t.shape[0], LM.shape[0]), dtype=complex)\n', (2628, 2670), True, 'import numpy as np\n'), ((3309, 3359), 'numpy.zeros', 'np.zeros', (['(t.shape[0], LM.shape[0])'], {'dtype': 'complex'}), '((t.shape[0], LM.shape[0]), dtype=complex)\n', (3317, 3359), True, 'import numpy as np\n'), ((14146, 14186), 'spherical_functions.LM_range', 'sf.LM_range', (['(0)', 'supertranslation_ell_max'], {}), '(0, supertranslation_ell_max)\n', (14157, 14186), True, 'import spherical_functions as sf\n'), ((18866, 18891), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (18889, 18891), False, 'import warnings\n'), ((18956, 18987), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (18977, 18987), False, 'import warnings\n'), ((19245, 19289), 'numpy.abs', 'np.abs', (['(t - (t[i1] - omega_transition_width))'], {}), '(t - (t[i1] - omega_transition_width))\n', (19251, 19289), True, 'import numpy as np\n'), ((19378, 19393), 'numpy.ones_like', 'np.ones_like', (['t'], {}), '(t)\n', (19390, 19393), True, 'import numpy as np\n'), ((19596, 19635), 'quaternion.calculus.indefinite_integral', 'indefinite_integral', (['omega[i0:]', 't[i0:]'], {}), '(omega[i0:], t[i0:])\n', (19615, 19635), False, 'from quaternion.calculus import indefinite_integral\n'), ((19754, 19774), 'numpy.abs', 'np.abs', (['(t - t_merger)'], {}), '(t - t_merger)\n', (19760, 19774), True, 'import numpy as np\n'), ((19795, 19842), 'numpy.abs', 'np.abs', (['(t - (t[i0] + ringdown_transition_width))'], {}), '(t - (t[i0] + ringdown_transition_width))\n', (19801, 19842), True, 'import numpy as np\n'), ((27399, 27431), 'h5py.File', 'h5py.File', (['output_file_path', '"""x"""'], {}), "(output_file_path, 'x')\n", (27408, 27431), False, 'import h5py\n'), ((28519, 28677), 'scri.sample_waveforms.fake_precessing_waveform', 'scri.sample_waveforms.fake_precessing_waveform', ([], {'t_0': 't_0', 't_1': 't_1', 'dt': 'dt', 'ell_max': 
'ell_max', 'precession_opening_angle': 'precession_opening_angle'}), '(t_0=t_0, t_1=t_1, dt=dt,\n ell_max=ell_max, precession_opening_angle=precession_opening_angle, **\n kwargs)\n', (28565, 28677), False, 'import scri\n'), ((28782, 28810), 'numpy.linspace', 'np.linspace', (['(0)', 't_1', 'n_times'], {}), '(0, t_1, n_times)\n', (28793, 28810), True, 'import numpy as np\n'), ((1193, 1228), 'numpy.linspace', 'np.linspace', (['(-10.0)', '(100.0)'], {'num': '(1101)'}), '(-10.0, 100.0, num=1101)\n', (1204, 1228), True, 'import numpy as np\n'), ((4538, 4581), 'numpy.random.uniform', 'np.random.uniform', (['begin', 'end'], {'size': 'n_times'}), '(begin, end, size=n_times)\n', (4555, 4581), True, 'import numpy as np\n'), ((6374, 6417), 'numpy.random.uniform', 'np.random.uniform', (['begin', 'end'], {'size': 'n_times'}), '(begin, end, size=n_times)\n', (6391, 6417), True, 'import numpy as np\n'), ((8902, 8927), 'scri.SpinWeights.index', 'scri.SpinWeights.index', (['s'], {}), '(s)\n', (8924, 8927), False, 'import scri\n'), ((9171, 9205), 'spherical_functions.LM_total_size', 'sf.LM_total_size', (['ell_min', 'ell_max'], {}), '(ell_min, ell_max)\n', (9187, 9205), True, 'import spherical_functions as sf\n'), ((9235, 9263), 'spherical_functions.LM_index', 'sf.LM_index', (['ell', 'm', 'ell_min'], {}), '(ell, m, ell_min)\n', (9246, 9263), True, 'import spherical_functions as sf\n'), ((11253, 11278), 'scri.SpinWeights.index', 'scri.SpinWeights.index', (['s'], {}), '(s)\n', (11275, 11278), False, 'import scri\n'), ((11511, 11545), 'spherical_functions.LM_total_size', 'sf.LM_total_size', (['ell_min', 'ell_max'], {}), '(ell_min, ell_max)\n', (11527, 11545), True, 'import spherical_functions as sf\n'), ((11575, 11603), 'spherical_functions.LM_index', 'sf.LM_index', (['ell', 'm', 'ell_min'], {}), '(ell, m, ell_min)\n', (11586, 11603), True, 'import spherical_functions as sf\n'), ((13164, 13189), 'scri.SpinWeights.index', 'scri.SpinWeights.index', (['s'], {}), '(s)\n', (13186, 13189), 
False, 'import scri\n'), ((13422, 13456), 'spherical_functions.LM_total_size', 'sf.LM_total_size', (['ell_min', 'ell_max'], {}), '(ell_min, ell_max)\n', (13438, 13456), True, 'import spherical_functions as sf\n'), ((13486, 13514), 'spherical_functions.LM_index', 'sf.LM_index', (['ell', 'm', 'ell_min'], {}), '(ell, m, ell_min)\n', (13497, 13514), True, 'import spherical_functions as sf\n'), ((13590, 13617), 'numpy.array', 'np.array', (['[]'], {'dtype': 'complex'}), '([], dtype=complex)\n', (13598, 13617), True, 'import numpy as np\n'), ((13879, 13911), 'math.sqrt', 'math.sqrt', (['supertranslation.size'], {}), '(supertranslation.size)\n', (13888, 13911), False, 'import math\n'), ((18708, 18742), 'spherical_functions.LM_total_size', 'sf.LM_total_size', (['ell_min', 'ell_max'], {}), '(ell_min, ell_max)\n', (18724, 18742), True, 'import spherical_functions as sf\n'), ((21123, 21134), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (21129, 21134), True, 'import numpy as np\n'), ((4456, 4492), 'numpy.linspace', 'np.linspace', (['begin', 'end'], {'num': 'n_times'}), '(begin, end, num=n_times)\n', (4467, 4492), True, 'import numpy as np\n'), ((6292, 6328), 'numpy.linspace', 'np.linspace', (['begin', 'end'], {'num': 'n_times'}), '(begin, end, num=n_times)\n', (6303, 6328), True, 'import numpy as np\n'), ((6905, 6937), 'numpy.exp', 'np.exp', (['(axis * (omega * t_i / 2))'], {}), '(axis * (omega * t_i / 2))\n', (6911, 6937), True, 'import numpy as np\n'), ((20020, 20055), 'numpy.exp', 'np.exp', (['(-(t[i0:] - t_merger) / 11.5)'], {}), '(-(t[i0:] - t_merger) / 11.5)\n', (20026, 20055), True, 'import numpy as np\n'), ((28335, 28365), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'bytes'}), '(vlen=bytes)\n', (28353, 28365), False, 'import h5py\n'), ((28955, 29005), 'numpy.vstack', 'np.vstack', (['(all_times, [coord_radii[i]] * n_times)'], {}), '((all_times, [coord_radii[i]] * n_times))\n', (28964, 29005), True, 'import numpy as np\n'), ((29907, 29952), 
'numpy.vstack', 'np.vstack', (['(all_times, [avg_lapse] * n_times)'], {}), '((all_times, [avg_lapse] * n_times))\n', (29916, 29952), True, 'import numpy as np\n'), ((1773, 1804), 'pprint.pformat', 'pprint.pformat', (['kwargs'], {'width': '(1)'}), '(kwargs, width=1)\n', (1787, 1804), False, 'import pprint\n'), ((2540, 2571), 'pprint.pformat', 'pprint.pformat', (['kwargs'], {'width': '(1)'}), '(kwargs, width=1)\n', (2554, 2571), False, 'import pprint\n'), ((3229, 3260), 'pprint.pformat', 'pprint.pformat', (['kwargs'], {'width': '(1)'}), '(kwargs, width=1)\n', (3243, 3260), False, 'import pprint\n'), ((4739, 4770), 'pprint.pformat', 'pprint.pformat', (['kwargs'], {'width': '(1)'}), '(kwargs, width=1)\n', (4753, 4770), False, 'import pprint\n'), ((4821, 4874), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(tin.shape[0], LM.shape[0], 2)'}), '(size=(tin.shape[0], LM.shape[0], 2))\n', (4837, 4874), True, 'import numpy as np\n'), ((6575, 6606), 'pprint.pformat', 'pprint.pformat', (['kwargs'], {'width': '(1)'}), '(kwargs, width=1)\n', (6589, 6606), False, 'import pprint\n'), ((9396, 9427), 'pprint.pformat', 'pprint.pformat', (['kwargs'], {'width': '(1)'}), '(kwargs, width=1)\n', (9410, 9427), False, 'import pprint\n'), ((11722, 11753), 'pprint.pformat', 'pprint.pformat', (['kwargs'], {'width': '(1)'}), '(kwargs, width=1)\n', (11736, 11753), False, 'import pprint\n'), ((14990, 15021), 'pprint.pformat', 'pprint.pformat', (['kwargs'], {'width': '(1)'}), '(kwargs, width=1)\n', (15004, 15021), False, 'import pprint\n'), ((21245, 21273), 'spherical_functions.LM_index', 'sf.LM_index', (['ell', 'm', 'ell_min'], {}), '(ell, m, ell_min)\n', (21256, 21273), True, 'import spherical_functions as sf\n'), ((23738, 23756), 'numpy.sqrt', 'np.sqrt', (['(np.pi / 5)'], {}), '(np.pi / 5)\n', (23745, 23756), True, 'import numpy as np\n'), ((27516, 27558), 'numpy.linspace', 'np.linspace', (['(1 / r_min)', '(1 / r_max)', 'n_radii'], {}), '(1 / r_min, 1 / r_max, n_radii)\n', (27527, 
27558), True, 'import numpy as np\n'), ((30247, 30287), 'numpy.log', 'np.log', (['(R / (2 * initial_adm_energy) - 1)'], {}), '(R / (2 * initial_adm_energy) - 1)\n', (30253, 30287), True, 'import numpy as np\n'), ((30418, 30457), 'numpy.sqrt', 'np.sqrt', (['(1 - 2 * initial_adm_energy / R)'], {}), '(1 - 2 * initial_adm_energy / R)\n', (30425, 30457), True, 'import numpy as np\n'), ((30585, 30605), 'spherical_functions.LM_index', 'sf.LM_index', (['l', 'm', '(2)'], {}), '(l, m, 2)\n', (30596, 30605), True, 'import spherical_functions as sf\n'), ((6671, 6710), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(LM.shape[0], 2)'}), '(size=(LM.shape[0], 2))\n', (6687, 6710), True, 'import numpy as np\n'), ((6785, 6817), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(3)'}), '(-1, 1, size=3)\n', (6802, 6817), True, 'import numpy as np\n'), ((14666, 14708), 'spherical_functions.Wigner3j', 'sf.Wigner3j', (['ellpp', 'ell', 'ellp', 'mpp', 'm', '(-mp)'], {}), '(ellpp, ell, ellp, mpp, m, -mp)\n', (14677, 14708), True, 'import spherical_functions as sf\n'), ((19200, 19215), 'numpy.isnan', 'np.isnan', (['omega'], {}), '(omega)\n', (19208, 19215), True, 'import numpy as np\n'), ((21362, 21372), 'numpy.sign', 'np.sign', (['m'], {}), '(m)\n', (21369, 21372), True, 'import numpy as np\n'), ((30864, 30902), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['simulation_time', 'new_data'], {}), '(simulation_time, new_data)\n', (30875, 30902), False, 'from scipy.interpolate import CubicSpline\n'), ((31111, 31163), 'numpy.vstack', 'np.vstack', (['(all_times, new_data.real, new_data.imag)'], {}), '((all_times, new_data.real, new_data.imag))\n', (31120, 31163), True, 'import numpy as np\n'), ((14600, 14639), 'spherical_functions.Wigner3j', 'sf.Wigner3j', (['ellpp', 'ell', 'ellp', '(0)', '(-s)', 's'], {}), '(ellpp, ell, ellp, 0, -s, s)\n', (14611, 14639), True, 'import spherical_functions as sf\n'), ((14840, 14870), 'spherical_functions.LM_index', 
'sf.LM_index', (['ellp', 'mp', 'ell_min'], {}), '(ellp, mp, ell_min)\n', (14851, 14870), True, 'import spherical_functions as sf\n'), ((4977, 5004), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', '(4)'], {}), '(-1, 1, 4)\n', (4994, 5004), True, 'import numpy as np\n'), ((14496, 14571), 'math.sqrt', 'math.sqrt', (['((2 * ellpp + 1) * (2 * ell + 1) * (2 * ellp + 1) / (4 * math.pi))'], {}), '((2 * ellpp + 1) * (2 * ell + 1) * (2 * ellp + 1) / (4 * math.pi))\n', (14505, 14571), False, 'import math\n'), ((22787, 22810), 'scipy.special.factorial2', 'factorial2', (['(2 * ell - 1)'], {}), '(2 * ell - 1)\n', (22797, 22810), False, 'from scipy.special import factorial, factorial2\n'), ((30764, 30813), 'numpy.exp', 'np.exp', (['(1.0j * n * 50 * np.pi / h0.n_times * h0.t)'], {}), '(1.0j * n * 50 * np.pi / h0.n_times * h0.t)\n', (30770, 30813), True, 'import numpy as np\n'), ((22759, 22784), 'scipy.special.factorial', 'factorial', (['((ell - m) // 2)'], {}), '((ell - m) // 2)\n', (22768, 22784), False, 'from scipy.special import factorial, factorial2\n'), ((22915, 22933), 'scipy.special.factorial', 'factorial', (['(ell - m)'], {}), '(ell - m)\n', (22924, 22933), False, 'from scipy.special import factorial, factorial2\n'), ((23370, 23393), 'scipy.special.factorial2', 'factorial2', (['(2 * ell + 1)'], {}), '(2 * ell + 1)\n', (23380, 23393), False, 'from scipy.special import factorial, factorial2\n'), ((22731, 22756), 'scipy.special.factorial', 'factorial', (['((ell + m) // 2)'], {}), '((ell + m) // 2)\n', (22740, 22756), False, 'from scipy.special import factorial, factorial2\n'), ((22894, 22912), 'scipy.special.factorial', 'factorial', (['(ell + m)'], {}), '(ell + m)\n', (22903, 22912), False, 'from scipy.special import factorial, factorial2\n'), ((23318, 23347), 'scipy.special.factorial', 'factorial', (['((ell - m - 1) // 2)'], {}), '((ell - m - 1) // 2)\n', (23327, 23347), False, 'from scipy.special import factorial, factorial2\n'), ((23519, 23537), 
'scipy.special.factorial', 'factorial', (['(ell - m)'], {}), '(ell - m)\n', (23528, 23537), False, 'from scipy.special import factorial, factorial2\n'), ((23266, 23295), 'scipy.special.factorial', 'factorial', (['((ell + m - 1) // 2)'], {}), '((ell + m - 1) // 2)\n', (23275, 23295), False, 'from scipy.special import factorial, factorial2\n'), ((23498, 23516), 'scipy.special.factorial', 'factorial', (['(ell + m)'], {}), '(ell + m)\n', (23507, 23516), False, 'from scipy.special import factorial, factorial2\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
" Location Head."
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import kaiming_uniform, normal
from alphastarmini.lib.hyper_parameters import Arch_Hyper_Parameters as AHP
from alphastarmini.lib.hyper_parameters import MiniStar_Arch_Hyper_Parameters as MAHP
from alphastarmini.lib.hyper_parameters import StarCraft_Hyper_Parameters as SCHP
from alphastarmini.lib.hyper_parameters import Scalar_Feature_Size as SFS
from alphastarmini.lib import utils as L
__author__ = "<NAME>"
debug = False
class ResBlockFiLM(nn.Module):
    """Residual block modulated by FiLM (Feature-wise Linear Modulation).

    Adapted from https://github.com/rosinality/film-pytorch/blob/master/model.py
    """

    def __init__(self, filter_size):
        super().__init__()
        # 1x1 projection followed by a 3x3 conv whose output is FiLM-modulated.
        self.conv1 = nn.Conv2d(filter_size, filter_size, kernel_size=[1, 1], stride=1, padding=0)
        self.conv2 = nn.Conv2d(filter_size, filter_size, kernel_size=[3, 3], stride=1, padding=1, bias=False)
        # affine=False: the scale/shift come from the FiLM generator, not from BN.
        self.bn = nn.BatchNorm2d(filter_size, affine=False)

        self.reset()

    def forward(self, x, gamma, beta):
        """out = relu(gamma * bn(conv2(residual)) + beta) + residual, residual = relu(conv1(x))."""
        residual = F.relu(self.conv1(x))
        modulated = self.bn(self.conv2(residual))
        # Broadcast the per-channel FiLM parameters over the spatial dims.
        modulated = gamma[:, :, None, None] * modulated + beta[:, :, None, None]
        return F.relu(modulated) + residual

    def reset(self):
        # Weight re-initialization is intentionally disabled (the previously
        # used kaiming_uniform initializer is deprecated); PyTorch's default
        # initialization is used instead.
        pass
class FiLM(nn.Module):
    """Stack of FiLM-gated residual blocks.

    Adapted from https://github.com/rosinality/film-pytorch/blob/master/model.py
    A single linear layer projects the gate vector to one (gamma, beta)
    pair per residual block.
    """

    def __init__(self, n_resblock=4, conv_hidden=128, gate_size=1024):
        super().__init__()
        self.n_resblock = n_resblock
        self.conv_hidden = conv_hidden

        self.resblocks = nn.ModuleList(ResBlockFiLM(conv_hidden) for _ in range(n_resblock))
        self.film_net = nn.Linear(gate_size, conv_hidden * 2 * n_resblock)

    def reset(self):
        # Custom initialization disabled; rely on PyTorch defaults.
        pass

    def forward(self, x, gate):
        # Split the projected gate into interleaved (gamma, beta) chunks,
        # one pair per residual block.
        film_params = self.film_net(gate).chunk(self.n_resblock * 2, 1)
        out = x
        for i, block in enumerate(self.resblocks):
            out = block(out, film_params[2 * i], film_params[2 * i + 1])
        return out
class FiLMplusMapSkip(nn.Module):
    """FiLM-gated residual stack that adds a matching skip tensor after each block.

    Thanks mostly to https://github.com/metataro/sc2_imitation_learning
    (spatial_decoder).
    """

    def __init__(self, n_resblock=4, conv_hidden=128, gate_size=1024):
        super().__init__()
        self.n_resblock = n_resblock
        self.conv_hidden = conv_hidden

        self.resblocks = nn.ModuleList(ResBlockFiLM(conv_hidden) for _ in range(n_resblock))
        self.film_net = nn.Linear(gate_size, conv_hidden * 2 * n_resblock)

    def reset(self):
        # Custom initialization disabled; rely on PyTorch defaults.
        pass

    def forward(self, x, gate, map_skip):
        # One (gamma, beta) pair per residual block, all produced by a single
        # linear projection of the gate vector.
        film_params = self.film_net(gate).chunk(self.n_resblock * 2, 1)
        out = x
        for i, block in enumerate(self.resblocks):
            out = block(out, film_params[2 * i], film_params[2 * i + 1])
            # Fuse the encoder skip connection that matches this depth.
            out = out + map_skip[i]
        # TODO: should we add a relu?
        return out
class LocationHead(nn.Module):
    '''
    Inputs: autoregressive_embedding, action_type, map_skip
    Outputs:
        target_location_logits - The logits corresponding to the probabilities of targeting each location
        target_location - The sampled target location
    '''

    def __init__(self, autoregressive_embedding_size=AHP.autoregressive_embedding_size,
                 output_map_size=SCHP.world_size, is_sl_training=True,
                 max_map_channels=AHP.location_head_max_map_channels,
                 temperature=0.8):
        super().__init__()
        # Use the FiLM variant that fuses map_skip inside the ResBlocks
        # (referenced from the sc2_imitation_learning project).
        self.use_improved_one = True

        self.is_sl_training = is_sl_training
        # During SL training we sample with temperature 1.0; otherwise the
        # configured temperature (AlphaStar uses 0.8) sharpens sampling.
        if not self.is_sl_training:
            self.temperature = temperature
        else:
            self.temperature = 1.0

        mmc = max_map_channels

        # 1x1 conv that fuses the reshaped autoregressive embedding
        # (4 extra channels) back down to mmc channels.
        self.ds_1 = nn.Conv2d(mmc + 4, mmc, kernel_size=1, stride=1,
                              padding=0, bias=True)

        self.film_blocks_num = 4
        if not self.use_improved_one:
            self.film_net = FiLM(n_resblock=self.film_blocks_num,
                                 conv_hidden=mmc,
                                 gate_size=autoregressive_embedding_size)
        else:
            self.film_net_mapskip = FiLMplusMapSkip(n_resblock=self.film_blocks_num,
                                                    conv_hidden=mmc,
                                                    gate_size=autoregressive_embedding_size)

        # Series of 2x-upsampling transposed convolutions with kernel size 4
        # and channel sizes mmc/2, mmc/4, mmc/8, ... down to 1.
        self.us_1 = nn.ConvTranspose2d(mmc, int(mmc / 2), kernel_size=4, stride=2,
                                       padding=1, bias=True)
        self.us_2 = nn.ConvTranspose2d(int(mmc / 2), int(mmc / 4),
                                       kernel_size=4, stride=2,
                                       padding=1, bias=True)
        self.us_3 = nn.ConvTranspose2d(int(mmc / 4), int(mmc / 8),
                                       kernel_size=4, stride=2,
                                       padding=1, bias=True)
        self.us_4 = nn.ConvTranspose2d(int(mmc / 8), int(mmc / 16),
                                       kernel_size=4, stride=2,
                                       padding=1, bias=True)
        self.us_4_original = nn.ConvTranspose2d(int(mmc / 8), 1,
                                                kernel_size=4, stride=2,
                                                padding=1, bias=True)

        # note: in mAS, we add a upsampling layer to transfer from 8x8 to 256x256
        self.us_5 = nn.ConvTranspose2d(int(mmc / 16), 1, kernel_size=4, stride=2,
                                       padding=1, bias=True)

        self.output_map_size = output_map_size
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, autoregressive_embedding, action_type, map_skip):
        '''
        Inputs:
            autoregressive_embedding: [batch_size x autoregressive_embedding_size]
            action_type: [batch_size x 1]
            map_skip: [batch_size x channel x height x width]
        Output:
            target_location_logits: [batch_size x self.output_map_size x self.output_map_size]
            location_out: [batch_size x 2 (x and y)]
        '''
        # AlphaStar: `autoregressive_embedding` is reshaped to have the same height/width as the final skip in `map_skip`
        # AlphaStar: (which was just before map information was reshaped to a 1D embedding) with 4 channels

        # sc2_imitation_learning: map_skip = list(reversed(map_skip))
        # sc2_imitation_learning: inputs, map_skip = map_skip[0], map_skip[1:]
        map_skip = list(reversed(map_skip))
        x, map_skip = map_skip[0], map_skip[1:]
        # Bug fix: this debug print previously accessed `map_skip.shape`, but at
        # this point map_skip is a Python list (no .shape) and would raise an
        # AttributeError whenever debug is enabled.
        print("x.shape:", x.shape) if debug else None

        batch_size = x.shape[0]
        assert autoregressive_embedding.shape[0] == action_type.shape[0]
        assert autoregressive_embedding.shape[0] == x.shape[0]

        reshap_size = x.shape[-1]
        print("autoregressive_embedding.shape:", autoregressive_embedding.shape) if debug else None

        ar_map = autoregressive_embedding.reshape(batch_size, -1, reshap_size, reshap_size)
        print("ar_map.shape:", ar_map.shape) if debug else None

        # AlphaStar: and the two are concatenated together along the channel dimension,
        # map skip shape: (-1, 128, 16, 16)
        # x shape: (-1, 132, 16, 16)
        x = torch.cat([ar_map, x], dim=1)
        print("x.shape:", x.shape) if debug else None

        # AlphaStar: passed through a ReLU,
        # AlphaStar: passed through a 2D convolution with 128 channels and kernel size 1,
        # AlphaStar: then passed through another ReLU.
        x = F.relu(self.ds_1(F.relu(x)))

        if not self.use_improved_one:
            # AlphaStar: The 3D tensor (height, width, and channels) is then passed through a series of Gated ResBlocks
            # AlphaStar: with 128 channels, kernel size 3, and FiLM, gated on `autoregressive_embedding`
            # note: FilM is Feature-wise Linear Modulation, please see the paper "FiLM: Visual Reasoning with
            # a General Conditioning Layer"
            # in here we use 4 Gated ResBlocks, and the value can be changed
            x = self.film_net(x, gate=autoregressive_embedding)

            # x shape (-1, 128, 16, 16)
            # AlphaStar: and using the elements of `map_skip` in order of last ResBlock skip to first.
            x = x + map_skip
        else:
            # Referenced mostly from "sc2_imitation_learning" project in spatial_decoder
            assert len(map_skip) == self.film_blocks_num

            # use the new FiLMplusMapSkip class
            x = self.film_net_mapskip(x, gate=autoregressive_embedding,
                                      map_skip=map_skip)

            # Compared to AS, we a relu, referred from "sc2_imitation_learning"
            x = F.relu(x)

        # AlphaStar: Afterwards, it is upsampled 2x by each of a series of transposed 2D convolutions
        # AlphaStar: with kernel size 4 and channel sizes 128, 64, 16, and 1 respectively
        # AlphaStar: (upsampled beyond the 128x128 input to 256x256 target location selection).
        x = F.relu(self.us_1(x))
        x = F.relu(self.us_2(x))
        x = F.relu(self.us_3(x))

        if AHP == MAHP:
            x = F.relu(self.us_4(x))
            # only in mAS, we need one more upsample step
            # x = F.relu(self.us_5(x))
            # Note: in the final layer, we don't use relu
            x = self.us_5(x)
        else:
            x = self.us_4_original(x)

        # AlphaStar: Those final logits are flattened and sampled (masking out invalid locations using `action_type`,
        # AlphaStar: such as those outside the camera for build actions) with temperature 0.8
        # AlphaStar: to get the actual target position.
        # x shape: (-1, 1, 256, 256)
        print('x.shape:', x.shape) if debug else None

        y = x.reshape(batch_size, 1 * self.output_map_size * self.output_map_size)
        device = next(self.parameters()).device
        print("y:", y) if debug else None
        print("y_.shape:", y.shape) if debug else None

        # Temperature scaling of the flattened logits before sampling.
        target_location_logits = y.div(self.temperature)
        print("target_location_logits:", target_location_logits) if debug else None
        print("target_location_logits.shape:", target_location_logits.shape) if debug else None

        # AlphaStar: (masking out invalid locations using `action_type`, such as those outside
        # the camera for build actions)
        # TODO: use action to decide the mask
        if True:
            # referenced from lib/utils.py function of masked_softmax()
            mask = torch.zeros(batch_size, 1 * self.output_map_size * self.output_map_size, device=device)
            mask = L.get_location_mask(mask)
            mask_fill_value = -1e32  # a very small number
            masked_vector = target_location_logits.masked_fill((1 - mask).bool(), mask_fill_value)
            target_location_probs = self.softmax(masked_vector)
        else:
            target_location_probs = self.softmax(target_location_logits)

        # Sample one flat location index per batch element.
        location_id = torch.multinomial(target_location_probs, num_samples=1, replacement=True)
        print("location_id:", location_id) if debug else None
        print("location_id.shape:", location_id.shape) if debug else None

        location_out = location_id.squeeze(-1).cpu().numpy().tolist()
        print("location_out:", location_out) if debug else None

        # Convert each flat index back to 2D (row, col) coordinates.
        for i, idx in enumerate(location_id):
            row_number = idx // self.output_map_size
            col_number = idx - self.output_map_size * row_number

            target_location_y = row_number
            target_location_x = col_number
            print("target_location_y, target_location_x", target_location_y, target_location_x) if debug else None

            # note! sc2 and pysc2 all accept the position as [x, y], so x be the first, y be the last!
            # this is not right : location_out[i] = [target_location_y.item(), target_location_x.item()]
            # below is right! so the location point map to the point in the matrix!
            location_out[i] = [target_location_x.item(), target_location_y.item()]

        # AlphaStar: If `action_type` does not involve targetting location, this head is ignored.
        target_location_mask = L.action_involve_targeting_location_mask(action_type)
        # target_location_mask: [batch_size x 1]
        print("target_location_mask:", target_location_mask) if debug else None
        print("location_out:", location_out) if debug else None

        location_out = np.array(location_out)
        print("location_out:", location_out) if debug else None

        location_out = torch.tensor(location_out, device=device)
        print("location_out:", location_out) if debug else None
        print("location_out.shape:", location_out.shape) if debug else None

        target_location_logits = target_location_logits.reshape(-1, self.output_map_size, self.output_map_size)

        # Zero out both the logits and the sampled locations for batch
        # elements whose action does not target a location.
        target_location_logits = target_location_logits * target_location_mask.float().unsqueeze(-1)
        location_out = location_out * target_location_mask.long()

        return target_location_logits, location_out
def test():
    """Smoke test: build a LocationHead and run a single forward pass."""
    batch_size = 2
    autoregressive_embedding = torch.randn(batch_size, AHP.autoregressive_embedding_size)
    # func: 65/Effect_PsiStorm_pt (1/queued [2]; 2/unit_tags [512]; 0/world [0, 0])
    action_type_sample = 65
    action_type = torch.randint(low=0, high=SFS.available_actions, size=(batch_size, 1))

    # Spatial skip tensors: 8x8 in the mini architecture, 16x16 otherwise.
    spatial_size = 8 if AHP == MAHP else 16
    map_skip = [torch.randn(batch_size, AHP.location_head_max_map_channels,
                            spatial_size, spatial_size)
                for _ in range(5)]

    location_head = LocationHead()

    print("autoregressive_embedding:", autoregressive_embedding) if debug else None
    print("autoregressive_embedding.shape:", autoregressive_embedding.shape) if 1 else None

    target_location_logits, target_location = \
        location_head.forward(autoregressive_embedding, action_type, map_skip)

    if target_location_logits is None:
        print("target_location_logits is None!")
    else:
        print("target_location_logits:", target_location_logits) if debug else None
        print("target_location_logits.shape:", target_location_logits.shape) if debug else None

    if target_location is None:
        print("target_location is None!")
    else:
        print("target_location:", target_location) if debug else None
        # print("target_location.shape:", target_location.shape) if debug else None

    print("This is a test!") if debug else None
if __name__ == '__main__':
    # Run the module's smoke test when executed as a script.
    test()
| [
"torch.nn.BatchNorm2d",
"torch.multinomial",
"torch.nn.Softmax",
"torch.nn.ModuleList",
"alphastarmini.lib.utils.get_location_mask",
"torch.nn.Conv2d",
"torch.randint",
"numpy.array",
"torch.tensor",
"torch.nn.functional.relu",
"torch.nn.Linear",
"torch.zeros",
"alphastarmini.lib.utils.actio... | [((14470, 14528), 'torch.randn', 'torch.randn', (['batch_size', 'AHP.autoregressive_embedding_size'], {}), '(batch_size, AHP.autoregressive_embedding_size)\n', (14481, 14528), False, 'import torch\n'), ((14658, 14728), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': 'SFS.available_actions', 'size': '(batch_size, 1)'}), '(low=0, high=SFS.available_actions, size=(batch_size, 1))\n', (14671, 14728), False, 'import torch\n'), ((837, 913), 'torch.nn.Conv2d', 'nn.Conv2d', (['filter_size', 'filter_size'], {'kernel_size': '[1, 1]', 'stride': '(1)', 'padding': '(0)'}), '(filter_size, filter_size, kernel_size=[1, 1], stride=1, padding=0)\n', (846, 913), True, 'import torch.nn as nn\n'), ((936, 1028), 'torch.nn.Conv2d', 'nn.Conv2d', (['filter_size', 'filter_size'], {'kernel_size': '[3, 3]', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(filter_size, filter_size, kernel_size=[3, 3], stride=1, padding=1,\n bias=False)\n', (945, 1028), True, 'import torch.nn as nn\n'), ((1044, 1085), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['filter_size'], {'affine': '(False)'}), '(filter_size, affine=False)\n', (1058, 1085), True, 'import torch.nn as nn\n'), ((1198, 1209), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (1204, 1209), True, 'import torch.nn.functional as F\n'), ((1422, 1433), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (1428, 1433), True, 'import torch.nn.functional as F\n'), ((2022, 2037), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2035, 2037), True, 'import torch.nn as nn\n'), ((2164, 2214), 'torch.nn.Linear', 'nn.Linear', (['gate_size', '(conv_hidden * 2 * n_resblock)'], {}), '(gate_size, conv_hidden * 2 * n_resblock)\n', (2173, 2214), True, 'import torch.nn as nn\n'), ((2995, 3010), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3008, 3010), True, 'import torch.nn as nn\n'), ((3137, 3187), 'torch.nn.Linear', 'nn.Linear', (['gate_size', '(conv_hidden 
* 2 * n_resblock)'], {}), '(gate_size, conv_hidden * 2 * n_resblock)\n', (3146, 3187), True, 'import torch.nn as nn\n'), ((4579, 4649), 'torch.nn.Conv2d', 'nn.Conv2d', (['(mmc + 4)', 'mmc'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(mmc + 4, mmc, kernel_size=1, stride=1, padding=0, bias=True)\n', (4588, 4649), True, 'import torch.nn as nn\n'), ((6489, 6507), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (6499, 6507), True, 'import torch.nn as nn\n'), ((8286, 8315), 'torch.cat', 'torch.cat', (['[ar_map, x]'], {'dim': '(1)'}), '([ar_map, x], dim=1)\n', (8295, 8315), False, 'import torch\n'), ((12137, 12210), 'torch.multinomial', 'torch.multinomial', (['target_location_probs'], {'num_samples': '(1)', 'replacement': '(True)'}), '(target_location_probs, num_samples=1, replacement=True)\n', (12154, 12210), False, 'import torch\n'), ((13453, 13506), 'alphastarmini.lib.utils.action_involve_targeting_location_mask', 'L.action_involve_targeting_location_mask', (['action_type'], {}), '(action_type)\n', (13493, 13506), True, 'from alphastarmini.lib import utils as L\n'), ((13729, 13751), 'numpy.array', 'np.array', (['location_out'], {}), '(location_out)\n', (13737, 13751), True, 'import numpy as np\n'), ((13841, 13882), 'torch.tensor', 'torch.tensor', (['location_out'], {'device': 'device'}), '(location_out, device=device)\n', (13853, 13882), False, 'import torch\n'), ((9806, 9815), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (9812, 9815), True, 'import torch.nn.functional as F\n'), ((11664, 11755), 'torch.zeros', 'torch.zeros', (['batch_size', '(1 * self.output_map_size * self.output_map_size)'], {'device': 'device'}), '(batch_size, 1 * self.output_map_size * self.output_map_size,\n device=device)\n', (11675, 11755), False, 'import torch\n'), ((11772, 11797), 'alphastarmini.lib.utils.get_location_mask', 'L.get_location_mask', (['mask'], {}), '(mask)\n', (11791, 11797), True, 'from alphastarmini.lib 
import utils as L\n'), ((8600, 8609), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (8606, 8609), True, 'import torch.nn.functional as F\n'), ((14828, 14893), 'torch.randn', 'torch.randn', (['batch_size', 'AHP.location_head_max_map_channels', '(8)', '(8)'], {}), '(batch_size, AHP.location_head_max_map_channels, 8, 8)\n', (14839, 14893), False, 'import torch\n'), ((14963, 15030), 'torch.randn', 'torch.randn', (['batch_size', 'AHP.location_head_max_map_channels', '(16)', '(16)'], {}), '(batch_size, AHP.location_head_max_map_channels, 16, 16)\n', (14974, 15030), False, 'import torch\n')] |
import unittest
import numpy as np
import tensorflow as tf
import twodlearn as tdl
class ConstrainedTest(unittest.TestCase):
def test_positive_variable(self):
with tf.Session().as_default():
test = tdl.constrained.PositiveVariableExp(
initial_value=lambda:
tf.exp(tf.truncated_normal_initializer()(shape=[5, 5])))
test.initializer.run()
x1 = test.value.eval()
test.initializer.run()
x2 = test.value.eval()
with self.assertRaises(AssertionError):
assert np.testing.assert_almost_equal(x1, x2)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"numpy.testing.assert_almost_equal",
"tensorflow.Session",
"tensorflow.truncated_normal_initializer"
] | [((661, 676), 'unittest.main', 'unittest.main', ([], {}), '()\n', (674, 676), False, 'import unittest\n'), ((589, 627), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['x1', 'x2'], {}), '(x1, x2)\n', (619, 627), True, 'import numpy as np\n'), ((178, 190), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (188, 190), True, 'import tensorflow as tf\n'), ((322, 355), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {}), '()\n', (353, 355), True, 'import tensorflow as tf\n')] |
import numpy as np
import wave
import struct
def note( pitch, beat ):
fs = 44000
amplitude = 30000
frequency = np.array( [ 261.6, 293.7, 329.6, 349.2, 392.0, 440.0, 493.9 ] )
num_samples = beat * fs
t = np.linspace( 0, beat, num_samples, endpoint = False )
a = np.linspace( 0, 1, num_samples, endpoint = False )
x = amplitude * a * np.cos( 2 * np.pi * frequency[ pitch - 1 ] * t )
return x
def main():
file = "little_bee.wav" # 檔案名稱
pitches = np.array( [ 5, 3, 3, 4, 2, 2, 1, 2, 3, 4, 5, 5, 5, \
5, 3, 3, 4, 2, 2, 1, 3, 5, 5, 3, \
2, 2, 2, 2, 2, 3, 4, 3, 3, 3, 3, 3, 4, 5, \
5, 3, 3, 4, 2, 2, 1, 3, 5, 5, 1 ] )
beats = np.array( [ 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, \
1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 4, \
1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, \
1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 4 ] )
tempo = 0.5 # 節奏(每拍0.5秒)
fs = 44000
duration = sum( beats ) * tempo
num_samples = int( duration * fs )
num_channels = 1 # 通道數
samwidth = 2 # 樣本寬度
num_frames = num_samples # 音框數 = 樣本數
comptype = "NONE" # 壓縮型態
compname = "not compressed" # 無壓縮
num_notes = np.size( pitches )
y = np.array( [ ] )
for i in range( num_notes ):
x = note( pitches[i], beats[i] * tempo )
y = np.append( y, x )
wav_file = wave.open( file, 'w' )
wav_file.setparams(( num_channels, samwidth, fs, num_frames, comptype, compname ))
for s in y:
wav_file.writeframes( struct.pack( 'h', int( s ) ) )
wav_file.close( )
main() | [
"wave.open",
"numpy.size",
"numpy.append",
"numpy.array",
"numpy.linspace",
"numpy.cos"
] | [((115, 174), 'numpy.array', 'np.array', (['[261.6, 293.7, 329.6, 349.2, 392.0, 440.0, 493.9]'], {}), '([261.6, 293.7, 329.6, 349.2, 392.0, 440.0, 493.9])\n', (123, 174), True, 'import numpy as np\n'), ((209, 258), 'numpy.linspace', 'np.linspace', (['(0)', 'beat', 'num_samples'], {'endpoint': '(False)'}), '(0, beat, num_samples, endpoint=False)\n', (220, 258), True, 'import numpy as np\n'), ((268, 314), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num_samples'], {'endpoint': '(False)'}), '(0, 1, num_samples, endpoint=False)\n', (279, 314), True, 'import numpy as np\n'), ((456, 621), 'numpy.array', 'np.array', (['[5, 3, 3, 4, 2, 2, 1, 2, 3, 4, 5, 5, 5, 5, 3, 3, 4, 2, 2, 1, 3, 5, 5, 3, 2,\n 2, 2, 2, 2, 3, 4, 3, 3, 3, 3, 3, 4, 5, 5, 3, 3, 4, 2, 2, 1, 3, 5, 5, 1]'], {}), '([5, 3, 3, 4, 2, 2, 1, 2, 3, 4, 5, 5, 5, 5, 3, 3, 4, 2, 2, 1, 3, 5,\n 5, 3, 2, 2, 2, 2, 2, 3, 4, 3, 3, 3, 3, 3, 4, 5, 5, 3, 3, 4, 2, 2, 1, 3,\n 5, 5, 1])\n', (464, 621), True, 'import numpy as np\n'), ((673, 838), 'numpy.array', 'np.array', (['[1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 4, 1,\n 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 4]'], {}), '([1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 1,\n 1, 4, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1,\n 1, 1, 4])\n', (681, 838), True, 'import numpy as np\n'), ((1153, 1169), 'numpy.size', 'np.size', (['pitches'], {}), '(pitches)\n', (1160, 1169), True, 'import numpy as np\n'), ((1178, 1190), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1186, 1190), True, 'import numpy as np\n'), ((1304, 1324), 'wave.open', 'wave.open', (['file', '"""w"""'], {}), "(file, 'w')\n", (1313, 1324), False, 'import wave\n'), ((340, 384), 'numpy.cos', 'np.cos', (['(2 * np.pi * frequency[pitch - 1] * t)'], {}), '(2 * np.pi * frequency[pitch - 1] * t)\n', (346, 384), True, 'import numpy as np\n'), ((1273, 1288), 'numpy.append', 'np.append', (['y', 'x'], {}), '(y, x)\n', (1282, 
1288), True, 'import numpy as np\n')] |
import sys
import queue
import random
import time
import copy
import numpy as np
from multiprocessing import Process,Queue
file_path=sys.argv[1]
termin_time=sys.argv[3]
random_seed=sys.argv[5]
start=time.time()
random.seed(random_seed)
f=open(file_path,encoding='utf-8')
sentimentlist = []
for line in f:
s = line.strip().split('\t')
slist=s[0].split()
sentimentlist.append(slist)
f.close()
vertices=0
depot=0
required=0
non_required=0
vehicles=0
capacity=0
total_cost=0
edge_list=[]
for i in sentimentlist:
if i[0]=='VERTICES':
vertices=int(i[2])
elif i[0]=='DEPOT':
depot=int(i[2])
elif i[0]=='REQUIRED':
required=int(i[3])
elif i[0]=='NON-REQUIRED':
non_required=int(i[3])
elif i[0]=='VEHICLES':
vehicles=int(i[2])
elif i[0]=='CAPACITY':
capacity=int(i[2])
elif i[0]=='TOTAL':
total_cost=int(i[6])
elif str.isdigit(i[0]):
edge_list.append(i)
class Node(object):
def __init__(self, dis, index):
self.dis = dis
self.index = index
def __lt__(self, other):
if self.dis!=other.dis:
return self.dis < other.dis
elif self.dis==other.dis:
p=np.random.rand()
if p>0.5 :return True
else: return False
class Edge(object):
def __init__(self, s, t ,c ,d ):
self.s = s
self.t = t
self.c = c
self.d = d
def __lt__(self, other):
if self.d!=other.d:
return self.d < other.d
elif self.d==other.d:
p=np.random.rand()
if p>0.5 :return True
else: return False
class Individual(object):
def __init__(self, gene,q):
self.gene=gene
self.q=q
def __lt__(self, other):
return self.q < other.q
class Graph:
def __init__(self,n_vertices,depot,required,non_required,vehicles,capacity,total_cost,edge_list):
self._n_vertices = n_vertices
self._depot=depot
self._required=required
self._non_required=non_required
self._vehicles=vehicles
self._capacity=capacity
self._total_cost=total_cost
self._edge_list=edge_list
self._all_distance= [[0 for _ in range(n_vertices+1)] for _ in range(n_vertices+1)]
self._adj = [[] for _ in range(n_vertices+1)]
self.cost_dic={}
self.demand_dic={}
self.task_dic={}
self.id_dic={}
idcounter=1
for i in self._edge_list:
s=int(i[0])
t=int(i[1])
c=int(i[2])
d=int(i[3])
self.add_edge(s,t)
self.add_edge(t,s)
self.cost_dic[(s,t)]=c
self.cost_dic[(t,s)]=c
self.demand_dic[(s,t)]=d
self.demand_dic[(t,s)]=d
self.task_dic[idcounter]=(s,t)
self.task_dic[-idcounter]=(t,s)
self.id_dic[(s,t)]=idcounter
self.id_dic[(t,s)]=-idcounter
idcounter+=1
# for i in range(1,n_vertices+1):
# for j in range(1,n_vertices+1):
# self._all_distance[i][j]=self.dijkstra(i, j)
for i in range(1,n_vertices+1):
for j in range(1,n_vertices+1):
if (i,j) in self.cost_dic:
self._all_distance[i][j]=self.cost_dic[(i,j)]
elif i==j:
self._all_distance[i][j]=0
else:
self._all_distance[i][j]=10000000000000
for k in range(1,n_vertices+1):
for i in range(1,n_vertices+1):
for j in range(1,n_vertices+1):
if self._all_distance[i][j]> self._all_distance[i][k]+self._all_distance[k][j]:
self._all_distance[i][j]=self._all_distance[i][k]+self._all_distance[k][j]
# print(time.time()-start)
self.tasklist=[]
self.tasklist2=[]
for i in self._edge_list:
s=int(i[0])
t=int(i[1])
c=int(i[2])
d=int(i[3])
if d!=0:
self.tasklist.append(Edge(s,t,c,d))
self.tasklist2.append([s,t,c,d])
def add_edge(self, s, t):
self._adj[s].append(t)
def dijkstra(self, s ,t):
S=set()
visit=set()
disdic={}
pq = queue.PriorityQueue()
for i in range(1,self._n_vertices+1):
if i !=s:
disdic[i]=1000000000000
pq.put_nowait(Node(1000000000000,i))
else:
disdic[i]=0
pq.put_nowait(Node(0,i))
while not pq.empty():
u = pq.get()
u_index=u.index
if u_index not in visit:
if u_index==t:
return u.dis
visit.add(u_index)
for i in self._adj[u_index]:
if disdic[u_index]+self.cost_dic[(u_index,i)] <disdic[i]:
pq.put_nowait(Node(disdic[u_index]+self.cost_dic[(u_index,i)],i))
disdic[i]=disdic[u_index]+self.cost_dic[(u_index,i)]
def finish_one_task(self,s,t):
cost_sum=0
cost_sum+=self._all_distance[self._depot][t]
cost_sum+=self._all_distance[self._depot][s]
cost_sum+=self.cost_dic[(s,t)]
return cost_sum
def gene_to_string(self,gene):
sline='s '
first=True
for i in gene:
if i==[]:
continue
first_task=True
for j in i:
# j=self.task_dic[j]
task=self.task_dic[j]
# task=j
if first:
addstr=f'0,({task[0]},{task[1]})'
sline=sline+addstr
first=False
first_task=False
else:
if first_task:
addstr=f',0,({task[0]},{task[1]})'
sline=sline+addstr
first_task=False
else:
addstr=f',({task[0]},{task[1]})'
sline=sline+addstr
addstr=f',0'
sline=sline+addstr
return sline
def gene_to_q(self,gene):
q=0
for i in gene:
now=self._depot
for j in i:
j=self.task_dic[j]
# if self._all_distance[j[0]][now]!=0:
# print(f'Dijkstra: go from {now} to {j[0]} and cost is {self._all_distance[j[0]][now]}')
# print(f'Cost: go from {j[0]} to {j[1]} and cost is {self.cost_dic[j]}')
q+=self._all_distance[j[0]][now]
q+=self.cost_dic[j]
now=j[1]
# if now!= self._depot:
# print(f'Dijkstra: go from {now} to {self._depot} and cost is {self._all_distance[now][self._depot]}')
q+=self._all_distance[now][self._depot]
# print('Next_Car')
return f'q {q}'
def get_q(self,gene):
q=0
for i in gene:
now=self._depot
for j in i:
j=self.task_dic[j]
# print(f'Dijkstra: go from {now} to {j[0]} and cost is {self._all_distance[j[0]][now]}')
# print(f'Cost: go from {j[0]} to {j[1]} and cost is {self.cost_dic[j]}')
q+=self._all_distance[j[0]][now]
q+=self.cost_dic[j]
now=j[1]
# if now!= self._depot:
# print(f'Dijkstra: go from {now} to {self._depot} and cost is {self._all_distance[now][self._depot]}')
q+=self._all_distance[now][self._depot]
# print('Next_Car')
return q
def gene_output(self,gene):
print(self.gene_to_string(gene[:]))
print(self.gene_to_q(gene[:]))
def get_gene(self):
tasklist=queue.PriorityQueue()
for i in self._edge_list:
s=int(i[0])
t=int(i[1])
c=int(i[2])
d=int(i[3])
if d!=0:
tasklist.put_nowait(Edge(s,t,c,d))
candidate=[]
route=[]
gene=[]
now=self._depot
task_sum=0
while not tasklist.empty():
while not tasklist.empty():
leastd=tasklist.get()
if leastd.d+task_sum<=self._capacity:
candidate.append(leastd)
else:
tasklist.put_nowait(leastd)
break
if len(candidate)==0:
task_sum=0
gene.append(route)
route=[]
now=self._depot
else:
min_distance=1000000000
min_list=[]
for i in range(len(candidate)):
taski=candidate[i]
disx=self._all_distance[taski.s][now]
disy=self._all_distance[taski.t][now]
if disx<min_distance :
min_list=[]
min_list.append((i,True))
min_distance=disx
elif disx==min_distance:
min_list.append((i,True))
min_distance=disx
if disy<min_distance:
min_list=[]
min_list.append((i,False))
min_distance=disy
elif disy==min_distance:
min_list.append((i,False))
min_distance=disy
k=random.randint(0,len(min_list)-1)
min_index=min_list[k][0]
min_s=min_list[k][1]
min_task=candidate.pop(min_index)
if not min_s:
temp=min_task.s
min_task.s=min_task.t
min_task.t=temp
for i in candidate:
tasklist.put_nowait(i)
candidate=[]
task_sum+=min_task.d
route.append(self.id_dic[(min_task.s,min_task.t)])
now=min_task.t
gene.append(route)
task_sum=0
now=self._depot
return gene
def get_gene2(self):
tasklist=copy.deepcopy(self.tasklist2)
route=[]
gene=[]
now=self._depot
task_sum=0
while len(tasklist)>0:
tasklist.sort(key = lambda x:min(graph._all_distance[now][x[0]],graph._all_distance[now][x[1]]))
min_list=[]
min_dis=min(self._all_distance[now][tasklist[0][0]],self._all_distance[now][tasklist[0][1]])
for i in tasklist:
if min(self._all_distance[now][i[0]],self._all_distance[now][i[1]])==min_dis and i[3]+task_sum<self._capacity :
min_list.append(i)
if min_list==[]:
task_sum=0
gene.append(route)
route=[]
now=self._depot
continue
np.random.shuffle(min_list)
min_task=min_list[0]
tasklist.remove(min_task)
task_sum+=min_task[3]
if self._all_distance[now][min_task[0]]<self._all_distance[now][min_task[1]]:
route.append(self.id_dic[(min_task[0],min_task[1])])
else:
route.append(self.id_dic[(min_task[1],min_task[0])])
now=min_task[1]
if now==self._depot:
task_sum=0
gene.append(route)
route=[]
gene.append(route)
task_sum=0
now=self._depot
return gene
def single_insertion(self,gene,p,k1,k2,k3):
routek=gene[k1]
if len(routek)>1:
pass
else:
k2=-1
if k2!=-1:
task_k_index=routek.pop(k2)
task_k=self.task_dic[task_k_index]
rp=random.random()
if rp < p:
if len(routek)==0:
insert_index=0
routek.append(task_k_index)
else:
insert_index=k3
if insert_index==0:
after=self.task_dic[routek[insert_index]]
disx=self._all_distance[self._depot][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][after[0]]
disy=self._all_distance[self._depot][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][after[0]]
if disx<disy:
routek.insert(insert_index,self.id_dic[task_k])
else:
routek.insert(insert_index,self.id_dic[(task_k[1],task_k[0])])
elif insert_index==(len(routek)-1):
before=self.task_dic[routek[insert_index]]
disx=self._all_distance[before[1]][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][self._depot]
disy=self._all_distance[before[1]][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][self._depot]
if disx<disy:
routek.append(self.id_dic[task_k])
else:
routek.append(self.id_dic[(task_k[1],task_k[0])])
else:
before=self.task_dic[routek[insert_index-1]]
after=self.task_dic[routek[insert_index]]
disx=self._all_distance[before[1]][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][after[0]]
disy=self._all_distance[before[1]][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][after[0]]
if disx<disy:
routek.insert(insert_index,self.id_dic[task_k])
else:
routek.insert(insert_index,self.id_dic[(task_k[1],task_k[0])])
else:
if routek==[]:
gene.pop(k1)
return gene
def double_insertion(self,gene,p,k1,k2,k3):
routek=gene[k1]
if len(routek)>2:
pass
else:
k2=-1
if k2!=-1:
task_k_index=routek.pop(k2)
task_k_index2=routek.pop(k2)
task_k=self.task_dic[task_k_index]
task_k2=self.task_dic[task_k_index2]
rp=random.random()
if rp < p:
if len(routek)==0:
routek.append(task_k_index)
else:
insert_index=k3
if insert_index== 0:
after=self.task_dic[routek[insert_index]]
disx=self._all_distance[self._depot][task_k[0]]+self._all_distance[task_k2[1]][after[0]]
disy=self._all_distance[self._depot][task_k2[1]]+self._all_distance[task_k[0]][after[0]]
if disx<disy:
routek.insert(insert_index,self.id_dic[(task_k[0],task_k[1])])
routek.insert(insert_index,self.id_dic[(task_k2[0],task_k2[1])])
else:
routek.insert(insert_index,self.id_dic[(task_k2[1],task_k2[0])])
routek.insert(insert_index,self.id_dic[(task_k[1],task_k[0])])
elif insert_index==(len(routek)-1):
before=self.task_dic[routek[insert_index]]
disx=self._all_distance[before[1]][task_k[0]]+self._all_distance[task_k2[1]][self._depot]
disy=self._all_distance[before[1]][task_k2[1]]+self._all_distance[task_k[0]][self._depot]
if disx<disy:
routek.append(self.id_dic[(task_k[0],task_k[1])])
routek.append(self.id_dic[(task_k2[0],task_k2[1])])
else:
routek.append(self.id_dic[(task_k2[1],task_k2[0])])
routek.append(self.id_dic[(task_k[1],task_k[0])])
else:
before=self.task_dic[routek[insert_index-1]]
after=self.task_dic[routek[insert_index]]
disx=self._all_distance[before[1]][task_k[0]]+self._all_distance[task_k2[1]][after[0]]
disy=self._all_distance[before[1]][task_k2[1]]+self._all_distance[task_k[0]][after[0]]
if disx<disy:
routek.insert(insert_index,self.id_dic[(task_k[0],task_k[1])])
routek.insert(insert_index,self.id_dic[(task_k2[0],task_k2[1])])
else:
routek.insert(insert_index,self.id_dic[(task_k2[1],task_k2[0])])
routek.insert(insert_index,self.id_dic[(task_k[1],task_k[0])])
else:
if routek==[]:
gene.pop(k1)
route=[]
route.append(self.id_dic[(task_k[0],task_k[1])])
route.append(self.id_dic[(task_k2[0],task_k2[1])])
gene.append(route)
return gene
def swap(self,gene,k1,k2,k3):
routek=gene[k1]
if len(routek)>2:
pass
else:
k2=-1
if k2!=-1:
task_k_index=routek[k2]
task_k_index2=routek[k3]
task_k=self.task_dic[task_k_index]
task_k2=self.task_dic[task_k_index2]
if k2==0:
after=self.task_dic[routek[k2+1]]
disx=self._all_distance[self._depot][task_k2[0]]+self._all_distance[task_k2[1]][after[0]]
disy=self._all_distance[self._depot][task_k2[1]]+self._all_distance[task_k2[0]][after[0]]
if disx<disy:
routek.pop(k2)
routek.insert(k2,self.id_dic[(task_k2[0],task_k2[1])])
else:
routek.pop(k2)
routek.insert(k2,self.id_dic[(task_k2[1],task_k2[0])])
elif k2== len(routek)-1:
before=self.task_dic[routek[k2-1]]
disx=self._all_distance[before[1]][task_k2[0]]+self._all_distance[task_k2[1]][self._depot]
disy=self._all_distance[before[1]][task_k2[1]]+self._all_distance[task_k2[0]][self._depot]
if disx<disy:
routek.pop(k2)
routek.insert(k2,self.id_dic[(task_k2[0],task_k2[1])])
else:
routek.pop(k2)
routek.insert(k2,self.id_dic[(task_k2[1],task_k2[0])])
else:
before=self.task_dic[routek[k2-1]]
after=self.task_dic[routek[k2+1]]
disx=self._all_distance[before[1]][task_k2[0]]+self._all_distance[task_k2[1]][after[0]]
disy=self._all_distance[before[1]][task_k2[1]]+self._all_distance[task_k2[0]][after[0]]
if disx<disy:
routek.pop(k2)
routek.insert(k2,self.id_dic[(task_k2[0],task_k2[1])])
else:
routek.pop(k2)
routek.insert(k2,self.id_dic[(task_k2[1],task_k2[0])])
if k3==0:
after=self.task_dic[routek[k3+1]]
disx=self._all_distance[self._depot][task_k[0]]+self._all_distance[task_k[1]][after[0]]
disy=self._all_distance[self._depot][task_k[1]]+self._all_distance[task_k[0]][after[0]]
if disx<disy:
routek.pop(k3)
routek.insert(k3,self.id_dic[(task_k[0],task_k[1])])
else:
routek.pop(k3)
routek.insert(k3,self.id_dic[(task_k[1],task_k[0])])
elif k3== len(routek)-1:
before=self.task_dic[routek[k3-1]]
disx=self._all_distance[before[1]][task_k[0]]+self._all_distance[task_k[1]][self._depot]
disy=self._all_distance[before[1]][task_k[1]]+self._all_distance[task_k[0]][self._depot]
if disx<disy:
routek.pop(k3)
routek.insert(k3,self.id_dic[(task_k[0],task_k[1])])
else:
routek.pop(k3)
routek.insert(k3,self.id_dic[(task_k[1],task_k[0])])
else:
before=self.task_dic[routek[k3-1]]
after=self.task_dic[routek[k3+1]]
disx=self._all_distance[before[1]][task_k[0]]+self._all_distance[task_k[1]][after[0]]
disy=self._all_distance[before[1]][task_k[1]]+self._all_distance[task_k[0]][after[0]]
if disx<disy:
routek.pop(k3)
routek.insert(k3,self.id_dic[(task_k[0],task_k[1])])
else:
routek.pop(k3)
routek.insert(k3,self.id_dic[(task_k[1],task_k[0])])
return gene
def list_2_tuple(self,lst):
result=[]
for i in lst:
ti=tuple(i)
result.append(ti)
return tuple(result)
def single_local_search(self,gene):
before=1000000
best_q=1000000
best_gene=gene
time_out=False
while True:
if time_out:
break
for i in range(len(gene)):
if time_out:
break
if len(gene[i])>1:
for j in range(len(gene[i])):
if time_out:
break
for k in range(len(gene[i])-1):
if time.time()-start>float(termin_time)-0.5:
time_out=True
if time_out:
break
copy_gene=copy.deepcopy(gene)
self.single_insertion(copy_gene,1,i,j,k)
q=self.get_q(copy_gene)
if q<best_q:
best_gene=copy_gene
best_q=q
if best_q==before:
break
else:
before=best_q
return best_gene
def double_local_search(self,gene):
before=1000000
best_q=1000000
best_gene=gene
time_out=False
while True:
if time_out:
break
for i in range(len(gene)):
if time_out:
break
if len(gene[i])>2:
for j in range(len(gene[i])-1):
if time_out:
break
for k in range(len(gene[i])-2):
if time.time()-start>float(termin_time)-0.5:
time_out=True
if time_out:
break
copy_gene=copy.deepcopy(gene)
self.double_insertion(copy_gene,1,i,j,k)
q=self.get_q(copy_gene)
if q<best_q:
best_gene=copy_gene
best_q=q
if best_q==before:
break
else:
before=best_q
return best_gene
def swap_local_search(self,gene):
before=1000000
best_q=1000000
best_gene=gene
time_out=False
while True:
if time_out:
break
for i in range(len(gene)):
if time_out:
break
if len(gene[i])>2:
for j in range(len(gene[i])):
if time_out:
break
for k in range(len(gene[i])):
if k!=j:
if time.time()-start>float(termin_time)-0.5:
time_out=True
if time_out:
break
copy_gene=copy.deepcopy(gene)
self.swap(copy_gene,i,j,k)
q=self.get_q(copy_gene)
if q<best_q:
best_gene=copy_gene
best_q=q
if best_q==before:
break
else:
before=best_q
return best_gene
def Ulusoy_split(self,ordered_list):
V=[0 for i in range(len(ordered_list)+1)]
P=[0 for i in range(len(ordered_list)+1)]
length=len(ordered_list)
for i in range(1,length+1):
V[i]=1000000000
for t in range(1,length+1):
i=t-1
j=i
load=0
cost=0
before_task=None
while j<length:
task=self.task_dic[ordered_list[j]]
load+=self.demand_dic[task]
if i==j:
cost=self._all_distance[self._depot][task[0]]+self.cost_dic[task]+self._all_distance[self._depot][task[1]]
else:
cost=self._all_distance[before_task[1]][task[0]]+self.cost_dic[task]+self._all_distance[self._depot][task[1]]-self._all_distance[self._depot][before_task[1]]
if load<=self._capacity:
v_new=V[t-1]+cost
if v_new<V[j+1]:
V[j+1]=v_new
P[j+1]=t-1
before_task=task
j+=1
else:
break
output=[]
j=length
ptr=P[j]
while ptr>0:
route=[]
for k in range(ptr,j):
route.append(ordered_list[k])
output.append(route)
j=ptr
ptr=P[j]
route=[]
for k in range(0,j):
route.append(ordered_list[k])
output.append(route)
return output
def flatten(self,gene):
output=[]
for i in gene:
for j in i:
output.append(j)
return output
def merge(self,gene,list):
output=[]
left=[]
for i in range(len(gene)):
if i in list:
output.append(gene[i])
else:
left.append(gene[i])
return output,left
def MS_local_search(self,gene):
min_split=None
min_left=None
min_score=10000000000
counter=0
for i in range(len(gene)):
for j in range(i+1,len(gene)):
counter+=1
if counter>100:
pass
else:
for i in range(5):
random_select,left=graph.merge(gene,[i,j])
split1=graph.Ulusoy_split(graph.PS1(copy.deepcopy(graph.flatten(random_select))))
split2=graph.Ulusoy_split(graph.PS2(copy.deepcopy(graph.flatten(random_select))))
split3=graph.Ulusoy_split(graph.PS3(copy.deepcopy(graph.flatten(random_select))))
split4=graph.Ulusoy_split(graph.PS4(copy.deepcopy(graph.flatten(random_select))))
split5=graph.Ulusoy_split(graph.PS5(copy.deepcopy(graph.flatten(random_select))))
score1=self.get_q(split1)
score2=self.get_q(split2)
score3=self.get_q(split3)
score4=self.get_q(split4)
score5=self.get_q(split5)
if score1<min_score:
min_score=score1
min_split=split1
min_left=left
if score2<min_score:
min_score=score2
min_split=split2
min_left=left
if score3<min_score:
min_score=score3
min_split=split3
min_left=left
if score4<min_score:
min_score=score4
min_split=split4
min_left=left
if score5<min_score:
min_score=score5
min_split=split5
min_left=left
for i in min_left:
min_split.append(i)
return min_split
def best_BIH(self):
population=queue.PriorityQueue()
gene_set=set()
counter=0
misstime=0
while time.time()-start<float(termin_time):
copy_gene=self.get_gene2()
tuple_gene=self.list_2_tuple(copy_gene)
if tuple_gene not in gene_set:
counter+=1
gene_set.add(tuple_gene)
new_individual=Individual(copy_gene, self.get_q(copy_gene))
population.put_nowait(new_individual)
misstime=0
else:
misstime+=1
if misstime>100:
break
best=population.get()
self.gene_output(best.gene)
def cross_over(self,gene1,gene2):
k1=random.randint(0,len(gene1)-1)
k2=random.randint(0,len(gene2)-1)
# print(k1)
# print(k2)
R1=gene1[k1]
R2=gene2[k2]
# print(f'R1 is {R1}')
# print(f'R2 is {R2}')
while len(R1)<2:
k1=random.randint(0,len(gene1)-1)
R1=gene1[k1]
while len(R2)<2:
k2=random.randint(0,len(gene2)-1)
R2=gene2[k2]
s1=random.randint(1,len(R1)-1)
s2=random.randint(1,len(R2)-1)
R11=R1[:s1]
R22=R2[s2:]
new=R11+R22
miss=[]
dup=[]
for i in new:
if i not in R1:
dup.append(i)
for i in R1:
if i not in new:
miss.append(i)
for i in dup:
new.remove(i)
for i in miss:
task_k=self.task_dic[i]
min_distance=1000000000
min_list=[]
for j in range(len(new)):
insert_index=j
if insert_index==0:
after=self.task_dic[new[insert_index]]
disx=self._all_distance[self._depot][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][after[0]]
disy=self._all_distance[self._depot][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][after[0]]
if disx<min_distance :
min_list=[]
min_list.append((j,True))
min_distance=disx
elif disx==min_distance:
min_list.append((j,True))
min_distance=disx
if disy<min_distance:
min_list=[]
min_list.append((j,False))
min_distance=disy
elif disy==min_distance:
min_list.append((j,False))
min_distance=disy
elif insert_index==(len(new)-1):
before=self.task_dic[new[insert_index]]
disx=self._all_distance[before[1]][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][self._depot]
disy=self._all_distance[before[1]][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][self._depot]
if disx<min_distance :
min_list=[]
min_list.append((j,True))
min_distance=disx
elif disx==min_distance:
min_list.append((j,True))
min_distance=disx
if disy<min_distance:
min_list=[]
min_list.append((j,False))
min_distance=disy
elif disy==min_distance:
min_list.append((j,False))
min_distance=disy
else:
before=self.task_dic[new[insert_index-1]]
after=self.task_dic[new[insert_index]]
disx=self._all_distance[before[1]][task_k[0]]+self.cost_dic[task_k]+self._all_distance[task_k[1]][after[0]]
disy=self._all_distance[before[1]][task_k[1]]+self.cost_dic[task_k]+self._all_distance[task_k[0]][after[0]]
if disx<min_distance :
min_list=[]
min_list.append((j,True))
min_distance=disx
elif disx==min_distance:
min_list.append((j,True))
min_distance=disx
if disy<min_distance:
min_list=[]
min_list.append((j,False))
min_distance=disy
elif disy==min_distance:
min_list.append((j,False))
min_distance=disy
k=random.randint(0,len(min_list)-1)
min_index=min_list[k][0]
min_s=min_list[k][1]
if not min_s:
task_k=(task_k[1],task_k[0])
new.insert(min_index,self.id_dic[(task_k[0],task_k[1])])
gene1.pop(k1)
gene1.insert(k1,new)
return gene1
def PS(self,unordered_list):
tasklist=[]
for i in unordered_list:
task=self.task_dic[i]
s=int(task[0])
t=int(task[1])
c=self.cost_dic[task]
d=self.demand_dic[task]
if d!=0:
tasklist.append((s,t,c,d,self._depot))
route=[]
gene=[]
now=self._depot
task_sum=0
while len(tasklist)>0:
tasklist.sort(key = lambda x:min(graph._all_distance[now][x[0]],graph._all_distance[now][x[1]]))
min_list=[]
min_dis=min(self._all_distance[now][tasklist[0][0]],self._all_distance[now][tasklist[0][1]])
for i in tasklist:
if min(self._all_distance[now][i[0]],self._all_distance[now][i[1]])==min_dis and i[3]+task_sum<self._capacity :
min_list.append(i)
if min_list==[]:
task_sum=0
gene.append(route)
route=[]
now=self._depot
break
np.random.shuffle(min_list)
min_task=min_list[0]
tasklist.remove(min_task)
task_sum+=min_task[3]
if self._all_distance[now][min_task[0]]<self._all_distance[now][min_task[1]]:
route.append(self.id_dic[(min_task[0],min_task[1])])
else:
route.append(self.id_dic[(min_task[1],min_task[0])])
now=min_task[1]
if now==self._depot:
task_sum=0
gene.append(route)
route=[]
gene.append(route)
task_sum=0
now=self._depot
return self.flatten(gene)
def PS1(self,unordered_list):
tasklist=queue.PriorityQueue()
for i in unordered_list:
task=self.task_dic[i]
s=int(task[0])
t=int(task[1])
c=self.cost_dic[task]
d=self.demand_dic[task]
if d!=0:
tasklist.put_nowait(Edge(s,t,c,d))
candidate=[]
route=[]
gene=[]
now=self._depot
task_sum=0
while not tasklist.empty():
while not tasklist.empty():
leastd=tasklist.get()
if leastd.d+task_sum<=self._capacity:
candidate.append(leastd)
else:
tasklist.put_nowait(leastd)
break
if len(candidate)==0:
task_sum=0
gene.append(route)
route=[]
now=self._depot
else:
min_distance=1000000000
min_list=[]
for i in range(len(candidate)):
taski=candidate[i]
disx=self._all_distance[taski.s][now]
disy=self._all_distance[taski.t][now]
if disx<min_distance :
min_list=[]
min_list.append((i,True))
min_distance=disx
elif disx==min_distance:
min_list.append((i,True))
min_distance=disx
if disy<min_distance:
min_list=[]
min_list.append((i,False))
min_distance=disy
elif disy==min_distance:
min_list.append((i,False))
min_distance=disy
k=0
max_distance=-1000000
for i in range(len(min_list)):
task=copy.deepcopy(candidate[min_list[i][0]])
s=min_list[i][1]
if not s:
temp=task.s
task.s=task.t
task.t=temp
dis=self._all_distance[self._depot][task.s]
if dis>max_distance:
k=i
max_distance=dis
min_index=min_list[k][0]
min_s=min_list[k][1]
min_task=candidate.pop(min_index)
if not min_s:
temp=min_task.s
min_task.s=min_task.t
min_task.t=temp
for i in candidate:
tasklist.put_nowait(i)
candidate=[]
task_sum+=min_task.d
route.append(self.id_dic[(min_task.s,min_task.t)])
now=min_task.t
gene.append(route)
task_sum=0
now=self._depot
return self.flatten(gene)
def PS2(self,unordered_list):
tasklist=queue.PriorityQueue()
for i in unordered_list:
task=self.task_dic[i]
s=int(task[0])
t=int(task[1])
c=self.cost_dic[task]
d=self.demand_dic[task]
if d!=0:
tasklist.put_nowait(Edge(s,t,c,d))
candidate=[]
route=[]
gene=[]
now=self._depot
task_sum=0
while not tasklist.empty():
while not tasklist.empty():
leastd=tasklist.get()
if leastd.d+task_sum<=self._capacity:
candidate.append(leastd)
else:
tasklist.put_nowait(leastd)
break
if len(candidate)==0:
task_sum=0
gene.append(route)
route=[]
now=self._depot
else:
min_distance=1000000000
min_list=[]
for i in range(len(candidate)):
taski=candidate[i]
disx=self._all_distance[taski.s][now]
disy=self._all_distance[taski.t][now]
if disx<min_distance :
min_list=[]
min_list.append((i,True))
min_distance=disx
elif disx==min_distance:
min_list.append((i,True))
min_distance=disx
if disy<min_distance:
min_list=[]
min_list.append((i,False))
min_distance=disy
elif disy==min_distance:
min_list.append((i,False))
min_distance=disy
k=0
min_distance=1000000
for i in range(len(min_list)):
task=copy.deepcopy(candidate[min_list[i][0]])
s=min_list[i][1]
if not s:
temp=task.s
task.s=task.t
task.t=temp
dis=self._all_distance[self._depot][task.s]
if dis<min_distance:
k=i
min_distance=dis
min_index=min_list[k][0]
min_s=min_list[k][1]
min_task=candidate.pop(min_index)
if not min_s:
temp=min_task.s
min_task.s=min_task.t
min_task.t=temp
for i in candidate:
tasklist.put_nowait(i)
candidate=[]
task_sum+=min_task.d
route.append(self.id_dic[(min_task.s,min_task.t)])
now=min_task.t
gene.append(route)
task_sum=0
now=self._depot
return self.flatten(gene)
def PS3(self,unordered_list):
tasklist=queue.PriorityQueue()
for i in unordered_list:
task=self.task_dic[i]
s=int(task[0])
t=int(task[1])
c=self.cost_dic[task]
d=self.demand_dic[task]
if d!=0:
tasklist.put_nowait(Edge(s,t,c,d))
candidate=[]
route=[]
gene=[]
now=self._depot
task_sum=0
while not tasklist.empty():
while not tasklist.empty():
leastd=tasklist.get()
if leastd.d+task_sum<=self._capacity:
candidate.append(leastd)
else:
tasklist.put_nowait(leastd)
break
if len(candidate)==0:
task_sum=0
gene.append(route)
route=[]
now=self._depot
else:
min_distance=1000000000
min_list=[]
for i in range(len(candidate)):
taski=candidate[i]
disx=self._all_distance[taski.s][now]
disy=self._all_distance[taski.t][now]
if disx<min_distance :
min_list=[]
min_list.append((i,True))
min_distance=disx
elif disx==min_distance:
min_list.append((i,True))
min_distance=disx
if disy<min_distance:
min_list=[]
min_list.append((i,False))
min_distance=disy
elif disy==min_distance:
min_list.append((i,False))
min_distance=disy
k=0
max_ratio=-1000000
for i in range(len(min_list)):
task=copy.deepcopy(candidate[min_list[i][0]])
s=min_list[i][1]
if not s:
temp=task.s
task.s=task.t
task.t=temp
ratio=task.d/task.c
if ratio>max_ratio:
k=i
max_ratio=ratio
min_index=min_list[k][0]
min_s=min_list[k][1]
min_task=candidate.pop(min_index)
if not min_s:
temp=min_task.s
min_task.s=min_task.t
min_task.t=temp
for i in candidate:
tasklist.put_nowait(i)
candidate=[]
task_sum+=min_task.d
route.append(self.id_dic[(min_task.s,min_task.t)])
now=min_task.t
gene.append(route)
task_sum=0
now=self._depot
return self.flatten(gene)
    def PS4(self,unordered_list):
        """Path-scanning heuristic: greedily build capacity-feasible routes,
        breaking nearest-task ties by the SMALLEST demand/cost ratio
        (mirror of PS3's tie-break rule).

        :param unordered_list: iterable of task ids (keys of self.task_dic).
        :return: flattened gene (task-id sequence) produced by self.flatten.
        """
        # Queue every task with non-zero demand; Edge's ordering drives get().
        tasklist=queue.PriorityQueue()
        for i in unordered_list:
            task=self.task_dic[i]
            s=int(task[0])
            t=int(task[1])
            c=self.cost_dic[task]
            d=self.demand_dic[task]
            if d!=0:
                tasklist.put_nowait(Edge(s,t,c,d))
        candidate=[]
        route=[]
        gene=[]
        now=self._depot
        task_sum=0
        while not tasklist.empty():
            # Collect every queued task that still fits the remaining capacity.
            while not tasklist.empty():
                leastd=tasklist.get()
                if leastd.d+task_sum<=self._capacity:
                    candidate.append(leastd)
                else:
                    # First over-capacity task: push it back and stop collecting.
                    tasklist.put_nowait(leastd)
                    break
            if len(candidate)==0:
                # Nothing fits any more: close this route, restart at the depot.
                task_sum=0
                gene.append(route)
                route=[]
                now=self._depot
            else:
                # Find the candidate endpoint(s) nearest to the current node.
                # min_list holds (candidate index, True if the s-endpoint is nearest).
                min_distance=1000000000
                min_list=[]
                for i in range(len(candidate)):
                    taski=candidate[i]
                    disx=self._all_distance[taski.s][now]
                    disy=self._all_distance[taski.t][now]
                    if disx<min_distance :
                        min_list=[]
                        min_list.append((i,True))
                        min_distance=disx
                    elif disx==min_distance:
                        min_list.append((i,True))
                        min_distance=disx
                    if disy<min_distance:
                        min_list=[]
                        min_list.append((i,False))
                        min_distance=disy
                    elif disy==min_distance:
                        min_list.append((i,False))
                        min_distance=disy
                k=0
                min_ratio=1000000
                # Tie-break: among equally-near tasks pick the smallest d/c ratio.
                for i in range(len(min_list)):
                    task=copy.deepcopy(candidate[min_list[i][0]])
                    s=min_list[i][1]
                    if not s:
                        # The nearest endpoint is t, so evaluate the task reversed.
                        temp=task.s
                        task.s=task.t
                        task.t=temp
                    ratio=task.d/task.c
                    if ratio<min_ratio:
                        k=i
                        min_ratio=ratio
                min_index=min_list[k][0]
                min_s=min_list[k][1]
                min_task=candidate.pop(min_index)
                if not min_s:
                    # Orient the chosen task so its s-endpoint is entered first.
                    temp=min_task.s
                    min_task.s=min_task.t
                    min_task.t=temp
                # Return the unused candidates to the queue for later routes.
                for i in candidate:
                    tasklist.put_nowait(i)
                candidate=[]
                task_sum+=min_task.d
                route.append(self.id_dic[(min_task.s,min_task.t)])
                now=min_task.t
        # Flush the last (possibly partial) route.
        gene.append(route)
        task_sum=0
        now=self._depot
        return self.flatten(gene)
    def PS5(self,unordered_list):
        """Path-scanning heuristic: greedily build capacity-feasible routes.

        Tie-break differs from PS3/PS4: while the route is less than half
        full, prefer the tied task whose (oriented) start node is FARTHEST
        from the depot; once half full, prefer the one NEAREST the depot.

        :param unordered_list: iterable of task ids (keys of self.task_dic).
        :return: flattened gene (task-id sequence) produced by self.flatten.
        """
        # Queue every task with non-zero demand; Edge's ordering drives get().
        tasklist=queue.PriorityQueue()
        for i in unordered_list:
            task=self.task_dic[i]
            s=int(task[0])
            t=int(task[1])
            c=self.cost_dic[task]
            d=self.demand_dic[task]
            if d!=0:
                tasklist.put_nowait(Edge(s,t,c,d))
        candidate=[]
        route=[]
        gene=[]
        now=self._depot
        task_sum=0
        while not tasklist.empty():
            # Collect every queued task that still fits the remaining capacity.
            while not tasklist.empty():
                leastd=tasklist.get()
                if leastd.d+task_sum<=self._capacity:
                    candidate.append(leastd)
                else:
                    # First over-capacity task: push it back and stop collecting.
                    tasklist.put_nowait(leastd)
                    break
            if len(candidate)==0:
                # Nothing fits any more: close this route, restart at the depot.
                task_sum=0
                gene.append(route)
                route=[]
                now=self._depot
            else:
                # Find the candidate endpoint(s) nearest to the current node.
                # min_list holds (candidate index, True if the s-endpoint is nearest).
                min_distance=1000000000
                min_list=[]
                for i in range(len(candidate)):
                    taski=candidate[i]
                    disx=self._all_distance[taski.s][now]
                    disy=self._all_distance[taski.t][now]
                    if disx<min_distance :
                        min_list=[]
                        min_list.append((i,True))
                        min_distance=disx
                    elif disx==min_distance:
                        min_list.append((i,True))
                        min_distance=disx
                    if disy<min_distance:
                        min_list=[]
                        min_list.append((i,False))
                        min_distance=disy
                    elif disy==min_distance:
                        min_list.append((i,False))
                        min_distance=disy
                k=0
                if task_sum<self._capacity/2:
                    # Route under half capacity: head away from the depot.
                    max_distance=-1000000
                    for i in range(len(min_list)):
                        task=copy.deepcopy(candidate[min_list[i][0]])
                        s=min_list[i][1]
                        if not s:
                            # The nearest endpoint is t, so evaluate reversed.
                            temp=task.s
                            task.s=task.t
                            task.t=temp
                        dis=self._all_distance[self._depot][task.s]
                        if dis>max_distance:
                            k=i
                            max_distance=dis
                else:
                    # Route over half capacity: start drifting back to the depot.
                    min_distance=1000000
                    for i in range(len(min_list)):
                        task=copy.deepcopy(candidate[min_list[i][0]])
                        s=min_list[i][1]
                        if not s:
                            # The nearest endpoint is t, so evaluate reversed.
                            temp=task.s
                            task.s=task.t
                            task.t=temp
                        dis=self._all_distance[self._depot][task.s]
                        if dis<min_distance:
                            k=i
                            min_distance=dis
                min_index=min_list[k][0]
                min_s=min_list[k][1]
                min_task=candidate.pop(min_index)
                if not min_s:
                    # Orient the chosen task so its s-endpoint is entered first.
                    temp=min_task.s
                    min_task.s=min_task.t
                    min_task.t=temp
                # Return the unused candidates to the queue for later routes.
                for i in candidate:
                    tasklist.put_nowait(i)
                candidate=[]
                task_sum+=min_task.d
                route.append(self.id_dic[(min_task.s,min_task.t)])
                now=min_task.t
        # Flush the last (possibly partial) route.
        gene.append(route)
        task_sum=0
        now=self._depot
        return self.flatten(gene)
    def memetic_evolution(self,psize):
        """Memetic algorithm: seed up to psize distinct genes, then evolve
        via crossover plus occasional local search until the time budget
        (module-level `termin_time`, measured from `start`) runs out.

        :param psize: target population size (may shrink if seeding stalls).
        :return: the best gene found (also emitted via self.gene_output).
        """
        pop=[]
        gene_set=set()
        # q_dict caches fitness per tuple-ised gene to avoid recomputation.
        q_dict={}
        while len(pop)<psize:
            trial=0
            copy_gene=None
            tuple_gene=None
            while True:
                trial+=1
                copy_gene=self.get_gene()
                tuple_gene=self.list_2_tuple(copy_gene)
                if trial==50 or tuple_gene not in gene_set:
                    break
            if tuple_gene in gene_set:
                # 50 attempts produced only duplicates: stop seeding early.
                break
            pop.append(copy_gene)
            gene_set.add(tuple_gene)
            q_dict[tuple_gene]=self.get_q(copy_gene)
        psize=len(pop)
        while time.time()-start<float(termin_time):
            popt=copy.deepcopy(pop)
            sett=copy.deepcopy(gene_set)
            for i in range(6*psize):
                if time.time()-start>float(termin_time):
                    break
                # Pick two distinct parents at random.
                s1=random.randint(0,psize-1)
                s2=random.randint(0,psize-1)
                while s1==s2:
                    s2=random.randint(0,psize-1)
                S1=pop[s1]
                S2=pop[s2]
                Sx_gene=self.cross_over(copy.deepcopy(S1),copy.deepcopy(S2))
                # print(f's1 {self.get_q(S1)} s2 {self.get_q(S2)} s3 {self.get_q(Sx_gene)}')
                r= random.random()
                if r<0.2:
                    # With probability 0.2 refine the child by one of three
                    # local searches, then try the merge-split (MS) operator.
                    rr=random.random()
                    Sls_gene=None
                    if rr<0.33:
                        Sls_gene=self.single_local_search(Sx_gene)
                    elif rr<0.66:
                        Sls_gene=self.double_local_search(Sx_gene)
                    else:
                        Sls_gene=self.swap_local_search(Sx_gene)
                    Sls_tuple=self.list_2_tuple(Sls_gene)
                    Sx_tuple=self.list_2_tuple(Sx_gene)
                    q_dict[Sls_tuple]=self.get_q(Sls_gene)
                    q_dict[Sx_tuple]=self.get_q(Sx_gene)
                    ms_gene=self.MS_local_search(Sls_gene)
                    ms_q=self.get_q(ms_gene)
                    q_dict[self.list_2_tuple(ms_gene)]=ms_q
                    if ms_q<q_dict[Sls_tuple]:
                        Sls_gene=ms_gene
                    if Sls_tuple not in sett:
                        popt.append(Sls_gene)
                        sett.add(Sls_tuple)
                    elif Sx_tuple not in sett:
                        popt.append(Sx_gene)
                        sett.add(Sx_tuple)
                else:
                    Sx_tuple=self.list_2_tuple(Sx_gene)
                    if Sx_tuple not in sett:
                        popt.append(Sx_gene)
                        sett.add(Sx_tuple)
                        q_dict[Sx_tuple]=self.get_q(Sx_gene)
            # Survivor selection: keep the psize fittest (lowest-q) genes.
            rank=queue.PriorityQueue()
            for i in popt:
                rank.put_nowait(Individual(i,q_dict[self.list_2_tuple(i)]))
            pop=[]
            for i in range(psize):
                pop.append(rank.get().gene)
            min_gene=None
            min_value=1000000000000
            for i in pop:
                q=q_dict[self.list_2_tuple(i)]
                if q<min_value:
                    min_gene=i
                    min_value=q
            # Emit the incumbent best once per generation.
            self.gene_output(min_gene)
        # NOTE(review): if the time budget is already spent, the loop never
        # runs and min_gene is unbound here -- presumably termin_time > 0.
        return min_gene
    def memetic_evolution2(self,psize):
        """Memetic-algorithm variant backed by a PriorityQueue population.

        Spends the first half of the time budget seeding distinct genes
        (randomly mixing get_gene/get_gene2), then evolves via crossover
        plus occasional local search for the remaining time.

        :param psize: size of the mating pool drawn each generation.
        :return: the best gene found (also emitted via self.gene_output).
        """
        population=queue.PriorityQueue()
        gene_set=set()
        counter=0
        while time.time()-start<float(termin_time)/2:
            r=np.random.rand()
            if r>0.5:
                copy_gene=self.get_gene2()
            else:
                copy_gene=self.get_gene()
            tuple_gene=self.list_2_tuple(copy_gene)
            if tuple_gene not in gene_set:
                counter+=1
                gene_set.add(tuple_gene)
                new_individual=Individual(copy_gene, self.get_q(copy_gene))
                population.put_nowait(new_individual)
        g=0
        while time.time()-start<float(termin_time):
            g+=1
            # Draw the current psize fittest individuals as the mating pool.
            poplist=[]
            for i in range(psize):
                poplist.append(population.get())
            # Pick two distinct parents at random.
            s1=random.randint(0,psize-1)
            s2=random.randint(0,psize-1)
            while s1==s2:
                s2=random.randint(0,psize-1)
            S1=poplist[s1]
            S2=poplist[s2]
            Sx_gene=self.cross_over(copy.deepcopy(S1.gene),copy.deepcopy(S2.gene))
            # print(f's1 {self.get_q(S1.gene)} s2 {self.get_q(S2.gene)} s3 {self.get_q(Sx_gene)}')
            tuple_Sx=self.list_2_tuple(Sx_gene)
            if tuple_Sx not in gene_set:
                counter+=1
                gene_set.add(tuple_Sx)
                Sx=Individual(Sx_gene, self.get_q(Sx_gene))
                population.put_nowait(Sx)
            r= random.random()
            if r<0.2:
                # Occasionally refine the child with one random local search
                # followed by the merge-split (MS) operator.
                rp= random.random()
                Sls_gene=None
                if rp <0.3:
                    Sls_gene=self.single_local_search(Sx_gene)
                elif rp<0.6:
                    Sls_gene=self.double_local_search(Sx_gene)
                else:
                    Sls_gene=self.swap_local_search(Sx_gene)
                tuple_Sls=self.list_2_tuple(Sls_gene)
                if tuple_Sls not in gene_set:
                    counter+=1
                    gene_set.add(tuple_Sls)
                    Sls=Individual(Sls_gene, self.get_q(Sls_gene))
                    population.put_nowait(Sls)
                MS_Sls_gene=self.MS_local_search(Sls_gene)
                tuple_Sls=self.list_2_tuple(MS_Sls_gene)
                # print(f's1 {self.get_q(S1.gene)} s2 {self.get_q(S2.gene)} s3 {self.get_q(Sx_gene)} Sls {self.get_q(Sls_gene)} Ms {self.get_q(MS_Sls_gene)}')
                if tuple_Sls not in gene_set:
                    counter+=1
                    gene_set.add(tuple_Sls)
                    Sls=Individual(MS_Sls_gene, self.get_q(MS_Sls_gene))
                    population.put_nowait(Sls)
            # Return the mating pool so its members stay in the population.
            for i in range(psize):
                population.put_nowait(poplist[i])
        best=population.get()
        self.gene_output(best.gene)
        return best.gene
    def mul_BIH(self,population):
        """Multi-process seeding worker: repeatedly build genes with
        get_gene2 and push each previously-unseen one (wrapped in an
        Individual) onto the shared `population` queue.

        Stops when the time budget (`termin_time` from `start`) expires or
        after more than 100 consecutive duplicate genes.

        :param population: a queue-like object with put() (e.g. a
            multiprocessing.Queue shared with the parent process).
        """
        gene_set=set()
        counter=0
        misstime=0
        while time.time()-start<float(termin_time):
            copy_gene=self.get_gene2()
            tuple_gene=self.list_2_tuple(copy_gene)
            if tuple_gene not in gene_set:
                counter+=1
                gene_set.add(tuple_gene)
                new_individual=Individual(copy_gene, self.get_q(copy_gene))
                population.put(new_individual)
                misstime=0
            else:
                misstime+=1
                if misstime>100:
                    break
    # best=population.get()
    # self.gene_output(best.gene)
# Build the shared problem graph from the instance data parsed earlier.
graph=Graph(vertices,depot,required,non_required,vehicles,capacity,total_cost,edge_list)
if __name__=='__main__':
    # Run eight BIH seeding workers in parallel; each one pushes
    # Individual objects onto the shared multiprocessing queue.
    population=Queue()
    processes =[Process(target=graph.mul_BIH,args=(population,)) for _ in range(8)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    # BUG FIX: results must be drained from the shared `population` queue.
    # The original called queue.get(), but `queue` is the stdlib module
    # (used for queue.PriorityQueue above) and has no module-level get(),
    # so this line raised AttributeError at runtime.
    results=[population.get() for _ in processes]
    # Individuals are orderable (they are used in PriorityQueue elsewhere),
    # so after sorting the fittest result is first.
    results.sort()
    graph.gene_output(results[0].gene)
"numpy.random.rand",
"multiprocessing.Process",
"queue.get",
"random.seed",
"random.random",
"copy.deepcopy",
"queue.PriorityQueue",
"multiprocessing.Queue",
"time.time",
"random.randint",
"numpy.random.shuffle"
] | [((200, 211), 'time.time', 'time.time', ([], {}), '()\n', (209, 211), False, 'import time\n'), ((212, 236), 'random.seed', 'random.seed', (['random_seed'], {}), '(random_seed)\n', (223, 236), False, 'import random\n'), ((58481, 58488), 'multiprocessing.Queue', 'Queue', ([], {}), '()\n', (58486, 58488), False, 'from multiprocessing import Process, Queue\n'), ((4351, 4372), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (4370, 4372), False, 'import queue\n'), ((7920, 7941), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (7939, 7941), False, 'import queue\n'), ((10338, 10367), 'copy.deepcopy', 'copy.deepcopy', (['self.tasklist2'], {}), '(self.tasklist2)\n', (10351, 10367), False, 'import copy\n'), ((29072, 29093), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (29091, 29093), False, 'import queue\n'), ((35913, 35934), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (35932, 35934), False, 'import queue\n'), ((38907, 38928), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (38926, 38928), False, 'import queue\n'), ((41900, 41921), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (41919, 41921), False, 'import queue\n'), ((44865, 44886), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (44884, 44886), False, 'import queue\n'), ((47829, 47850), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (47848, 47850), False, 'import queue\n'), ((54841, 54862), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (54860, 54862), False, 'import queue\n'), ((58505, 58554), 'multiprocessing.Process', 'Process', ([], {'target': 'graph.mul_BIH', 'args': '(population,)'}), '(target=graph.mul_BIH, args=(population,))\n', (58512, 58554), False, 'from multiprocessing import Process, Queue\n'), ((58675, 58686), 'queue.get', 'queue.get', ([], {}), '()\n', (58684, 58686), False, 'import queue\n'), ((11106, 11133), 'numpy.random.shuffle', 
'np.random.shuffle', (['min_list'], {}), '(min_list)\n', (11123, 11133), True, 'import numpy as np\n'), ((12003, 12018), 'random.random', 'random.random', ([], {}), '()\n', (12016, 12018), False, 'import random\n'), ((14581, 14596), 'random.random', 'random.random', ([], {}), '()\n', (14594, 14596), False, 'import random\n'), ((35219, 35246), 'numpy.random.shuffle', 'np.random.shuffle', (['min_list'], {}), '(min_list)\n', (35236, 35246), True, 'import numpy as np\n'), ((52142, 52160), 'copy.deepcopy', 'copy.deepcopy', (['pop'], {}), '(pop)\n', (52155, 52160), False, 'import copy\n'), ((52178, 52201), 'copy.deepcopy', 'copy.deepcopy', (['gene_set'], {}), '(gene_set)\n', (52191, 52201), False, 'import copy\n'), ((54280, 54301), 'queue.PriorityQueue', 'queue.PriorityQueue', ([], {}), '()\n', (54299, 54301), False, 'import queue\n'), ((54973, 54989), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (54987, 54989), True, 'import numpy as np\n'), ((55612, 55640), 'random.randint', 'random.randint', (['(0)', '(psize - 1)'], {}), '(0, psize - 1)\n', (55626, 55640), False, 'import random\n'), ((55653, 55681), 'random.randint', 'random.randint', (['(0)', '(psize - 1)'], {}), '(0, psize - 1)\n', (55667, 55681), False, 'import random\n'), ((56260, 56275), 'random.random', 'random.random', ([], {}), '()\n', (56273, 56275), False, 'import random\n'), ((1216, 1232), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1230, 1232), True, 'import numpy as np\n'), ((1574, 1590), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1588, 1590), True, 'import numpy as np\n'), ((29168, 29179), 'time.time', 'time.time', ([], {}), '()\n', (29177, 29179), False, 'import time\n'), ((52087, 52098), 'time.time', 'time.time', ([], {}), '()\n', (52096, 52098), False, 'import time\n'), ((52341, 52369), 'random.randint', 'random.randint', (['(0)', '(psize - 1)'], {}), '(0, psize - 1)\n', (52355, 52369), False, 'import random\n'), ((52386, 52414), 'random.randint', 
'random.randint', (['(0)', '(psize - 1)'], {}), '(0, psize - 1)\n', (52400, 52414), False, 'import random\n'), ((52736, 52751), 'random.random', 'random.random', ([], {}), '()\n', (52749, 52751), False, 'import random\n'), ((54919, 54930), 'time.time', 'time.time', ([], {}), '()\n', (54928, 54930), False, 'import time\n'), ((55435, 55446), 'time.time', 'time.time', ([], {}), '()\n', (55444, 55446), False, 'import time\n'), ((55725, 55753), 'random.randint', 'random.randint', (['(0)', '(psize - 1)'], {}), '(0, psize - 1)\n', (55739, 55753), False, 'import random\n'), ((55842, 55864), 'copy.deepcopy', 'copy.deepcopy', (['S1.gene'], {}), '(S1.gene)\n', (55855, 55864), False, 'import copy\n'), ((55865, 55887), 'copy.deepcopy', 'copy.deepcopy', (['S2.gene'], {}), '(S2.gene)\n', (55878, 55887), False, 'import copy\n'), ((56318, 56333), 'random.random', 'random.random', ([], {}), '()\n', (56331, 56333), False, 'import random\n'), ((57759, 57770), 'time.time', 'time.time', ([], {}), '()\n', (57768, 57770), False, 'import time\n'), ((37819, 37859), 'copy.deepcopy', 'copy.deepcopy', (['candidate[min_list[i][0]]'], {}), '(candidate[min_list[i][0]])\n', (37832, 37859), False, 'import copy\n'), ((40812, 40852), 'copy.deepcopy', 'copy.deepcopy', (['candidate[min_list[i][0]]'], {}), '(candidate[min_list[i][0]])\n', (40825, 40852), False, 'import copy\n'), ((43803, 43843), 'copy.deepcopy', 'copy.deepcopy', (['candidate[min_list[i][0]]'], {}), '(candidate[min_list[i][0]])\n', (43816, 43843), False, 'import copy\n'), ((46767, 46807), 'copy.deepcopy', 'copy.deepcopy', (['candidate[min_list[i][0]]'], {}), '(candidate[min_list[i][0]])\n', (46780, 46807), False, 'import copy\n'), ((52466, 52494), 'random.randint', 'random.randint', (['(0)', '(psize - 1)'], {}), '(0, psize - 1)\n', (52480, 52494), False, 'import random\n'), ((52587, 52604), 'copy.deepcopy', 'copy.deepcopy', (['S1'], {}), '(S1)\n', (52600, 52604), False, 'import copy\n'), ((52605, 52622), 'copy.deepcopy', 'copy.deepcopy', 
(['S2'], {}), '(S2)\n', (52618, 52622), False, 'import copy\n'), ((52811, 52826), 'random.random', 'random.random', ([], {}), '()\n', (52824, 52826), False, 'import random\n'), ((49793, 49833), 'copy.deepcopy', 'copy.deepcopy', (['candidate[min_list[i][0]]'], {}), '(candidate[min_list[i][0]])\n', (49806, 49833), False, 'import copy\n'), ((50364, 50404), 'copy.deepcopy', 'copy.deepcopy', (['candidate[min_list[i][0]]'], {}), '(candidate[min_list[i][0]])\n', (50377, 50404), False, 'import copy\n'), ((52258, 52269), 'time.time', 'time.time', ([], {}), '()\n', (52267, 52269), False, 'import time\n'), ((22150, 22169), 'copy.deepcopy', 'copy.deepcopy', (['gene'], {}), '(gene)\n', (22163, 22169), False, 'import copy\n'), ((23295, 23314), 'copy.deepcopy', 'copy.deepcopy', (['gene'], {}), '(gene)\n', (23308, 23314), False, 'import copy\n'), ((24491, 24510), 'copy.deepcopy', 'copy.deepcopy', (['gene'], {}), '(gene)\n', (24504, 24510), False, 'import copy\n'), ((21945, 21956), 'time.time', 'time.time', ([], {}), '()\n', (21954, 21956), False, 'import time\n'), ((23090, 23101), 'time.time', 'time.time', ([], {}), '()\n', (23099, 23101), False, 'import time\n'), ((24270, 24281), 'time.time', 'time.time', ([], {}), '()\n', (24279, 24281), False, 'import time\n')] |
import pandas as pd
import os
from PIL import Image
import numpy as np
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard
from keras import optimizers, losses, activations, models
from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, \
GlobalMaxPool2D, Concatenate, GlobalMaxPooling2D, GlobalAveragePooling2D, Lambda
from keras.applications.resnet50 import ResNet50
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from keras import backend as K
from tqdm import tqdm
from collections import Counter
def read_and_resize(filepath, input_shape=(256, 256)):
    """Load an image as RGB, resize it, and scale pixels to roughly [0, 1]."""
    rgb = Image.open(filepath).convert('RGB').resize(input_shape)
    pixels = np.array(rgb, dtype="uint8")
    # Normalise by the image maximum; the epsilon guards an all-black image.
    return np.array(pixels / (np.max(pixels) + 0.001), dtype="float32")
# Shared augmentation pipeline: small rotations, shifts and zooms plus
# horizontal flips, applied per-sample by augment() below.
datagen = ImageDataGenerator(
        rotation_range=6,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        zoom_range=0.1)
def augment(im_array):
    """Apply one random transform from the shared ImageDataGenerator."""
    return datagen.random_transform(im_array)
def gen(df, batch_size=32, aug=False):
    """Yield (images, labels) batches forever from a shuffled dataframe.

    Images are loaded via read_and_resize (optionally augmented); labels
    are integer age-bucket ids with shape (batch, 1).
    """
    # Shuffle once up front; subsequent epochs replay the same order.
    df = df.sample(frac=1)
    age_to_id = {'(0, 2)': 0,
                 '(4, 6)': 1,
                 '(8, 12)': 2,
                 '(15, 20)': 3,
                 '(25, 32)': 4,
                 '(38, 43)': 5,
                 '(48, 53)': 6,
                 '(60, 100)': 7}
    while True:
        for offset in range(0, df.shape[0], batch_size):
            chunk = df[offset:offset + batch_size]
            if aug:
                images = np.array([augment(read_and_resize(fp)) for fp in chunk.path.values])
            else:
                images = np.array([read_and_resize(fp) for fp in chunk.path.values])
            labels = np.array([age_to_id[a] for a in chunk.age.values])[..., np.newaxis]
            yield images, labels
def get_model(n_classes=1):
    """Build a ResNet50-backed classifier.

    n_classes == 1 -> sigmoid output with binary crossentropy; otherwise a
    softmax output with sparse categorical crossentropy.  The backbone is
    initialised from ImageNet weights and left trainable.
    """
    backbone = ResNet50(weights='imagenet', include_top=False)
    features = GlobalMaxPooling2D()(backbone.output)
    features = Dropout(0.5)(features)
    features = Dense(100, activation="relu")(features)
    features = Dropout(0.5)(features)
    if n_classes == 1:
        head = Dense(n_classes, activation="sigmoid")(features)
        model = Model(backbone.input, head, name="base_model")
        model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer="adam")
    else:
        head = Dense(n_classes, activation="softmax")(features)
        model = Model(backbone.input, head, name="base_model")
        model.compile(loss="sparse_categorical_crossentropy", metrics=['acc'], optimizer="adam")
    return model
def create_path(df, base_path):
    """Add a 'path' column pointing at each aligned face image.

    Mutates and returns the same dataframe.
    """
    def _row_path(row):
        # Layout: <base>/aligned/<user_id>/landmark_aligned_face.<face_id>.<original_image>
        return base_path + "aligned/" + row['user_id'] + \
            "/landmark_aligned_face.%s.%s" % (row['face_id'], row['original_image'])
    df['path'] = df.apply(_row_path, axis=1)
    return df
def filter_df(df):
    """Keep only rows whose age label is one of the eight known buckets.

    Adds a helper column 'f' (1 = known bucket) to the input dataframe;
    the column remains present in the returned, filtered frame.
    """
    known_ages = {'(0, 2)', '(4, 6)', '(8, 12)', '(15, 20)',
                  '(25, 32)', '(38, 43)', '(48, 53)', '(60, 100)'}
    df['f'] = df.age.apply(lambda a: int(a in known_ages))
    return df[df.f == 1]
if __name__ == "__main__":
    base_path = "/media/ml/data_ml/face_age_gender/"
    # Age buckets mapped to class ids (same mapping as gen/filter_df).
    dict_age = {'(0, 2)' : 0,
            '(4, 6)' : 1,
            '(8, 12)' : 2,
            '(15, 20)' : 3,
            '(25, 32)' : 4,
            '(38, 43)' : 5,
            '(48, 53)' : 6,
            '(60, 100)' : 7}
    # Number of independently trained models averaged per fold.
    bag = 3
    all_indexes = list(range(5))
    accuracies = []
    # 5-fold cross-validation over the dataset's pre-defined folds.
    for test_id in tqdm(all_indexes):
        train_id = [j for j in all_indexes if j!=test_id]
        print(train_id, test_id)
        train_df = pd.concat([pd.read_csv(base_path+"fold_%s_data.txt"%i, sep="\t") for i in train_id])
        test_df = pd.read_csv(base_path+"fold_%s_data.txt"%test_id, sep="\t")
        train_df = filter_df(train_df)
        test_df = filter_df(test_df)
        print(train_df.shape, test_df.shape)
        train_df = create_path(train_df, base_path=base_path)
        test_df = create_path(test_df, base_path=base_path)
        cnt_ave = 0
        predictions = 0
        test_images = np.array([read_and_resize(file_path) for file_path in test_df.path.values])
        test_labels = np.array([dict_age[a] for a in test_df.age.values])
        for k in tqdm(range(bag)):
            tr_tr, tr_val = train_test_split(train_df, test_size=0.1)
            file_path = "baseline_age.h5"
            # Keep best-validation weights; stop early / reduce LR on plateau.
            checkpoint = ModelCheckpoint(file_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
            early = EarlyStopping(monitor="val_acc", mode="max", patience=10)
            reduce_on_plateau = ReduceLROnPlateau(monitor="val_acc", mode="max", factor=0.1, patience=3)
            callbacks_list = [checkpoint, early, reduce_on_plateau] # early
            model = get_model(n_classes=len(dict_age))
            model.fit_generator(gen(tr_tr, aug=True), validation_data=gen(tr_val), epochs=200, verbose=2, workers=4,
                                callbacks=callbacks_list, steps_per_epoch=50, validation_steps=30)
            model.load_weights(file_path)
            predictions += model.predict(test_images)
            cnt_ave += 1
            # Test-time augmentation: also average predictions on the
            # horizontally flipped images (the flip persists across rounds).
            test_images = test_images[:, :, ::-1, :]
            predictions += model.predict(test_images)
            cnt_ave += 1
            K.clear_session()
        predictions = predictions/cnt_ave
        predictions = predictions.argmax(axis=-1)
        acc = accuracy_score(test_labels, predictions)
        print("accuracy : %s " %acc)
        accuracies.append(acc)
print("mean acc : %s (%s) " % (np.mean(accuracies), np.std(accuracies))) | [
"pandas.read_csv",
"keras.preprocessing.image.ImageDataGenerator",
"numpy.array",
"keras.layers.Dense",
"numpy.mean",
"numpy.max",
"keras.models.Model",
"keras.callbacks.EarlyStopping",
"keras.backend.clear_session",
"keras.layers.GlobalMaxPooling2D",
"sklearn.model_selection.train_test_split",
... | [((1031, 1156), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(6)', 'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'horizontal_flip': '(True)', 'zoom_range': '(0.1)'}), '(rotation_range=6, width_shift_range=0.1,\n height_shift_range=0.1, horizontal_flip=True, zoom_range=0.1)\n', (1049, 1156), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((905, 932), 'numpy.array', 'np.array', (['im'], {'dtype': '"""uint8"""'}), "(im, dtype='uint8')\n", (913, 932), True, 'import numpy as np\n'), ((2148, 2195), 'keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), "(weights='imagenet', include_top=False)\n", (2156, 2195), False, 'from keras.applications.resnet50 import ResNet50\n'), ((2574, 2619), 'keras.models.Model', 'Model', (['base_model.input', 'x'], {'name': '"""base_model"""'}), "(base_model.input, x, name='base_model')\n", (2579, 2619), False, 'from keras.models import Model\n'), ((3924, 3941), 'tqdm.tqdm', 'tqdm', (['all_indexes'], {}), '(all_indexes)\n', (3928, 3941), False, 'from tqdm import tqdm\n'), ((2302, 2322), 'keras.layers.GlobalMaxPooling2D', 'GlobalMaxPooling2D', ([], {}), '()\n', (2320, 2322), False, 'from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, Concatenate, GlobalMaxPooling2D, GlobalAveragePooling2D, Lambda\n'), ((2334, 2346), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2341, 2346), False, 'from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, Concatenate, GlobalMaxPooling2D, GlobalAveragePooling2D, Lambda\n'), ((2358, 2387), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (2363, 2387), False, 'from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, 
BatchNormalization, GlobalMaxPool2D, Concatenate, GlobalMaxPooling2D, GlobalAveragePooling2D, Lambda\n'), ((2399, 2411), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2406, 2411), False, 'from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, Concatenate, GlobalMaxPooling2D, GlobalAveragePooling2D, Lambda\n'), ((4158, 4221), 'pandas.read_csv', 'pd.read_csv', (["(base_path + 'fold_%s_data.txt' % test_id)"], {'sep': '"""\t"""'}), "(base_path + 'fold_%s_data.txt' % test_id, sep='\\t')\n", (4169, 4221), True, 'import pandas as pd\n'), ((4630, 4681), 'numpy.array', 'np.array', (['[dict_age[a] for a in test_df.age.values]'], {}), '([dict_age[a] for a in test_df.age.values])\n', (4638, 4681), True, 'import numpy as np\n'), ((5876, 5916), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_labels', 'predictions'], {}), '(test_labels, predictions)\n', (5890, 5916), False, 'from sklearn.metrics import accuracy_score\n'), ((820, 840), 'PIL.Image.open', 'Image.open', (['filepath'], {}), '(filepath)\n', (830, 840), False, 'from PIL import Image\n'), ((1971, 2020), 'numpy.array', 'np.array', (['[dict_age[g] for g in batch.age.values]'], {}), '([dict_age[g] for g in batch.age.values])\n', (1979, 2020), True, 'import numpy as np\n'), ((2450, 2488), 'keras.layers.Dense', 'Dense', (['n_classes'], {'activation': '"""sigmoid"""'}), "(n_classes, activation='sigmoid')\n", (2455, 2488), False, 'from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, Concatenate, GlobalMaxPooling2D, GlobalAveragePooling2D, Lambda\n'), ((2514, 2552), 'keras.layers.Dense', 'Dense', (['n_classes'], {'activation': '"""softmax"""'}), "(n_classes, activation='softmax')\n", (2519, 2552), False, 'from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, GlobalMaxPool2D, Concatenate, GlobalMaxPooling2D, 
GlobalAveragePooling2D, Lambda\n'), ((4748, 4789), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_df'], {'test_size': '(0.1)'}), '(train_df, test_size=0.1)\n', (4764, 4789), False, 'from sklearn.model_selection import train_test_split\n'), ((4859, 4953), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['file_path'], {'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(file_path, monitor='val_acc', verbose=1, save_best_only=\n True, mode='max')\n", (4874, 4953), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard\n'), ((4970, 5027), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'mode': '"""max"""', 'patience': '(10)'}), "(monitor='val_acc', mode='max', patience=10)\n", (4983, 5027), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard\n'), ((5061, 5133), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_acc"""', 'mode': '"""max"""', 'factor': '(0.1)', 'patience': '(3)'}), "(monitor='val_acc', mode='max', factor=0.1, patience=3)\n", (5078, 5133), False, 'from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard\n'), ((5749, 5766), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (5764, 5766), True, 'from keras import backend as K\n'), ((977, 993), 'numpy.max', 'np.max', (['im_array'], {}), '(im_array)\n', (983, 993), True, 'import numpy as np\n'), ((4066, 4123), 'pandas.read_csv', 'pd.read_csv', (["(base_path + 'fold_%s_data.txt' % i)"], {'sep': '"""\t"""'}), "(base_path + 'fold_%s_data.txt' % i, sep='\\t')\n", (4077, 4123), True, 'import pandas as pd\n'), ((6024, 6043), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (6031, 6043), True, 'import numpy as np\n'), ((6045, 6063), 'numpy.std', 
'np.std', (['accuracies'], {}), '(accuracies)\n', (6051, 6063), True, 'import numpy as np\n')] |
"""
What is denoising?
Noise is modeled as samples drawn from a certain distribution; it is
referred to by the name of the distribution it follows, e.g. Gaussian noise.
Mode (noise_type and noise_params):
- "gaussian", [standard deviation of the distribution]: Gaussian noise
- "poisson", [peak value]: Poisson noise
"""
import numpy as np
from typing import Tuple, Optional
from nnimgproc.processor import TargetProcessor
from nnimgproc.util.parameters import Parameters
# TargetProcessor for denoising
class DenoisingTargetProcessor(TargetProcessor):
    """Corrupt a clean image with synthetic noise for denoising training.

    Supported (noise_type, noise_params) combinations:
      - "gaussian", [sigma]: additive Gaussian noise with std `sigma`
      - "poisson", [peak]:   Poisson noise at the given peak value
    """
    def __init__(self, noise_type: str, noise_params: list):
        super(DenoisingTargetProcessor, self).__init__()
        self._noise_type = noise_type
        self._noise_params = noise_params
        self._params = Parameters()

    def __call__(self, img: np.ndarray) \
            -> Tuple[np.ndarray, np.ndarray, Optional[Parameters]]:
        """Return (noisy, clean, params); the noisy image is clipped to [0, 1]."""
        clean = img
        if self._noise_type == 'gaussian':
            sigma = float(self._noise_params[0])
            if sigma == 0:
                noisy = np.copy(img)
            else:
                noisy = img + np.random.normal(0, 1, img.shape) * sigma
            self._params.set('noise_level', sigma)
        elif self._noise_type == "poisson":
            peak = float(self._noise_params[0])
            noisy = np.random.poisson(img * peak) / peak
            self._params.set('noise_level', peak)
        else:
            raise NotImplementedError('%s noise is not implemented' %
                                      self._noise_type)
        return noisy.clip(0, 1), clean, self._params
| [
"nnimgproc.util.parameters.Parameters",
"numpy.copy",
"numpy.random.poisson",
"numpy.random.normal"
] | [((784, 796), 'nnimgproc.util.parameters.Parameters', 'Parameters', ([], {}), '()\n', (794, 796), False, 'from nnimgproc.util.parameters import Parameters\n'), ((1087, 1099), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (1094, 1099), True, 'import numpy as np\n'), ((1348, 1377), 'numpy.random.poisson', 'np.random.poisson', (['(img * peak)'], {}), '(img * peak)\n', (1365, 1377), True, 'import numpy as np\n'), ((1145, 1178), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'img.shape'], {}), '(0, 1, img.shape)\n', (1161, 1178), True, 'import numpy as np\n')] |
from nltk.tag import pos_tag
import numpy as np
import pandas as pd
from transformers import TFGPT2LMHeadModel, GPT2Tokenizer
from string import punctuation
from os import path, listdir
import pickle
import copy
from helper_functions import get_embeddings
from nltk import word_tokenize
#Model
MODEL_ID_GPT2 = 'gpt2'
MODEL_GPT2 = TFGPT2LMHeadModel.from_pretrained(MODEL_ID_GPT2, output_hidden_states = True, output_attentions = False)
MODEL_TOKENIZER_GPT2 = GPT2Tokenizer.from_pretrained(MODEL_ID_GPT2)
#Embedding Property Lists
pleasant = sorted(list(set('caress,freedom,health,love,peace,cheer,friend,heaven,loyal,pleasure,diamond,gentle,honest,lucky,rainbow,diploma,gift,honor,miracle,sunrise,family,happy,laughter,paradise,vacation'.split(','))))
unpleasant = sorted(list(set('abuse,crash,filth,murder,sickness,accident,death,grief,poison,stink,assault,disaster,hatred,pollute,tragedy,divorce,jail,poverty,ugly,cancer,kill,rotten,vomit,agony,prison'.split(','))))
dominant = sorted(list(set('power,command,control,master,rule,authority,strong,superior,dominant,confident,leader,king,victory,mighty,bravery,triumph,win,success,fame,glory,respect,honor,champion,advantage,capable'.split(','))))
submissive = sorted(list(set('subordinate,weak,disadvantage,helpless,insecure,failure,lonely,humiliate,coward,feeble,inferior,embarrassed,victim,afraid,timid,shame,defeat,panic,disappointment,impotence,shy,nervous,meek,fearful,distressed'.split(','))))
arousal = sorted(list(set('thrill,excitement,desire,sex,ecstasy,erotic,passion,infatuation,lust,flirt,murder,rage,assault,danger,terror,fight,scream,violent,startled,alert,anger,laughter,surprise,intruder,aroused'.split(','))))
indifference = sorted(list(set('relaxed,sleep,quiet,bored,subdued,peace,indifferent,secure,gentle,cozy,bland,reserved,slow,plain,solemn,polite,tired,weary,safe,comfort,protected,dull,soothing,leisure,placid'.split(','))))
#WEAT Names
ea_name_male = sorted(list(set('Adam,Harry,Josh,Roger,Alan,Frank,Justin,Ryan,Andrew,Jack,Matthew,Stephen,Brad,Greg,Paul,Jonathan,Peter,Brad,Brendan,Geoffrey,Greg,Brett,Matthew,Neil,Todd'.split(','))))
ea_name_female = sorted(list(set('Amanda,Courtney,Heather,Melanie,Katie,Betsy,Kristin,Nancy,Stephanie,Ellen,Lauren,Colleen,Emily,Megan,Rachel,Allison,Anne,Carrie,Emily,Jill,Laurie,Meredith,Sarah'.split(','))))
aa_name_male = sorted(list(set('Alonzo,Jamel,Theo,Alphonse,Jerome,Leroy,Torrance,Darnell,Lamar,Lionel,Tyree,Deion,Lamont,Malik,Terrence,Tyrone,Lavon,Marcellus,Wardell,Darnell,Hakim,Jermaine,Kareem,Jamal,Leroy,Rasheed,Tyrone'.split(','))))
aa_name_female = sorted(list(set('Nichelle,Shereen,Ebony,Latisha,Shaniqua,Jasmine,Tanisha,Tia,Lakisha,Latoya,Yolanda,Malika,Yvette,Aisha,Ebony,Keisha,Kenya,Lakisha,Latoya,Tamika,Tanisha'.split(','))))
#Full WEAT
# Word-Embedding Association Test stimulus lists. Kept as ordered literals
# (duplicates across lists are intentional; dedup happens later via set()).
pleasant = ['caress','freedom','health','love','peace','cheer','friend','heaven','loyal','pleasure','diamond','gentle','honest','lucky','rainbow','diploma','gift','honor','miracle','sunrise','family','happy','laughter','paradise','vacation']
# NOTE(review): this reassigns `unpleasant`, replacing the sorted/deduplicated
# set built above with an ordered list — confirm that is intended.
unpleasant = ['abuse','crash','filth','murder','sickness','accident','death','grief','poison','stink','assault','disaster','hatred','pollute','tragedy','divorce','jail','poverty','ugly','cancer','kill','rotten','vomit','agony','prison']
# Target categories (flowers/insects, instruments/weapons).
flower = ['aster','clover','hyacinth','marigold','poppy','azalea','crocus','iris','orchid','rose','bluebell','daffodil','lilac','pansy','tulip','buttercup','daisy','lily','peony','violet','carnation','gladiola','magnolia','petunia','zinnia']
insect = ['ant','caterpillar','flea','locust','spider','bedbug','centipede','fly','maggot','tarantula','bee','cockroach','gnat','mosquito','termite','beetle','cricket','hornet','moth','wasp','blackfly','dragonfly','horsefly','roach','weevil']
instrument = ['bagpipe','cello','guitar','lute','trombone','banjo','clarinet','harmonica','mandolin','trumpet','bassoon','drum','harp','oboe','tuba','bell','fiddle','harpsichord','piano','viola','bongo','flute','horn','saxophone','violin']
weapon = ['arrow','club','gun','missile','spear','axe','dagger','harpoon','pistol','sword','blade','dynamite','hatchet','rifle','tank','bomb','firearm','knife','shotgun','teargas','cannon','grenade','mace','slingshot','whip']
# Name lists (European-American vs African-American; two stimulus sets each).
ea_name = ['Adam','Harry','Josh','Roger','Alan','Frank','Justin','Ryan','Andrew','Jack','Matthew','Stephen','Brad','Greg','Paul','Jonathan','Peter','Amanda','Courtney','Heather','Melanie','Katie','Betsy','Kristin','Nancy','Stephanie','Ellen','Lauren','Colleen','Emily','Megan','Rachel']
aa_name = ['Alonzo','Jamel','Theo','Alphonse','Jerome','Leroy','Torrance','Darnell','Lamar','Lionel','Tyree','Deion','Lamont','Malik','Terrence','Tyrone','Lavon','Marcellus','Wardell','Nichelle','Shereen','Ebony','Latisha','Shaniqua','Jasmine','Tanisha','Tia','Lakisha','Latoya','Yolanda','Malika','Yvette']
ea_name_2 = ['Brad','Brendan','Geoffrey','Greg','Brett','Matthew','Neil','Todd','Allison','Anne','Carrie','Emily','Jill','Laurie','Meredith','Sarah']
aa_name_2 = ['Darnell','Hakim','Jermaine','Kareem','Jamal','Leroy','Rasheed','Tyrone','Aisha','Ebony','Keisha','Kenya','Lakisha','Latoya','Tamika','Tanisha']
# Shorter pleasant/unpleasant attribute sets.
pleasant_2 = ['joy','love','peace','wonderful','pleasure','friend','laughter','happy']
unpleasant_2 = ['agony','terrible','horrible','nasty','evil','war','awful','failure']
# Gender/career/academic attribute and target sets.
career = ['executive','management','professional','corporation','salary','office','business','career']
domestic = ['home','parents','children','family','cousins','marriage','wedding','relatives']
male_name = ['John','Paul','Mike','Kevin','Steve','Greg','Jeff','Bill']
female_name = ['Amy','Joan','Lisa','Sarah','Diana','Kate','Ann','Donna']
male = ['male','man','boy','brother','he','him','his','son']
female = ['female','woman','girl','sister','she','her','hers','daughter']
mathematics = ['math','algebra','geometry','calculus','equations','computation','numbers','addition']
art = ['poetry','art','dance','literature','novel','symphony','drama','sculpture']
male_2 = ['brother','father','uncle','grandfather','son','he','his','him']
female_2 = ['sister','mother','aunt','grandmother','daughter','she','hers','her']
science = ['science','technology','physics','chemistry','Einstein','NASA','experiment','astronomy']
art_2 = ['poetry','art','Shakespeare','dance','literature','novel','symphony','drama']
# Illness framing and age-name sets.
temporary = ['impermanent','unstable','variable','fleeting','short-term','brief','occasional']
permanent = ['stable','always','constant','persistent','chronic','prolonged','forever']
mental = ['sad','hopeless','gloomy','tearful','miserable','depressed']
physical = ['sick','illness','influenza','disease','virus','cancer']
young = ['Tiffany','Michelle','Cindy','Kristy','Brad','Eric','Joey','Billy']
old = ['Ethel','Bernice','Gertrude','Agnes','Cecil','Wilbert','Mortimer','Edgar']
#Scripting Area
# Pool of every WEAT term, deduplicated (order is arbitrary set-iteration order).
weat_terms = list(set(flower + insect + instrument + weapon + ea_name + aa_name + ea_name_2 + aa_name_2 + pleasant + unpleasant + pleasant_2 + unpleasant_2 + young + old + male_name + female_name + career + domestic + male + female + science + mathematics + art + art_2))
# Stereotypically pleasant / unpleasant / valence-neutral WEAT subsets, used
# later to assign flat valence priors to terms absent from the lexica.
pleasant_weat = list(set(flower + instrument + ea_name + ea_name_2 + pleasant + pleasant_2 + young))
unpleasant_weat = list(set(insect + weapon + aa_name + aa_name_2 + unpleasant + unpleasant_2 + old))
neutral_weat = list(set(male_name + female_name + career + domestic + male + female + science + mathematics + art + art_2))
# Model handles — presumably defined earlier in this file as a GPT-2 model and
# its tokenizer (TODO confirm; not visible in this chunk).
CURRENT_MODEL = MODEL_GPT2
CURRENT_TOKENIZER = MODEL_TOKENIZER_GPT2
WRITE_MODEL = 'gpt2'  # tag used in the output pickle filenames
TEMPLATE = 'This is WORD'  # "bleached" context template; WORD is substituted per term
DUMP_PATH = f'D:\\cwe_dictionaries'  # output directory for embedding pickles
TENSOR_TYPE = 'tf'  # framework flag passed to get_embeddings (TensorFlow tensors)
#Load in lexica
# Bellezza norms: word plus a combined pleasantness rating.
bellezza = pd.read_csv('Bellezza_Lexicon.csv')
bellezza_terms = bellezza['word'].to_list()
bellezza_valence = bellezza['combined_pleasantness'].to_list()
bellezza_valence_dict = {bellezza_terms[idx]: bellezza_valence[idx] for idx in range(len(bellezza_terms))}
# ANEW norms: valence / dominance / arousal means and standard deviations.
anew = pd.read_csv('ANEW.csv')
anew_terms = anew['Description'].to_list()
anew_valence = anew['Valence Mean'].to_list()
anew_dominance = anew['Dominance Mean'].to_list()
anew_arousal = anew['Arousal Mean'].to_list()
anew_sd_valence = anew['Valence SD'].to_list()
anew_sd_dominance = anew['Dominance SD'].to_list()
anew_sd_arousal = anew['Arousal SD'].to_list()
anew_valence_dict = {anew_terms[idx]: anew_valence[idx] for idx in range(len(anew_terms))}
# Warriner norms: summed valence/arousal/dominance statistics.
warriner = pd.read_csv('Warriner_Lexicon.csv')
warriner_terms = warriner['Word'].to_list()
# Patch row 8289 — presumably the word "null", which pandas parses as NaN;
# restore it as a string. TODO confirm against the CSV.
warriner_terms[8289] = 'null'
warriner_valence = warriner['V.Mean.Sum'].to_list()
warriner_dominance = warriner['D.Mean.Sum'].to_list()
warriner_arousal = warriner['A.Mean.Sum'].to_list()
warriner_sd_valence = warriner['V.SD.Sum'].to_list()
warriner_sd_dominance = warriner['D.SD.Sum'].to_list()
warriner_sd_arousal = warriner['A.SD.Sum'].to_list()
warriner_valence_dict = {warriner_terms[idx]: warriner_valence[idx] for idx in range(len(warriner_terms))}
# Build the master term list (deduplicated union of all lexica plus the WEAT
# and affect word sets), then mine the pickled context files for a
# naturally-occurring sentence window containing each term.
term_list = list(set(bellezza_terms + anew_terms + warriner_terms + weat_terms + arousal + dominant + indifference + submissive))
missing = list(term_list)
context_dict = {}
dir_ = f'D:\\new_contexts'
dir_list = list(listdir(dir_))
for target_file in dir_list:
    print(target_file)
    with open(path.join(dir_, target_file), 'rb') as pkl_reader:
        contexts = pickle.load(pkl_reader)[0]
    for context in contexts:
        if type(context) != str:
            continue
        pop_idx = []
        for idx, term in enumerate(missing):
            if term in context:
                print(context)
                try:
                    tokenized_context = word_tokenize(context)
                    tokenized_term = word_tokenize(term)
                except:
                    # Tokenization can fail on malformed text; skip this term.
                    continue
                try:
                    # BUG FIX: locate the term's first *token* in the context.
                    # The original used term[0] — the term's first character —
                    # which is almost never a standalone token, so .index()
                    # raised ValueError and nearly every term was skipped.
                    pos = tokenized_context.index(tokenized_term[0])
                    start = max(0, pos - 10)
                    end = min(len(tokenized_context), pos + 10)
                    # Store a ~20-token window centred on the term.
                    context_dict[term] = ' '.join(tokenized_context[start:end])
                    pop_idx.append(idx)
                    print(term)
                    # Checkpoint progress after every hit (slow but crash-safe).
                    with open(f'D:\\cwe_dictionaries\\updated_random_context_dictionary.pkl', 'wb') as pkl_writer:
                        pickle.dump(context_dict, pkl_writer)
                    continue
                except:
                    # Term's first token not present after tokenization; skip.
                    continue
        # Drop every term we just found a context for.
        missing = [missing[i] for i in range(len(missing)) if i not in pop_idx]
        print(len(missing))
        if not missing:
            break
    if not missing:
        break
print(missing)
with open(f'D:\\cwe_dictionaries\\random_context_dictionary.pkl', 'wb') as pkl_writer:
    pickle.dump(context_dict, pkl_writer)
SETTING = 'aligned'
# Build a single valence lookup (Warriner as the base, then ANEW, then a
# rescaled Bellezza, then flat WEAT priors) and bucket every term into one of
# five valence classes that select a sentiment-matched sentence template.
if SETTING in ('aligned', 'misaligned'):
    term_valence_dict = copy.deepcopy(warriner_valence_dict)
    # ANEW fills in anything Warriner is missing.
    for term, valence in zip(anew_terms, anew_valence):
        term_valence_dict.setdefault(term, valence)
    # Bellezza is on a narrower scale; stretch it onto the same range as the others.
    for term, valence in zip(bellezza_terms, bellezza_valence):
        term_valence_dict.setdefault(term, (valence - 1) * 2 + 1)
    # Flat priors for WEAT terms absent from every lexicon.
    for weat_words, prior in ((pleasant_weat, 8.0), (unpleasant_weat, 2.0), (neutral_weat, 5.0)):
        for term in weat_words:
            term_valence_dict.setdefault(term, prior)
    # Quantize valence into classes 0 (very unpleasant) .. 4 (very pleasant).
    term_class_dict = {}
    for term, valence in term_valence_dict.items():
        bucket = 4
        for cutoff, label in ((2.5, 0), (4.0, 1), (6.0, 2), (7.5, 3)):
            if valence <= cutoff:
                bucket = label
                break
        term_class_dict[term] = bucket
# Sentiment-bearing templates, keyed by valence class.
aligned_context_dict = {0: 'It is very unpleasant to think about WORD',
                        1: 'It is unpleasant to think about WORD',
                        2: 'It is neither pleasant nor unpleasant to think about WORD',
                        3: 'It is pleasant to think about WORD',
                        4: 'It is very pleasant to think about WORD',}
# The misaligned templates are the aligned ones with the class order reversed.
misaligned_context_dict = {4 - cls: template for cls, template in aligned_context_dict.items()}
#Collect embeddings and write to a dictionary
embedding_dict = {}
SETTING = 'bleached'
# NOTE(review): SETTING is reassigned immediately before each branch below, so
# all four branches run in sequence (bleached, random, aligned, misaligned).
if SETTING == 'bleached':
    # Bleached: every term is embedded in the same near-contentless template.
    embedding_dict = {}
    for idx, term in enumerate(term_list):
        context = TEMPLATE.replace('WORD', term)
        embedding_dict[term] = get_embeddings(term, context, CURRENT_MODEL, CURRENT_TOKENIZER, tensor_type=TENSOR_TYPE)
    with open(path.join(DUMP_PATH, f'{WRITE_MODEL}_{SETTING}.pkl'), 'wb') as pkl_writer:
        pickle.dump(embedding_dict, pkl_writer)
SETTING = 'random'
if SETTING == 'random':
    # Random: use the naturally-occurring context mined earlier.
    # NOTE(review): embedding_dict is not reset here, so bleached entries
    # persist for any term that raises before reassignment — confirm intended.
    for idx, term in enumerate(term_list):
        embedding_dict[term] = get_embeddings(term, context_dict[term], CURRENT_MODEL, CURRENT_TOKENIZER, tensor_type=TENSOR_TYPE)
        print(f'{term} worked')
    with open(path.join(DUMP_PATH, f'{WRITE_MODEL}_{SETTING}.pkl'), 'wb') as pkl_writer:
        pickle.dump(embedding_dict, pkl_writer)
SETTING = 'aligned'
if SETTING == 'aligned':
    # Aligned: template sentiment matches the term's valence class.
    embedding_dict = {}
    for idx, term in enumerate(term_list):
        context = aligned_context_dict[term_class_dict[term]].replace('WORD', term)
        embedding_dict[term] = get_embeddings(term, context, CURRENT_MODEL, CURRENT_TOKENIZER, tensor_type=TENSOR_TYPE)
    with open(path.join(DUMP_PATH, f'{WRITE_MODEL}_{SETTING}.pkl'), 'wb') as pkl_writer:
        pickle.dump(embedding_dict, pkl_writer)
SETTING = 'misaligned'
if SETTING == 'misaligned':
    # Misaligned: template sentiment is the reverse of the term's class.
    embedding_dict = {}
    for idx, term in enumerate(term_list):
        context = misaligned_context_dict[term_class_dict[term]].replace('WORD', term)
        embedding_dict[term] = get_embeddings(term, context, CURRENT_MODEL, CURRENT_TOKENIZER, tensor_type=TENSOR_TYPE)
    with open(path.join(DUMP_PATH, f'{WRITE_MODEL}_{SETTING}.pkl'), 'wb') as pkl_writer:
        pickle.dump(embedding_dict, pkl_writer)
#Get CoLA Test Embeddings
# Read the CoLA in-domain dev split (TSV, no header: col 1 = acceptability
# label, col 3 = sentence — TODO confirm column layout).
k = pd.read_csv(f'D:\\in_domain_dev.tsv',sep='\t',header=None)
ids = k.index.to_list()
labels = k[1].to_list()
label_dict = {ids[idx]:labels[idx] for idx in range(len(ids))}
sentences = k[3].to_list()
sentence_dict = {ids[idx]:sentences[idx] for idx in range(len(ids))}
sentences = [i.strip() for i in sentences]
# Strip trailing punctuation (presumably string.punctuation — defined/imported
# outside this chunk; verify).
new_sentences = [i.rstrip(punctuation) for i in sentences]
new_sentence_dict = {ids[idx]:new_sentences[idx] for idx in range(len(ids))}
# Split each punctuation-free sentence into "everything but the last word"
# (trunced) and the last word itself.
actual_last_word = [i.rsplit(' ',1)[1] for i in new_sentences]
trunced = [i.rsplit(' ',1)[0] for i in new_sentences]
last_dict = {}
trunc_dict = {}
gpt2_predictions = {}
gpt2_pos = {}
no_punct_dict = {}
# Pass 1: truncated sentences — keep the final hidden state, GPT-2's predicted
# next word, and that word's POS tag.
for idx in range(len(ids)):
    sentence = trunced[idx]
    encoded = MODEL_TOKENIZER_GPT2.encode(sentence,return_tensors='tf')
    output = MODEL_GPT2(encoded)
    # output[-1][12][0][-1]: last position of what is presumably the final
    # (12th) hidden-state layer for batch item 0 — TODO confirm output layout.
    last_hs = np.array(output[-1][12][0][-1])
    trunc_dict[idx] = last_hs
    # Greedy next-token prediction from the logits at the last position.
    pred = np.argmax(np.squeeze(output[0])[-1])
    next_word = MODEL_TOKENIZER_GPT2.decode([pred])
    gpt2_predictions[idx] = next_word
    new_ = sentence + next_word
    pos = pos_tag(word_tokenize(new_))[-1]
    gpt2_pos[idx] = pos
with open(f'D:\\cola_test\\trunc_vectors_val.pkl','wb') as pkl_writer:
    pickle.dump(trunc_dict,pkl_writer)
with open(f'D:\\cola_test\\gpt2_preds_val.pkl','wb') as pkl_writer:
    pickle.dump(gpt2_predictions,pkl_writer)
with open(f'D:\\cola_test\\gpt2_pred_pos_val.pkl','wb') as pkl_writer:
    pickle.dump(gpt2_pos,pkl_writer)
print('done trunced')
# Pass 2: full sentences — final hidden state only.
for idx in range(len(ids)):
    sentence = sentences[idx]
    encoded = MODEL_TOKENIZER_GPT2.encode(sentence,return_tensors='tf')
    output = MODEL_GPT2(encoded)
    last_hs = np.array(output[-1][12][0][-1])
    last_dict[idx] = last_hs
with open(f'D:\\cola_test\\last_vectors_val.pkl','wb') as pkl_writer:
    pickle.dump(last_dict,pkl_writer)
print('done last')
# Pass 3: punctuation-stripped sentences — final hidden state only.
for idx in range(len(ids)):
    sentence = new_sentences[idx]
    encoded = MODEL_TOKENIZER_GPT2.encode(sentence,return_tensors='tf')
    output = MODEL_GPT2(encoded)
    last_hs = np.array(output[-1][12][0][-1])
    no_punct_dict[idx] = last_hs
with open(f'D:\\cola_test\\no_punct_vectors_val.pkl','wb') as pkl_writer:
    pickle.dump(no_punct_dict,pkl_writer)
print('done everything')
"transformers.GPT2Tokenizer.from_pretrained",
"os.listdir",
"pickle.dump",
"helper_functions.get_embeddings",
"pandas.read_csv",
"nltk.word_tokenize",
"os.path.join",
"pickle.load",
"numpy.squeeze",
"numpy.array",
"copy.deepcopy",
"transformers.TFGPT2LMHeadModel.from_pretrained"
] | [((344, 448), 'transformers.TFGPT2LMHeadModel.from_pretrained', 'TFGPT2LMHeadModel.from_pretrained', (['MODEL_ID_GPT2'], {'output_hidden_states': '(True)', 'output_attentions': '(False)'}), '(MODEL_ID_GPT2, output_hidden_states=True,\n output_attentions=False)\n', (377, 448), False, 'from transformers import TFGPT2LMHeadModel, GPT2Tokenizer\n'), ((473, 517), 'transformers.GPT2Tokenizer.from_pretrained', 'GPT2Tokenizer.from_pretrained', (['MODEL_ID_GPT2'], {}), '(MODEL_ID_GPT2)\n', (502, 517), False, 'from transformers import TFGPT2LMHeadModel, GPT2Tokenizer\n'), ((7663, 7698), 'pandas.read_csv', 'pd.read_csv', (['"""Bellezza_Lexicon.csv"""'], {}), "('Bellezza_Lexicon.csv')\n", (7674, 7698), True, 'import pandas as pd\n'), ((7926, 7949), 'pandas.read_csv', 'pd.read_csv', (['"""ANEW.csv"""'], {}), "('ANEW.csv')\n", (7937, 7949), True, 'import pandas as pd\n'), ((8393, 8428), 'pandas.read_csv', 'pd.read_csv', (['"""Warriner_Lexicon.csv"""'], {}), "('Warriner_Lexicon.csv')\n", (8404, 8428), True, 'import pandas as pd\n'), ((14549, 14609), 'pandas.read_csv', 'pd.read_csv', (['f"""D:\\\\in_domain_dev.tsv"""'], {'sep': '"""\t"""', 'header': 'None'}), "(f'D:\\\\in_domain_dev.tsv', sep='\\t', header=None)\n", (14560, 14609), True, 'import pandas as pd\n'), ((9164, 9177), 'os.listdir', 'listdir', (['dir_'], {}), '(dir_)\n', (9171, 9177), False, 'from os import path, listdir\n'), ((10680, 10717), 'pickle.dump', 'pickle.dump', (['context_dict', 'pkl_writer'], {}), '(context_dict, pkl_writer)\n', (10691, 10717), False, 'import pickle\n'), ((10858, 10894), 'copy.deepcopy', 'copy.deepcopy', (['warriner_valence_dict'], {}), '(warriner_valence_dict)\n', (10871, 10894), False, 'import copy\n'), ((15404, 15435), 'numpy.array', 'np.array', (['output[-1][12][0][-1]'], {}), '(output[-1][12][0][-1])\n', (15412, 15435), True, 'import numpy as np\n'), ((15793, 15828), 'pickle.dump', 'pickle.dump', (['trunc_dict', 'pkl_writer'], {}), '(trunc_dict, pkl_writer)\n', (15804, 15828), False, 
'import pickle\n'), ((15904, 15945), 'pickle.dump', 'pickle.dump', (['gpt2_predictions', 'pkl_writer'], {}), '(gpt2_predictions, pkl_writer)\n', (15915, 15945), False, 'import pickle\n'), ((16024, 16057), 'pickle.dump', 'pickle.dump', (['gpt2_pos', 'pkl_writer'], {}), '(gpt2_pos, pkl_writer)\n', (16035, 16057), False, 'import pickle\n'), ((16266, 16297), 'numpy.array', 'np.array', (['output[-1][12][0][-1]'], {}), '(output[-1][12][0][-1])\n', (16274, 16297), True, 'import numpy as np\n'), ((16406, 16440), 'pickle.dump', 'pickle.dump', (['last_dict', 'pkl_writer'], {}), '(last_dict, pkl_writer)\n', (16417, 16440), False, 'import pickle\n'), ((16650, 16681), 'numpy.array', 'np.array', (['output[-1][12][0][-1]'], {}), '(output[-1][12][0][-1])\n', (16658, 16681), True, 'import numpy as np\n'), ((16798, 16836), 'pickle.dump', 'pickle.dump', (['no_punct_dict', 'pkl_writer'], {}), '(no_punct_dict, pkl_writer)\n', (16809, 16836), False, 'import pickle\n'), ((12939, 13032), 'helper_functions.get_embeddings', 'get_embeddings', (['term', 'context', 'CURRENT_MODEL', 'CURRENT_TOKENIZER'], {'tensor_type': 'TENSOR_TYPE'}), '(term, context, CURRENT_MODEL, CURRENT_TOKENIZER, tensor_type\n =TENSOR_TYPE)\n', (12953, 13032), False, 'from helper_functions import get_embeddings\n'), ((13129, 13168), 'pickle.dump', 'pickle.dump', (['embedding_dict', 'pkl_writer'], {}), '(embedding_dict, pkl_writer)\n', (13140, 13168), False, 'import pickle\n'), ((13294, 13397), 'helper_functions.get_embeddings', 'get_embeddings', (['term', 'context_dict[term]', 'CURRENT_MODEL', 'CURRENT_TOKENIZER'], {'tensor_type': 'TENSOR_TYPE'}), '(term, context_dict[term], CURRENT_MODEL, CURRENT_TOKENIZER,\n tensor_type=TENSOR_TYPE)\n', (13308, 13397), False, 'from helper_functions import get_embeddings\n'), ((13528, 13567), 'pickle.dump', 'pickle.dump', (['embedding_dict', 'pkl_writer'], {}), '(embedding_dict, pkl_writer)\n', (13539, 13567), False, 'import pickle\n'), ((13805, 13898), 
'helper_functions.get_embeddings', 'get_embeddings', (['term', 'context', 'CURRENT_MODEL', 'CURRENT_TOKENIZER'], {'tensor_type': 'TENSOR_TYPE'}), '(term, context, CURRENT_MODEL, CURRENT_TOKENIZER, tensor_type\n =TENSOR_TYPE)\n', (13819, 13898), False, 'from helper_functions import get_embeddings\n'), ((13995, 14034), 'pickle.dump', 'pickle.dump', (['embedding_dict', 'pkl_writer'], {}), '(embedding_dict, pkl_writer)\n', (14006, 14034), False, 'import pickle\n'), ((14281, 14374), 'helper_functions.get_embeddings', 'get_embeddings', (['term', 'context', 'CURRENT_MODEL', 'CURRENT_TOKENIZER'], {'tensor_type': 'TENSOR_TYPE'}), '(term, context, CURRENT_MODEL, CURRENT_TOKENIZER, tensor_type\n =TENSOR_TYPE)\n', (14295, 14374), False, 'from helper_functions import get_embeddings\n'), ((14471, 14510), 'pickle.dump', 'pickle.dump', (['embedding_dict', 'pkl_writer'], {}), '(embedding_dict, pkl_writer)\n', (14482, 14510), False, 'import pickle\n'), ((9250, 9278), 'os.path.join', 'path.join', (['dir_', 'target_file'], {}), '(dir_, target_file)\n', (9259, 9278), False, 'from os import path, listdir\n'), ((9320, 9343), 'pickle.load', 'pickle.load', (['pkl_reader'], {}), '(pkl_reader)\n', (9331, 9343), False, 'import pickle\n'), ((13045, 13097), 'os.path.join', 'path.join', (['DUMP_PATH', 'f"""{WRITE_MODEL}_{SETTING}.pkl"""'], {}), "(DUMP_PATH, f'{WRITE_MODEL}_{SETTING}.pkl')\n", (13054, 13097), False, 'from os import path, listdir\n'), ((13444, 13496), 'os.path.join', 'path.join', (['DUMP_PATH', 'f"""{WRITE_MODEL}_{SETTING}.pkl"""'], {}), "(DUMP_PATH, f'{WRITE_MODEL}_{SETTING}.pkl')\n", (13453, 13496), False, 'from os import path, listdir\n'), ((13911, 13963), 'os.path.join', 'path.join', (['DUMP_PATH', 'f"""{WRITE_MODEL}_{SETTING}.pkl"""'], {}), "(DUMP_PATH, f'{WRITE_MODEL}_{SETTING}.pkl')\n", (13920, 13963), False, 'from os import path, listdir\n'), ((14387, 14439), 'os.path.join', 'path.join', (['DUMP_PATH', 'f"""{WRITE_MODEL}_{SETTING}.pkl"""'], {}), "(DUMP_PATH, 
f'{WRITE_MODEL}_{SETTING}.pkl')\n", (14396, 14439), False, 'from os import path, listdir\n'), ((15491, 15512), 'numpy.squeeze', 'np.squeeze', (['output[0]'], {}), '(output[0])\n', (15501, 15512), True, 'import numpy as np\n'), ((15664, 15683), 'nltk.word_tokenize', 'word_tokenize', (['new_'], {}), '(new_)\n', (15677, 15683), False, 'from nltk import word_tokenize\n'), ((9629, 9651), 'nltk.word_tokenize', 'word_tokenize', (['context'], {}), '(context)\n', (9642, 9651), False, 'from nltk import word_tokenize\n'), ((9690, 9709), 'nltk.word_tokenize', 'word_tokenize', (['term'], {}), '(term)\n', (9703, 9709), False, 'from nltk import word_tokenize\n'), ((10254, 10291), 'pickle.dump', 'pickle.dump', (['context_dict', 'pkl_writer'], {}), '(context_dict, pkl_writer)\n', (10265, 10291), False, 'import pickle\n')] |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
import logging
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from data_util import load_test_dataset, load_train_dataset
from model import get_model
from sklearn.model_selection import train_test_split
# Root logger at INFO; silence TensorFlow's own (TF1-style) logging.
logging.getLogger().setLevel(logging.INFO)
tf.logging.set_verbosity(tf.logging.ERROR)
# Copy inference pre/post-processing script so it will be included in the model package
os.system("mkdir /opt/ml/model/code")
os.system("cp inference.py /opt/ml/model/code")
os.system("cp requirements.txt /opt/ml/model/code")
def save_model(model, path):
    """Export ``model`` as a TensorFlow SavedModel under ``path``/SavedModel.

    Parameters
    ----------
    model : tf.keras.Model
        The trained Keras model to export.
    path : str
        Base output directory for the SavedModel.
    """
    tf.contrib.saved_model.save_keras_model(model, f"{path}/SavedModel")
    # Lazy %-style args: the message is only built if INFO is enabled.
    logging.info("Model successfully saved at: %s", path)
def main(args):
    """Train the CNN classifier and export it as a SavedModel.

    Loads the training data, one-hot encodes labels, splits into
    train/validation sets, trains with per-epoch checkpointing, and finally
    saves the model to ``args.model_output_dir``.
    """
    model = get_model(
        filters=args.filter_sizes,
        hidden_units=args.hidden_size,
        dropouts=args.dropout_sizes,
        num_class=args.num_classes,
    )
    # load training data
    x, y = load_train_dataset(droot=args.train_dir)
    # one-hot encode label
    one_hot_y = np.zeros((y.shape[0], args.num_classes))
    one_hot_y[np.arange(y.shape[0]), y] = 1
    # split x and y into train and val set
    X_train, X_val, y_train, y_val = train_test_split(
        x, one_hot_y, test_size=args.test_size, random_state=42, shuffle=True
    )
    # normalize the x image (assumes 8-bit pixel values — TODO confirm)
    X_train = X_train / 255
    X_val = X_val / 255
    # NOTE(review): Adam's second positional parameter is beta_1, not a
    # momentum term — confirm args.momentum is meant as beta_1 here.
    opt = tf.keras.optimizers.Adam(args.learning_rate, args.momentum)
    model.compile(
        loss="categorical_crossentropy",
        optimizer=opt,
        metrics=["categorical_crossentropy", "accuracy"],
    )
    # a callback to save model ckpt after each epoch if better model is found
    # NOTE(review): confirm the logged metric is named "val_accuracy" (older
    # tf.keras versions log "val_acc"); a mismatch makes monitoring a no-op.
    model_ckpt_callback = tf.keras.callbacks.ModelCheckpoint(
        args.output_data_dir + "/checkpoint-{epoch}.h5",
        monitor="val_accuracy",
    )
    logging.info("Start training ...")
    model.fit(
        X_train,
        y_train,
        validation_data=(X_val, y_val),
        batch_size=args.batch_size,
        epochs=args.epochs,
        callbacks=[model_ckpt_callback],
        verbose=2,
    )
    save_model(model, args.model_output_dir)
if __name__ == "__main__":
    # Command-line interface: hyperparameters plus SageMaker-style directory
    # arguments (defaults come from the SM_* environment variables).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--filter-sizes", nargs=2, type=int, default=[64, 32], help="Filter size with length of 2"
    )
    parser.add_argument(
        "--hidden-size", type=int, default=256, help="Feed-forward layer hidden unit size."
    )
    parser.add_argument(
        "--dropout-sizes",
        nargs=3,
        type=float,
        default=[0.3, 0.3, 0.5],
        help="Dropout layer size with length of 2",
    )
    parser.add_argument(
        "--num-classes", type=int, default=10, help="Num of class in classification task."
    )
    parser.add_argument("--learning-rate", type=float, default=0.001, help="Initial learning rate.")
    parser.add_argument("--epochs", type=int, default=10)
    parser.add_argument("--batch-size", type=int, default=128)
    parser.add_argument("--test-size", type=float, default=0.2)
    parser.add_argument("--momentum", type=float, default=0.9)
    # Data / output locations injected by the SageMaker training environment.
    parser.add_argument("--train-dir", type=str, default=os.environ.get("SM_CHANNEL_TRAINING"))
    parser.add_argument("--model_dir", type=str)
    parser.add_argument("--model-output-dir", type=str, default=os.environ.get("SM_MODEL_DIR"))
    parser.add_argument("--output-data-dir", type=str, default=os.environ.get("SM_OUTPUT_DATA_DIR"))
    args = parser.parse_args()
    main(args)
| [
"logging.getLogger",
"argparse.ArgumentParser",
"numpy.arange",
"sklearn.model_selection.train_test_split",
"tensorflow.contrib.saved_model.save_keras_model",
"tensorflow.logging.set_verbosity",
"os.environ.get",
"tensorflow.keras.optimizers.Adam",
"numpy.zeros",
"tensorflow.keras.callbacks.ModelC... | [((873, 915), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (897, 915), True, 'import tensorflow as tf\n'), ((1006, 1043), 'os.system', 'os.system', (['"""mkdir /opt/ml/model/code"""'], {}), "('mkdir /opt/ml/model/code')\n", (1015, 1043), False, 'import os\n'), ((1044, 1091), 'os.system', 'os.system', (['"""cp inference.py /opt/ml/model/code"""'], {}), "('cp inference.py /opt/ml/model/code')\n", (1053, 1091), False, 'import os\n'), ((1092, 1143), 'os.system', 'os.system', (['"""cp requirements.txt /opt/ml/model/code"""'], {}), "('cp requirements.txt /opt/ml/model/code')\n", (1101, 1143), False, 'import os\n'), ((1179, 1247), 'tensorflow.contrib.saved_model.save_keras_model', 'tf.contrib.saved_model.save_keras_model', (['model', 'f"""{path}/SavedModel"""'], {}), "(model, f'{path}/SavedModel')\n", (1218, 1247), True, 'import tensorflow as tf\n'), ((1343, 1471), 'model.get_model', 'get_model', ([], {'filters': 'args.filter_sizes', 'hidden_units': 'args.hidden_size', 'dropouts': 'args.dropout_sizes', 'num_class': 'args.num_classes'}), '(filters=args.filter_sizes, hidden_units=args.hidden_size,\n dropouts=args.dropout_sizes, num_class=args.num_classes)\n', (1352, 1471), False, 'from model import get_model\n'), ((1544, 1584), 'data_util.load_train_dataset', 'load_train_dataset', ([], {'droot': 'args.train_dir'}), '(droot=args.train_dir)\n', (1562, 1584), False, 'from data_util import load_test_dataset, load_train_dataset\n'), ((1628, 1668), 'numpy.zeros', 'np.zeros', (['(y.shape[0], args.num_classes)'], {}), '((y.shape[0], args.num_classes))\n', (1636, 1668), True, 'import numpy as np\n'), ((1793, 1884), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'one_hot_y'], {'test_size': 'args.test_size', 'random_state': '(42)', 'shuffle': '(True)'}), '(x, one_hot_y, test_size=args.test_size, random_state=42,\n shuffle=True)\n', (1809, 1884), False, 
'from sklearn.model_selection import train_test_split\n'), ((1987, 2046), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['args.learning_rate', 'args.momentum'], {}), '(args.learning_rate, args.momentum)\n', (2011, 2046), True, 'import tensorflow as tf\n'), ((2300, 2411), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (["(args.output_data_dir + '/checkpoint-{epoch}.h5')"], {'monitor': '"""val_accuracy"""'}), "(args.output_data_dir +\n '/checkpoint-{epoch}.h5', monitor='val_accuracy')\n", (2334, 2411), True, 'import tensorflow as tf\n'), ((2436, 2470), 'logging.info', 'logging.info', (['"""Start training ..."""'], {}), "('Start training ...')\n", (2448, 2470), False, 'import logging\n'), ((2779, 2804), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2802, 2804), False, 'import argparse\n'), ((830, 849), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (847, 849), False, 'import logging\n'), ((1683, 1704), 'numpy.arange', 'np.arange', (['y.shape[0]'], {}), '(y.shape[0])\n', (1692, 1704), True, 'import numpy as np\n'), ((3767, 3804), 'os.environ.get', 'os.environ.get', (['"""SM_CHANNEL_TRAINING"""'], {}), "('SM_CHANNEL_TRAINING')\n", (3781, 3804), False, 'import os\n'), ((3919, 3949), 'os.environ.get', 'os.environ.get', (['"""SM_MODEL_DIR"""'], {}), "('SM_MODEL_DIR')\n", (3933, 3949), False, 'import os\n'), ((4014, 4050), 'os.environ.get', 'os.environ.get', (['"""SM_OUTPUT_DATA_DIR"""'], {}), "('SM_OUTPUT_DATA_DIR')\n", (4028, 4050), False, 'import os\n')] |
import os
import gzip
import logging
import tempfile
from pathlib import Path
import h5py
import numpy
import sunpy.map
from astropy import units as u
from astropy.io import fits
from astropy.time import Time
from sunpy.util.exceptions import warn_user
from sunkit_instruments.suvi._variables import (
COMPOSITE_MATCHES,
FITS_FILE_EXTENSIONS,
L1B_MATCHES,
NETCDF_FILE_EXTENSIONS,
TAG_COMMENT_MAPPING,
TAG_MAPPING,
)
__all__ = ["read_suvi", "files_to_map"]
def _fix_l1b_header(filename):
    """
    Fix a SUVI L1b FITS file header (broken due to the wrong
    implementation of the CONTINUE keyword convention).

    .. note::
        astropy versions <=4.2.0 will do this faster, because we can
        still use the `astropy.io.fits.header.Header.to_string()` method.
        Starting with astropy version 4.2.1, the
        `astropy.io.fits.header.Header.to_string()` method will not work
        anymore due to FITS header consistency checks that cannot
        be overridden. The solution for that is not very elegant
        in the code here (reading the FITS file directly as bytes
        until we hit a UnicodeDecodeError), but it is the only one
        that works as far as I know.

    .. note::
        If the input file is gzipped, an unzipped file in the default
        tmp directory will be used and deleted afterwards.

    Parameters
    ----------
    filename: `str`
        Filename of the L1b file with the corrupt FITS header.

    Returns
    -------
    `astropy.io.fits.header.Header`
        Corrected FITS header.
    """
    try:
        # First try it with the astropy .to_string() method, as this is the easiest.
        hdr = fits.getheader(filename)
        hdr_str = hdr.tostring()
    except Exception:
        # Read the file manually as bytes until we hit a UnicodeDecodeError, i.e.
        # until we reach the data part. Since astropy version 4.2.1, we can't use
        # the .to_string() method anymore because of FITS header consistency checks
        # that cannot be overridden, and they won't fix it unfortunately. If the
        # input file is a .gz file, we need to unpack it first to the tmp directory.
        temp_dir = tempfile.gettempdir()
        name = Path(filename).name
        is_gz_file = False
        if name.endswith(".gz"):
            is_gz_file = True
            with gzip.open(filename, "r") as gfile:
                # Decompress to the tmp directory and point `filename` at the copy.
                filename = str(Path(temp_dir) / name[:-3])
                with open(filename, "wb") as file_out:
                    file_out.write(gfile.read())
        hdr_str = ""
        with open(filename, "rb") as file:
            counter = 1
            while True:
                try:
                    # Read an ever-growing chunk; decoding fails once binary
                    # data is reached, which terminates the loop.
                    this_line = file.read(counter)
                    this_str = this_line.decode("utf-8")
                    hdr_str += this_str
                    counter += 1
                except UnicodeDecodeError:
                    break
        if is_gz_file:
            # Remove the temporary unzipped copy.
            os.remove(filename)
    # Make a list of strings with a length of 80
    hdr_list = [hdr_str[i : i + 80] for i in range(0, len(hdr_str), 80)]
    # Remove all the empty entries
    while " " * 80 in hdr_list:
        hdr_list.remove(" " * 80)
    # Walk the 80-char cards and merge every CONTINUE'd sequence into a single
    # (long) card, joining the pieces at their ampersand markers.
    hdr_list_new = []
    for count, item in enumerate(hdr_list):
        if count <= len(hdr_list) - 2:
            if (
                hdr_list[count][0:8] != "CONTINUE"
                and hdr_list[count + 1][0:8] != "CONTINUE"
            ):
                # Ordinary card: copy through unchanged.
                hdr_list_new.append(hdr_list[count])
            else:
                if (
                    hdr_list[count][0:8] != "CONTINUE"
                    and hdr_list[count + 1][0:8] == "CONTINUE"
                ):
                    # Start of a CONTINUE'd card: keep everything before the '&'.
                    ampersand_pos = hdr_list[count].find("&")
                    if ampersand_pos != -1:
                        new_entry = hdr_list[count][0:ampersand_pos]
                    else:
                        raise RuntimeError(
                            "There should be an ampersand at the end of a CONTINUE'd keyword."
                        )
                    tmp_count = 1
                    while hdr_list[count + tmp_count][0:8] == "CONTINUE":
                        ampersand_pos = hdr_list[count + tmp_count].find("&")
                        if ampersand_pos != -1:
                            # Intermediate CONTINUE card: append the text
                            # between the opening quote and the ampersand.
                            first_sq_pos = hdr_list[count + tmp_count].find("'")
                            if first_sq_pos != -1:
                                new_entry = (
                                    new_entry
                                    + hdr_list[count + tmp_count][
                                        first_sq_pos + 1 : ampersand_pos
                                    ]
                                )
                            else:
                                raise RuntimeError(
                                    "There should be two single quotes after CONTINUE. Did not find any."
                                )
                        else:
                            # If there is no ampersand at the end anymore, it means the entry ends here.
                            # Read from the first to the second single quote in this case.
                            first_sq_pos = hdr_list[count + tmp_count].find("'")
                            if first_sq_pos != -1:
                                second_sq_pos = hdr_list[count + tmp_count][
                                    first_sq_pos + 1 :
                                ].find("'")
                                if second_sq_pos != -1:
                                    new_entry = (
                                        new_entry
                                        + hdr_list[count + tmp_count][
                                            first_sq_pos
                                            + 1 : second_sq_pos
                                            + 1
                                            + first_sq_pos
                                        ].rstrip()
                                        + "'"
                                    )
                                else:
                                    raise RuntimeError(
                                        "There should be two single quotes after CONTINUE. Found the first, but not the second."
                                    )
                            else:
                                raise RuntimeError(
                                    "There should be two single quotes after CONTINUE. Did not find any."
                                )
                        tmp_count += 1
                    hdr_list_new.append(new_entry)
                else:
                    # A CONTINUE card already consumed by the while-loop above.
                    continue
        else:
            # Add END at the end of the header
            hdr_list_new.append(hdr_list[count])
    # Now we stitch together the CONTINUE information correctly,
    # with a "\n" at the end that we use as a separator later on
    # when we convert from a string to an astropy header.
    for count, item in enumerate(hdr_list_new):
        if len(item) > 80:
            this_entry = item[0:78] + "&'\n"
            rest = "CONTINUE '" + item[78:]
            while len(rest) > 80:
                this_entry = this_entry + rest[0:78] + "&'\n"
                rest = "CONTINUE '" + rest[78:]
            this_entry = this_entry + rest
            hdr_list_new[count] = this_entry
    # Now we should have the correct list of strings. Since we can't convert a list to a
    # FITS header directly, we have to convert it to a string first, separated by "\n".
    hdr_str_new = "\n".join([str(item) for item in hdr_list_new])
    hdr_corr = fits.Header.fromstring(hdr_str_new, sep="\n")
    return hdr_corr
def _read_fits(filename):
    """
    Read a SUVI FITS file and return the header, data and dqf.

    Parameters
    ----------
    filename : `str`
        Path to a SUVI L1b FITS file or an L2 HDR composite FITS file.
        The file type is detected by pattern matching on the basename.

    Returns
    -------
    header : `astropy.io.fits.header.Header`
        The file header (rebuilt by ``_fix_l1b_header`` for L1b files).
    data : `~numpy.ndarray`
        The image data.
    dqf : `~numpy.ndarray` or `None`
        Data quality flags; `None` for L2 HDR composites.

    Raises
    ------
    ValueError
        If the filename matches neither the composite nor the L1b patterns.
    """
    basename = os.path.basename(filename)
    if any(fn in basename for fn in COMPOSITE_MATCHES):
        # L2 HDR composites keep their payload in extension 1 and
        # carry no DQF extension.
        with fits.open(filename) as hdu:
            data, header = hdu[1].data, hdu[1].header
        dqf = None
    elif any(fn in basename for fn in L1B_MATCHES):
        # L1b: data in the primary HDU, DQF in extension 1. The header is
        # rebuilt because of the broken CONTINUE convention in L1b files.
        with fits.open(filename) as hdu:
            data, header, dqf = hdu[0].data, _fix_l1b_header(filename), hdu[1].data
    else:
        # Fix: the original f-string had no placeholder, so the offending
        # filename never appeared in the error message.
        raise ValueError(
            f"File {filename} does not look like a SUVI L1b FITS file or L2 HDR composite."
        )
    return header, data, dqf
def _make_cdf_header(header_info):
    """
    Assemble a FITS header from the variable dictionary of a SUVI netCDF file.

    Parameters
    ----------
    header_info : `dict`
        Mapping of netCDF variable names to their (numpy) values.

    Returns
    -------
    `astropy.io.fits.header.Header`
        The assembled FITS header.
    """
    header_info_copy = header_info.copy()
    # Discard everything where the key name is longer than 8 characters
    # (not a valid FITS keyword), plus specific entries we have to deal
    # with manually.
    for key in header_info:
        if len(key) > 8 or key in ["RAD", "DQF", "NAXIS1", "NAXIS2"]:
            del header_info_copy[key]
    for key, value in header_info_copy.items():
        if isinstance(value, numpy.ndarray):
            # We only want single values for the header, no arrays of length 1.
            # We convert everything that looks like an integer to a long,
            # everything that looks like a float to float64, and byte strings
            # to actual strings.
            if value.ndim == 0:
                if value.dtype in [
                    numpy.int8,
                    numpy.int16,
                    numpy.int32,
                    numpy.int64,
                    numpy.uint8,
                    numpy.uint16,
                    numpy.uint32,
                    numpy.uint64,
                ]:
                    header_info_copy[key] = numpy.longlong(value)
                elif value.dtype in [numpy.float16, numpy.float32, numpy.float64]:
                    header_info_copy[key] = numpy.float64(value)
            else:
                if value.dtype == "|S1":
                    # Byte string to actual string, and removing weird characters
                    header_info_copy[key] = (
                        value.tobytes().decode("utf-8").rstrip("\x00")
                    )
    # Now deal with the dates (float in the netCDF). Transform to readable string,
    # ignore bakeout date because it is always -999.
    for key, value in header_info_copy.items():
        if key.startswith("DATE") and key != "DATE-BKE":
            # Explanation for the odd time creation: the SUVI files say they use the
            # the J2000 epoch, but they do not: the reference time is 2000-01-01 at
            # 12:00:00 *UTC*, whereas the reference time for J2000 is in *TT*. So in
            # order to get the time right, we need to define it in TT, but add the
            # offset of 69.184 seconds between UTC and TT.
            the_readable_date = (
                Time("2000-01-01T12:01:09.184", scale="tt") + value * u.s
            )
            header_info_copy[key] = the_readable_date.utc.value
    # Add NAXIS1 and NAXIS2 manually, because they are odd coming from the netCDF
    header_info_copy["NAXIS1"] = None
    header_info_copy["NAXIS2"] = None
    # Same for BLANK, BSCALE, and BZERO
    header_info_copy["BLANK"] = None
    header_info_copy["BSCALE"] = None
    header_info_copy["BZERO"] = None
    header_info_copy["BUNIT"] = None
    header = fits.Header.fromkeys(header_info_copy.keys())
    for keyword in header:
        header[keyword] = header_info_copy[keyword]
    # Add fits header comments for known keywords as defined above
    for keyword in header:
        if keyword in TAG_COMMENT_MAPPING:
            header.set(keyword, header[keyword], TAG_COMMENT_MAPPING[keyword])
    # Add EXTEND, EXTVER, EXTNAME, and LONGSTRN
    # (fixed typo in the EXTEND comment: "dataet" -> "dataset")
    header.append(("EXTEND", True, "FITS dataset may contain extensions"))
    header.append(("EXTVER", 1, ""))
    header.append(("EXTNAME", "DATA", ""))
    header.append(
        ("LONGSTRN", "OGIP 1.0", "The HEASARC Long String Convention may be used")
    )
    return header
def _read_netCDF(filename):
    """
    Read a SUVI L1b netCDF file and return the header, data and dqf.

    Parameters
    ----------
    filename : `str`
        Path to a SUVI L1b netCDF file (detected by pattern matching
        on the basename).

    Returns
    -------
    header : `astropy.io.fits.header.Header`
        A FITS header assembled from the netCDF attributes.
    data : `~numpy.ndarray`
        The radiance image, with scale factor and offset applied.
    dqf : `~numpy.ndarray`
        Data quality flags.

    Raises
    ------
    ValueError
        If the filename does not match the L1b patterns.
    """
    if any(fn in os.path.basename(filename) for fn in L1B_MATCHES):
        with h5py.File(filename, "r") as afile:
            data = afile["RAD"][:]
            # Unpack the packed radiance: value * scale_factor + add_offset.
            blank = afile["RAD"].attrs["_FillValue"][0]
            bzero = afile["RAD"].attrs["add_offset"][0]
            bscale = afile["RAD"].attrs["scale_factor"][0]
            bunit = afile["RAD"].attrs["units"].tobytes().decode("utf-8").rstrip("\x00")
            data = data * bscale + bzero
            dqf = afile["DQF"][:]
            header_info = dict((key, afile[key][...]) for key in afile.keys())
            header = _make_cdf_header(header_info)
            # Deal with this here as we require the file.
            for att, val in afile.attrs.items():
                if att in TAG_MAPPING:
                    header[TAG_MAPPING[att]] = (
                        val.tobytes().decode("utf-8").rstrip("\x00")
                    )
            header["NAXIS1"] = data.shape[0]
            header["NAXIS2"] = data.shape[1]
            header["BLANK"] = blank
            header["BSCALE"] = bscale
            header["BZERO"] = bzero
            header["BUNIT"] = bunit
    else:
        # Fix: the original f-string had no placeholder, so the offending
        # filename never appeared in the error message.
        raise ValueError(f"File {filename} does not look like a SUVI L1b netCDF file.")
    return header, data, dqf
def read_suvi(filename):
    """
    Read a SUVI L1b FITS or netCDF file or a L2 HDR composite FITS file.
    Returns header, data and the data quality flag array (DQF) for L1b files.
    For SUVI L1b FITS files, the broken FITS header is fixed automatically
    (broken because of the wrong implementation of the CONTINUE convention).
    This read function is intended to provide a consistent file interface
    for FITS and netCDF, L1b and L2.

    .. note::
        The type of file is determined by pattern matching in the
        filenames, e.g. "-L1b-Fe171" for a 171 L1b file and "-l2-ci171"
        for a 171 L2 HDR composite. If those patterns are not found
        in the filename, the files will not be recognized.

    .. note::
        If ``filename`` is an L1b netCDF file, the information from
        the netCDF file is transformed into a FITS header.

    Parameters
    ----------
    filename : `str`
        File to read.

    Returns
    -------
    `astropy.io.fits.header.Header`, `~numpy.ndarray`, `~numpy.ndarray`
        Header, data, and data quality flags.

    Raises
    ------
    ValueError
        If the file extension is neither a known FITS nor netCDF extension.
    """
    if filename.lower().endswith(FITS_FILE_EXTENSIONS):
        header, data, dqf = _read_fits(filename)
    elif filename.lower().endswith(NETCDF_FILE_EXTENSIONS):
        header, data, dqf = _read_netCDF(filename)
    else:
        # Fix: the original f-string had no placeholder, so the offending
        # filename never appeared in the error message.
        raise ValueError(
            f"File {filename} does not look like a valid FITS or netCDF file."
        )
    return header, data, dqf
def files_to_map(
    files,
    despike_l1b=False,
    only_long_exposures=False,
    only_short_exposures=False,
    only_short_flare_exposures=False,
):
    """
    Read SUVI L1b FITS or netCDF files or L2 HDR composite FITS files and
    return a `~sunpy.map.Map` or a `~sunpy.map.MapSequence`. For SUVI L1b
    FITS files, the broken FITS header is fixed automatically (broken
    because of the wrong implementation of the CONTINUE convention).

    .. note::
        The first file in the sorted input list determines what will be
        accepted further on, i.e. L2 HDR composites or L1b files. If L1b
        files appear in a list that started with an L2 HDR composite, they
        are skipped with a warning (and vice versa). The type of file is
        determined by pattern matching in the filenames, e.g. "-L1b-Fe171"
        for a 171 L1b file and "-l2-ci171" for a 171 L2 HDR composite.
        Files matching neither pattern are not recognized.

    Parameters
    ----------
    files: `str` or `list` of `str`
        File(s) to read.
    despike_l1b: `bool`, optional. Default: False.
        If True and input is L1b, data will get despiked
        with the standard filter_width=7. Can not be used
        for early SUVI files where the DQF extension is
        missing.
    only_long_exposures: `bool`, optional. Default: False.
        If True, only long exposure L1b files from the input list will be
        accepted and converted to a map. Ignored for L2 HDR composites.
    only_short_exposures: `bool`, optional. Default: False.
        If True, only short exposure L1b files from the input list will be
        accepted and converted to a map. Ignored for L2 HDR composites and
        any wavelengths other than 94 and 131 (because for everything >131,
        there are no observations that are labeled "short", only "long" and
        "short_flare").
    only_short_flare_exposures: `bool`, optional. Default: False.
        If True, only short flare exposure L1b files from the input list will
        be accepted and converted to a map. Ignored for L2 HDR composites.

    Returns
    -------
    `~sunpy.map.Map`, `~sunpy.map.MapSequence`, or `None`.
        A map (sequence) of the SUVI data, or `None` if no
        data was found matching the given criteria.
    """
    # Avoid circular imports
    from sunkit_instruments.suvi.suvi import despike_l1b_array

    if isinstance(files, str):
        files = [files]
    files = sorted(files)

    # The first file decides which kind of input we accept.
    first_name = os.path.basename(files[0])
    if any(fn in first_name for fn in COMPOSITE_MATCHES):
        composites = True
    elif any(fn in first_name for fn in L1B_MATCHES):
        composites = False
    else:
        raise ValueError(
            f"First file {files[0]} does not look like a SUVI L1b file or L2 HDR composite."
        )

    def _l1b_wanted(hdr):
        # Apply the exposure filters against the science objective keyword.
        if only_long_exposures:
            return "long_exposure" in hdr["SCI_OBJ"]
        if only_short_exposures:
            return "short_exposure" in hdr["SCI_OBJ"]
        if only_short_flare_exposures:
            return "short_flare_exposure" in hdr["SCI_OBJ"]
        return True

    data_arrays = []
    header_list = []
    for afile in files:
        logging.debug(f"Reading {afile}")
        base = os.path.basename(afile)
        if composites:
            if not any(fn in base for fn in COMPOSITE_MATCHES):
                warn_user(
                    f"File {afile} does not look like a SUVI L2 HDR composite. Skipping."
                )
                continue
            header, data, _ = read_suvi(afile)
            data_arrays.append(data)
            header_list.append(header)
        else:
            if not any(fn in base for fn in L1B_MATCHES):
                warn_user(f"File {afile} does not look like a SUVI L1b file. Skipping.")
                continue
            header, data, dqf_mask = read_suvi(afile)
            if despike_l1b:
                data = despike_l1b_array(data, dqf_mask)
            if _l1b_wanted(header):
                data_arrays.append(data)
                header_list.append(header)

    if len(data_arrays) == 1:
        return sunpy.map.Map(data_arrays[0], header_list[0])
    if len(data_arrays) > 1:
        return sunpy.map.Map(list(zip(data_arrays, header_list)), sequence=True)
    warn_user("List of data/headers is empty.")
| [
"astropy.io.fits.getheader",
"logging.debug",
"sunpy.util.exceptions.warn_user",
"pathlib.Path",
"gzip.open",
"numpy.longlong",
"numpy.float64",
"h5py.File",
"astropy.time.Time",
"tempfile.gettempdir",
"os.path.basename",
"astropy.io.fits.open",
"astropy.io.fits.Header.fromstring",
"sunkit... | [((7669, 7714), 'astropy.io.fits.Header.fromstring', 'fits.Header.fromstring', (['hdr_str_new'], {'sep': '"""\n"""'}), "(hdr_str_new, sep='\\n')\n", (7691, 7714), False, 'from astropy.io import fits\n'), ((1689, 1713), 'astropy.io.fits.getheader', 'fits.getheader', (['filename'], {}), '(filename)\n', (1703, 1713), False, 'from astropy.io import fits\n'), ((17633, 17666), 'logging.debug', 'logging.debug', (['f"""Reading {afile}"""'], {}), "(f'Reading {afile}')\n", (17646, 17666), False, 'import logging\n'), ((2202, 2223), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (2221, 2223), False, 'import tempfile\n'), ((7924, 7943), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (7933, 7943), False, 'from astropy.io import fits\n'), ((12042, 12066), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (12051, 12066), False, 'import h5py\n'), ((19278, 19321), 'sunpy.util.exceptions.warn_user', 'warn_user', (['"""List of data/headers is empty."""'], {}), "('List of data/headers is empty.')\n", (19287, 19321), False, 'from sunpy.util.exceptions import warn_user\n'), ((2239, 2253), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (2243, 2253), False, 'from pathlib import Path\n'), ((2982, 3001), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (2991, 3001), False, 'import os\n'), ((7854, 7880), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (7870, 7880), False, 'import os\n'), ((8112, 8131), 'astropy.io.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (8121, 8131), False, 'from astropy.io import fits\n'), ((10694, 10737), 'astropy.time.Time', 'Time', (['"""2000-01-01T12:01:09.184"""'], {'scale': '"""tt"""'}), "('2000-01-01T12:01:09.184', scale='tt')\n", (10698, 10737), False, 'from astropy.time import Time\n'), ((11978, 12004), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (11994, 12004), False, 'import 
os\n'), ((17249, 17275), 'os.path.basename', 'os.path.basename', (['files[0]'], {}), '(files[0])\n', (17265, 17275), False, 'import os\n'), ((17928, 18013), 'sunpy.util.exceptions.warn_user', 'warn_user', (['f"""File {afile} does not look like a SUVI L2 HDR composite. Skipping."""'], {}), "(f'File {afile} does not look like a SUVI L2 HDR composite. Skipping.'\n )\n", (17937, 18013), False, 'from sunpy.util.exceptions import warn_user\n'), ((19016, 19088), 'sunpy.util.exceptions.warn_user', 'warn_user', (['f"""File {afile} does not look like a SUVI L1b file. Skipping."""'], {}), "(f'File {afile} does not look like a SUVI L1b file. Skipping.')\n", (19025, 19088), False, 'from sunpy.util.exceptions import warn_user\n'), ((2366, 2390), 'gzip.open', 'gzip.open', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2375, 2390), False, 'import gzip\n'), ((8048, 8074), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (8064, 8074), False, 'import os\n'), ((9557, 9578), 'numpy.longlong', 'numpy.longlong', (['value'], {}), '(value)\n', (9571, 9578), False, 'import numpy\n'), ((17351, 17377), 'os.path.basename', 'os.path.basename', (['files[0]'], {}), '(files[0])\n', (17367, 17377), False, 'import os\n'), ((18251, 18284), 'sunkit_instruments.suvi.suvi.despike_l1b_array', 'despike_l1b_array', (['data', 'dqf_mask'], {}), '(data, dqf_mask)\n', (18268, 18284), False, 'from sunkit_instruments.suvi.suvi import despike_l1b_array\n'), ((9706, 9726), 'numpy.float64', 'numpy.float64', (['value'], {}), '(value)\n', (9719, 9726), False, 'import numpy\n'), ((17715, 17738), 'os.path.basename', 'os.path.basename', (['afile'], {}), '(afile)\n', (17731, 17738), False, 'import os\n'), ((18086, 18109), 'os.path.basename', 'os.path.basename', (['afile'], {}), '(afile)\n', (18102, 18109), False, 'import os\n'), ((2432, 2446), 'pathlib.Path', 'Path', (['temp_dir'], {}), '(temp_dir)\n', (2436, 2446), False, 'from pathlib import Path\n')] |
from aide_design.shared.units import unit_registry as u
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from pathlib import Path
def ftime(data_file_path, start, end=-1):
    """This function extracts the column of times from a ProCoDA data file.

    Parameters
    ----------
    data_file_path : string
        File path. If the file is in the working directory, then the file name
        is sufficient.
    start : int or float
        Index of first row of data to extract from the data file
    end : int or float, optional
        Index of last row of data to extract from the data
        Defaults to -1, which extracts all the data in the file

    Returns
    -------
    numpy array
        Experimental times starting at 0 day with units of days.

    Examples
    --------
    ftime("Reactor_data.txt", 0)
    """
    start = int(start)
    end = int(end)
    df = pd.read_csv(data_file_path, delimiter='\t')
    if end == -1:
        # Fix: slicing with end=-1 silently dropped the final row, although
        # -1 is documented to mean "all the data in the file".
        end = len(df)
    start_time = pd.to_numeric(df.iloc[start, 0]) * u.day
    day_times = pd.to_numeric(df.iloc[start:end, 0])
    # Shift so the extracted series starts at 0 day.
    time_data = np.subtract(np.array(day_times) * u.day, start_time)
    return time_data
def column_of_data(data_file_path, start, column, end="-1", units=""):
    """This function extracts a column of data from a ProCoDA data file.

    Parameters
    ----------
    data_file_path : string
        File path. If the file is in the working directory, then the file name
        is sufficient.
    start : int
        Index of first row of data to extract from the data file
    end : int, optional
        Index of last row of data to extract from the data
        Defaults to -1, which extracts all the data in the file
    column : int or string
        int:
            Index of the column that you want to extract. Column 0 is time.
            The first data column is column 1.
        string:
            Name of the column header that you want to extract
    units : string, optional
        The units you want to apply to the data, e.g. 'mg/L'.
        Defaults to "" which indicates no units

    Returns
    -------
    numpy array
        Experimental data with the units applied.

    Examples
    --------
    column_of_data("Reactor_data.txt", 0, 1, -1, "mg/L")
    """
    start = int(start)
    end = int(end)
    df = pd.read_csv(data_file_path, delimiter='\t')
    if end == -1:
        # Fix: slicing with end=-1 silently dropped the final row, although
        # -1 is documented to mean "all the data in the file".
        end = len(df)
    if isinstance(column, int):
        data = np.array(pd.to_numeric(df.iloc[start:end, column]))
    else:
        # Fix: the original evaluated the name-based lookup but never assigned
        # it, so `return data` raised NameError for string columns; it also
        # ignored start/end for this case.
        data = np.array(pd.to_numeric(df[column][start:end]))
    if units != "":
        data = data * u(units)
    return data
def notes(data_file_path):
    """This function extracts any experimental notes from a ProCoDA data file.

    Parameters
    ----------
    data_file_path : string
        File path. If the file is in the working directory, then the file name
        is sufficient.

    Returns
    -------
    dataframe
        The rows of the data file that contain text notes inserted during the
        experiment. Use this to identify the section of the data file that you
        want to extract.
    """
    df = pd.read_csv(data_file_path, delimiter='\t')
    # A note is any row whose first column contains a letter. Fixes two
    # issues: the original passed '[A-Z]' as the positional `case` argument
    # of str.contains (so uppercase was never matched), and numeric cells
    # (parsed as floats) produced NaN in the mask; na=False treats them as
    # non-notes instead of breaking the boolean indexing.
    # The final row is excluded, matching the original behavior
    # (presumably because it may be an incomplete write -- TODO confirm).
    text_row = df.iloc[0:-1, 0].str.contains('[a-zA-Z]', na=False)
    text_row_index = text_row.index[text_row].tolist()
    note_rows = df.loc[text_row_index]
    return note_rows
def read_state(dates, state, column, units="", path="", extension=".xls"):
    """Reads a ProCoDA file and outputs the data column and time vector for
    each iteration of the given state.

    Parameters
    ----------
    dates : string (list)
        A list of dates or single date for which data was recorded, in
        the form "M-D-Y"
    state : int
        The state ID number for which data should be extracted
    column : int or string
        int:
            Index of the column that you want to extract. Column 0 is time.
            The first data column is column 1.
        string:
            Name of the column header that you want to extract
    units : string, optional
        The units you want to apply to the data, e.g. 'mg/L'.
        Defaults to "" which indicates no units
    path : string, optional
        Optional argument of the path to the folder containing your ProCoDA
        files. Defaults to the current directory if no argument is passed in
    extension : string, optional
        The file extension of the tab delimited file. Defaults to ".xls" if
        no argument is passed in

    Returns
    -------
    time : numpy array
        Times corresponding to the data (with units)
    data : numpy array
        Data in the given column during the given state with units

    Examples
    --------
    time, data = read_state(["6-19-2013", "6-20-2013"], 1, 28, "mL/s")
    """
    data_agg = []
    day = 0
    first_day = True
    overnight = False
    if not isinstance(dates, list):
        dates = [dates]
    for d in dates:
        state_file = path + "statelog " + d + extension
        data_file = path + "datalog " + d + extension
        states = np.array(pd.read_csv(state_file, delimiter='\t'))
        data = np.array(pd.read_csv(data_file, delimiter='\t'))
        # Occurrences of the state: a start is a state-log row matching the
        # state; its end is the time of the *next* state-log entry, found by
        # shifting the start mask right by one row.
        state_start_idx = states[:, 1] == state
        state_start = states[state_start_idx, 0]
        state_end_idx = np.append([False], state_start_idx[0:(np.size(state_start_idx)-1)])
        state_end = states[state_end_idx, 0]
        if overnight:
            # The state was still active at midnight: it "begins" at time 0
            # and the first state-log entry of this day ends it.
            state_start = np.insert(state_start, 0, 0)
            state_end = np.insert(state_end, 0, states[0, 0])
        if state_start_idx[-1]:
            # The state is still active at the end of the log; close the last
            # occurrence at the final recorded data time. (Fix: state_end is
            # a numpy array, which has no .append method, and the original
            # indexed data[0, -1] -- first row, last column -- instead of the
            # last timestamp.)
            state_end = np.append(state_end, data[-1, 0])
        # Find the data-row index range corresponding to each occurrence.
        data_start = []
        data_end = []
        for i in range(np.size(state_start)):
            add_start = True
            for j in range(np.size(data[:, 0])):
                if (data[j, 0] > state_start[i]) and add_start:
                    data_start.append(j)
                    add_start = False
                if (data[j, 0] > state_end[i]):
                    data_end.append(j-1)
                    break
            else:
                # Fix: if no data point lies beyond this occurrence's end
                # time, data_end was never filled, leaving it shorter than
                # data_start and crashing the extraction below. Close the
                # occurrence at the end of the file instead.
                if not add_start:
                    data_end.append(np.size(data[:, 0]) - 1)
        if first_day:
            start_time = data[1, 0]
        # Extract data between each start/end pair, offset by whole days.
        for i in range(np.size(data_start)):
            t = data[data_start[i]:data_end[i], 0] + day - start_time
            if isinstance(column, int):
                c = data[data_start[i]:data_end[i], column]
            else:
                # NOTE(review): name-based indexing of a plain ndarray will
                # raise; presumably columns are passed by index -- confirm.
                c = data[column][data_start[i]:data_end[i]]
            if overnight and i == 0:
                # Continuation of yesterday's last occurrence: extend that
                # segment rather than starting a new one. (Fix: the original
                # overwrote the whole data_agg list with a flattened array.)
                data_agg[-1] = np.vstack((data_agg[-1], np.vstack((t, c)).T))
            else:
                data_agg.append(np.vstack((t, c)).T)
        day += 1
        if first_day:
            first_day = False
        # Fix: recompute per day instead of latching True forever, so a day
        # that ends outside the state does not fabricate an occurrence at
        # midnight of the next day.
        overnight = bool(state_start_idx[-1])
    data_agg = np.vstack(data_agg)
    if units != "":
        return data_agg[:, 0]*u.day, data_agg[:, 1]*u(units)
    else:
        return data_agg[:, 0]*u.day, data_agg[:, 1]
def average_state(dates, state, column, units="", path="", extension=".xls"):
    """Outputs the average value of the data for each instance of a state in
    the given ProCoDA files

    Parameters
    ----------
    dates : string (list)
        A list of dates or single date for which data was recorded, in
        the form "M-D-Y"
    state : int
        The state ID number for which data should be extracted
    column : int or string
        int:
            Index of the column that you want to extract. Column 0 is time.
            The first data column is column 1.
        string:
            Name of the column header that you want to extract
    units : string, optional
        The units you want to apply to the data, e.g. 'mg/L'.
        Defaults to "" which indicates no units
    path : string, optional
        Optional argument of the path to the folder containing your ProCoDA
        files. Defaults to the current directory if no argument is passed in
    extension : string, optional
        The file extension of the tab delimited file. Defaults to ".xls" if
        no argument is passed in

    Returns
    -------
    float list
        A list of averages for each instance of the given state

    Examples
    --------
    data_avgs = average_state(["6-19-2013", "6-20-2013"], 1, 28, "mL/s")
    """
    data_agg = []
    overnight = False
    if not isinstance(dates, list):
        dates = [dates]
    for d in dates:
        state_file = path + "statelog " + d + extension
        data_file = path + "datalog " + d + extension
        states = np.array(pd.read_csv(state_file, delimiter='\t'))
        data = np.array(pd.read_csv(data_file, delimiter='\t'))
        # Occurrences of the state: a start is a state-log row matching the
        # state; its end is the time of the *next* state-log entry.
        state_start_idx = states[:, 1] == state
        state_start = states[state_start_idx, 0]
        state_end_idx = np.append([False], state_start_idx[0:(np.size(state_start_idx)-1)])
        state_end = states[state_end_idx, 0]
        if overnight:
            # The state was still active at midnight: it "begins" at time 0
            # and the first state-log entry of this day ends it.
            state_start = np.insert(state_start, 0, 0)
            state_end = np.insert(state_end, 0, states[0, 0])
        if state_start_idx[-1]:
            # The state is still active at the end of the log; close the last
            # occurrence at the final recorded data time. (Fix: state_end is
            # a numpy array, which has no .append method, and the original
            # indexed data[0, -1] instead of the last timestamp.)
            state_end = np.append(state_end, data[-1, 0])
        # Find the data-row index range corresponding to each occurrence.
        data_start = []
        data_end = []
        for i in range(np.size(state_start)):
            add_start = True
            for j in range(np.size(data[:, 0])):
                if (data[j, 0] > state_start[i]) and add_start:
                    data_start.append(j)
                    add_start = False
                if (data[j, 0] > state_end[i]):
                    data_end.append(j-1)
                    break
            else:
                # Fix: keep data_end the same length as data_start when the
                # occurrence runs past the last data point.
                if not add_start:
                    data_end.append(np.size(data[:, 0]) - 1)
        # Extract the data between each start/end pair.
        for i in range(np.size(data_start)):
            if isinstance(column, int):
                c = data[data_start[i]:data_end[i], column]
            else:
                # NOTE(review): name-based indexing of a plain ndarray will
                # raise; presumably columns are passed by index -- confirm.
                c = data[column][data_start[i]:data_end[i]]
            if overnight and i == 0:
                # Continuation of yesterday's last occurrence. (Fix: the
                # original overwrote the data_agg list with the result.)
                data_agg[-1] = np.append(data_agg[-1], c)
            else:
                data_agg.append(c)
        # Fix: recompute per day instead of latching True forever.
        overnight = bool(state_start_idx[-1])
    # Fix: np.size counted individual elements for equal-length segments;
    # len counts segments.
    averages = np.zeros(len(data_agg))
    for i in range(len(data_agg)):
        averages[i] = np.average(data_agg[i])
    if units != "":
        return averages*u(units)
    else:
        return averages
def perform_function_on_state(func, dates, state, column, units="", path="", extension=".xls"):
    """Performs the function given on each state of the data for the given state
    in the given column and outputs the result for each instance of the state

    Parameters
    ----------
    func : function
        A function which will be applied to data from each instance of the state
    dates : string (list)
        A list of dates or single date for which data was recorded, in
        the form "M-D-Y"
    state : int
        The state ID number for which data should be extracted
    column : int or string
        int:
            Index of the column that you want to extract. Column 0 is time.
            The first data column is column 1.
        string:
            Name of the column header that you want to extract
    units : string, optional
        The units you want to apply to the data, e.g. 'mg/L'.
        Defaults to "" which indicates no units
    path : string, optional
        Optional argument of the path to the folder containing your ProCoDA
        files. Defaults to the current directory if no argument is passed in
    extension : string, optional
        The file extension of the tab delimited file. Defaults to ".xls" if
        no argument is passed in

    Returns
    -------
    list
        The outputs of the given function for each instance of the given state

    Requires
    --------
    func takes in a list of data with units and outputs the correct units

    Examples
    --------
    def avg_with_units(lst):
        num = np.size(lst)
        acc = 0
        for i in lst:
            acc = i + acc
        return acc / num

    data_avgs = perform_function_on_state(avg_with_units, ["6-19-2013", "6-20-2013"], 1, 28, "mL/s")
    """
    data_agg = []
    overnight = False
    if not isinstance(dates, list):
        dates = [dates]
    for d in dates:
        state_file = path + "statelog " + d + extension
        data_file = path + "datalog " + d + extension
        states = np.array(pd.read_csv(state_file, delimiter='\t'))
        data = np.array(pd.read_csv(data_file, delimiter='\t'))
        # Occurrences of the state: a start is a state-log row matching the
        # state; its end is the time of the *next* state-log entry.
        state_start_idx = states[:, 1] == state
        state_start = states[state_start_idx, 0]
        state_end_idx = np.append([False], state_start_idx[0:(np.size(state_start_idx)-1)])
        state_end = states[state_end_idx, 0]
        if overnight:
            # The state was still active at midnight: it "begins" at time 0
            # and the first state-log entry of this day ends it.
            state_start = np.insert(state_start, 0, 0)
            state_end = np.insert(state_end, 0, states[0, 0])
        if state_start_idx[-1]:
            # The state is still active at the end of the log; close the last
            # occurrence at the final recorded data time. (Fix: state_end is
            # a numpy array, which has no .append method, and the original
            # indexed data[0, -1] instead of the last timestamp.)
            state_end = np.append(state_end, data[-1, 0])
        # Find the data-row index range corresponding to each occurrence.
        data_start = []
        data_end = []
        for i in range(np.size(state_start)):
            add_start = True
            for j in range(np.size(data[:, 0])):
                if (data[j, 0] > state_start[i]) and add_start:
                    data_start.append(j)
                    add_start = False
                if (data[j, 0] > state_end[i]):
                    data_end.append(j-1)
                    break
            else:
                # Fix: keep data_end the same length as data_start when the
                # occurrence runs past the last data point.
                if not add_start:
                    data_end.append(np.size(data[:, 0]) - 1)
        # Extract the data between each start/end pair.
        for i in range(np.size(data_start)):
            if isinstance(column, int):
                c = data[data_start[i]:data_end[i], column]
            else:
                # NOTE(review): name-based indexing of a plain ndarray will
                # raise; presumably columns are passed by index -- confirm.
                c = data[column][data_start[i]:data_end[i]]
            if overnight and i == 0:
                # Continuation of yesterday's last occurrence. (Fix: the
                # original overwrote the data_agg list with the result.)
                data_agg[-1] = np.append(data_agg[-1], c)
            else:
                data_agg.append(c)
        # Fix: recompute per day instead of latching True forever.
        overnight = bool(state_start_idx[-1])
    # Fix: np.size counted individual elements for equal-length segments;
    # len counts segments.
    output = np.zeros(len(data_agg))
    for i in range(len(data_agg)):
        if units != "":
            output[i] = func(data_agg[i]*u(units)).magnitude
        else:
            output[i] = func(data_agg[i])
    if units != "":
        # NOTE(review): reuses the loop's final value of i to recover the
        # output units from the last segment -- assumes all segments share
        # the same units; confirm.
        return output*func(data_agg[i]*u(units)).units
    else:
        return output
def plot_state(dates, state, column, path="", extension=".xls"):
    """Reads a ProCoDA file and plots the data column for each iteration of
    the given state.

    Parameters
    ----------
    dates : string (list)
        A list of dates or single date for which data was recorded, in
        the form "M-D-Y"
    state : int
        The state ID number for which data should be plotted
    column : int or string
        int:
            Index of the column that you want to extract. Column 0 is time.
            The first data column is column 1.
        string:
            Name of the column header that you want to extract
    path : string, optional
        Optional argument of the path to the folder containing your ProCoDA
        files. Defaults to the current directory if no argument is passed in
    extension : string, optional
        The file extension of the tab delimited file. Defaults to ".xls" if
        no argument is passed in

    Returns
    -------
    None

    Examples
    --------
    plot_state(["6-19-2013", "6-20-2013"], 1, 28)
    """
    data_agg = []
    day = 0
    first_day = True
    overnight = False
    if not isinstance(dates, list):
        dates = [dates]
    for d in dates:
        state_file = path + "statelog " + d + extension
        data_file = path + "datalog " + d + extension
        states = np.array(pd.read_csv(state_file, delimiter='\t'))
        data = np.array(pd.read_csv(data_file, delimiter='\t'))
        # Occurrences of the state: a start is a state-log row matching the
        # state; its end is the time of the *next* state-log entry.
        state_start_idx = states[:, 1] == state
        state_start = states[state_start_idx, 0]
        state_end_idx = np.append([False], state_start_idx[0:(np.size(state_start_idx)-1)])
        state_end = states[state_end_idx, 0]
        if overnight:
            # The state was still active at midnight: it "begins" at time 0
            # and the first state-log entry of this day ends it.
            state_start = np.insert(state_start, 0, 0)
            state_end = np.insert(state_end, 0, states[0, 0])
        if state_start_idx[-1]:
            # The state is still active at the end of the log; close the last
            # occurrence at the final recorded data time. (Fix: state_end is
            # a numpy array, which has no .append method, and the original
            # indexed data[0, -1] instead of the last timestamp.)
            state_end = np.append(state_end, data[-1, 0])
        # Find the data-row index range corresponding to each occurrence.
        data_start = []
        data_end = []
        for i in range(np.size(state_start)):
            add_start = True
            for j in range(np.size(data[:, 0])):
                if (data[j, 0] > state_start[i]) and add_start:
                    data_start.append(j)
                    add_start = False
                if (data[j, 0] > state_end[i]):
                    data_end.append(j-1)
                    break
            else:
                # Fix: keep data_end the same length as data_start when the
                # occurrence runs past the last data point.
                if not add_start:
                    data_end.append(np.size(data[:, 0]) - 1)
        if first_day:
            start_time = data[1, 0]
        # Extract data between each start/end pair, offset by whole days.
        for i in range(np.size(data_start)):
            t = data[data_start[i]:data_end[i], 0] + day - start_time
            if isinstance(column, int):
                c = data[data_start[i]:data_end[i], column]
            else:
                # NOTE(review): name-based indexing of a plain ndarray will
                # raise; presumably columns are passed by index -- confirm.
                c = data[column][data_start[i]:data_end[i]]
            if overnight and i == 0:
                # Continuation of yesterday's last occurrence. (Fix: the
                # original overwrote the whole data_agg list.)
                data_agg[-1] = np.vstack((data_agg[-1], np.vstack((t, c)).T))
            else:
                data_agg.append(np.vstack((t, c)).T)
        day += 1
        if first_day:
            first_day = False
        # Fix: recompute per day instead of latching True forever.
        overnight = bool(state_start_idx[-1])
    # One line per occurrence, each re-zeroed to its own start time.
    plt.figure()
    for segment in data_agg:
        t = segment[:, 0] - segment[0, 0]
        plt.plot(t, segment[:, 1])
    plt.show()
def read_state_with_metafile(func, state, column, path, metaids=None,
                             extension=".xls", units=""):
    """Takes in a ProCoDA meta file and performs a function for all data of a
    certain state in each of the experiments (denoted by file paths in the
    metafile)

    Parameters
    ----------
    func : function
        A function which will be applied to data from each instance of the state
    state : int
        The state ID number for which data should be extracted
    column : int or string
        int:
            Index of the column that you want to extract. Column 0 is time.
            The first data column is column 1.
        string:
            Name of the column header that you want to extract
    path : string
        Path to your ProCoDA metafile (must be tab-delimited)
    metaids : string list, optional
        a list of the experiment IDs you'd like to analyze from the metafile
    extension : string, optional
        The file extension of the tab delimited file. Defaults to ".xls" if
        no argument is passed in
    units : string, optional
        The units you want to apply to the data, e.g. 'mg/L'.
        Defaults to "" which indicates no units

    Returns
    -------
    ids : string list
        The list of experiment ids given in the metafile
    outputs : list
        The outputs of the given function for each experiment

    Examples
    --------
    def avg_with_units(lst):
        num = np.size(lst)
        acc = 0
        for i in lst:
            acc = i + acc
        return acc / num

    path = "../tests/data/Test Meta File.txt"
    ids, answer = read_state_with_metafile(avg_with_units, 1, 28, path, [], ".xls", "mg/L")
    """
    outputs = []
    metafile = np.array(pd.read_csv(path, delimiter='\t', header=None))
    # Row 0 of the metafile holds the base data folder; experiment rows
    # start at index 1. Column 0: id, 1: start date, 3: duration (days),
    # 4: experiment subfolder.
    ids = metafile[1:, 0]
    if not isinstance(ids[0], str):
        ids = list(map(str, ids))
    if metaids:
        # Fix: the original appended metafile[i, 4] -- the *previous* row
        # (the header row for i == 0) -- an off-by-one. Keeping the selected
        # row indices also keeps the date/duration lookups below aligned
        # with the selected experiments.
        rows = [i + 1 for i in range(len(ids)) if ids[i] in metaids]
    else:
        rows = list(range(1, len(ids) + 1))
    basepath = os.path.join(os.path.split(path)[0], metafile[0, 4])
    # use a loop to evaluate each selected experiment in the metafile
    for r in rows:
        # get the start date for experiment r and zero-pad it so the
        # strptime format strings match
        day1 = metafile[r, 1]
        if not (day1[2] == "-" or day1[2] == "/"):
            day1 = "0" + day1
        if not (day1[5] == "-" or day1[5] == "/"):
            day1 = day1[:3] + "0" + day1[3:]
        if day1[2] == "-":
            dt = datetime.strptime(day1, "%m-%d-%Y")
        else:
            # NOTE(review): slash dates are parsed with a two-digit year
            # ("%y") while dash dates use four digits -- confirm intended.
            dt = datetime.strptime(day1, "%m/%d/%y")
        duration = int(metafile[r, 3])
        # Build the list of dates in the non-padded "M-D-Y" form that the
        # datalog/statelog filenames use.
        date_list = []
        for _ in range(duration):
            curr_day = dt.strftime("%m-%d-%Y")
            if curr_day[3] == "0":
                curr_day = curr_day[:3] + curr_day[4:]
            if curr_day[0] == "0":
                curr_day = curr_day[1:]
            date_list.append(curr_day)
            dt = dt + timedelta(days=1)
        data_path = str(Path(os.path.join(basepath, metafile[r, 4]))) + os.sep
        _, data = read_state(date_list, state, column, units, data_path, extension)
        outputs.append(func(data))
    return ids, outputs
def write_calculations_to_csv(funcs, states, columns, path, headers, out_name,
                              metaids=None, extension=".xls"):
    """Apply functions to the data sets listed in a ProCoDA metafile and
    write one column of results per calculation to a tab-separated file.

    Parameters
    ----------
    funcs : function or list of functions
        Applied in order to the extracted data; a single function is reused
        for every calculation.
    states : string or list of strings
        State ID(s) for which data should be extracted; a single state is
        reused for every calculation.
    columns : int, string, or list of them
        Column index (0 is time, data columns start at 1) or column header
        name; a single value is reused for every calculation.
    path : string
        Path to the tab-delimited ProCoDA metafile.
    headers : string list
        Desired header for each calculation, in order.
    out_name : string
        Name (optionally with a relative path) for the output file.
    metaids : string list, optional
        Experiment IDs from the metafile to analyze; defaults to all.
    extension : string, optional
        File extension of the tab-delimited data files (default ".xls").

    Returns
    -------
    pandas.DataFrame
        The table written to ``out_name``: one row per experiment, the "ID"
        column first, then one column per calculation.
    """
    # NOTE: the default used to be the mutable literal ``metaids=[]``; a
    # None sentinel avoids sharing one list object across calls.
    if metaids is None:
        metaids = []
    # Broadcast single values so each header has its own func/state/column.
    if not isinstance(funcs, list):
        funcs = [funcs] * len(headers)
    if not isinstance(states, list):
        states = [states] * len(headers)
    if not isinstance(columns, list):
        columns = [columns] * len(headers)
    ids = []
    data_agg = []
    for i in range(len(headers)):
        ids, data = read_state_with_metafile(funcs[i], states[i], columns[i],
                                             path, metaids, extension)
        # Keep one row per calculation rather than flattening with
        # np.append; np.vstack below then lines up correctly even when
        # several headers/calculations are requested.
        data_agg.append(np.asarray(data))
    output = pd.DataFrame(data=np.vstack([list(ids)] + data_agg).T,
                          columns=["ID"] + headers)
    output.to_csv(out_name, sep='\t')
    return output
| [
"numpy.insert",
"pandas.read_csv",
"numpy.average",
"datetime.datetime.strptime",
"numpy.size",
"matplotlib.pyplot.plot",
"os.path.join",
"os.path.split",
"numpy.append",
"numpy.array",
"matplotlib.pyplot.figure",
"pandas.to_numeric",
"numpy.vstack",
"aide_design.shared.units.unit_registry... | [((1033, 1076), 'pandas.read_csv', 'pd.read_csv', (['data_file_path'], {'delimiter': '"""\t"""'}), "(data_file_path, delimiter='\\t')\n", (1044, 1076), True, 'import pandas as pd\n'), ((1149, 1185), 'pandas.to_numeric', 'pd.to_numeric', (['df.iloc[start:end, 0]'], {}), '(df.iloc[start:end, 0])\n', (1162, 1185), True, 'import pandas as pd\n'), ((2506, 2549), 'pandas.read_csv', 'pd.read_csv', (['data_file_path'], {'delimiter': '"""\t"""'}), "(data_file_path, delimiter='\\t')\n", (2517, 2549), True, 'import pandas as pd\n'), ((3459, 3502), 'pandas.read_csv', 'pd.read_csv', (['data_file_path'], {'delimiter': '"""\t"""'}), "(data_file_path, delimiter='\\t')\n", (3470, 3502), True, 'import pandas as pd\n'), ((7320, 7339), 'numpy.vstack', 'np.vstack', (['data_agg'], {}), '(data_agg)\n', (7329, 7339), True, 'import numpy as np\n'), ((18560, 18572), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18570, 18572), True, 'import matplotlib.pyplot as plt\n'), ((18660, 18670), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18668, 18670), True, 'import matplotlib.pyplot as plt\n'), ((20440, 20486), 'pandas.read_csv', 'pd.read_csv', (['path'], {'delimiter': '"""\t"""', 'header': 'None'}), "(path, delimiter='\\t', header=None)\n", (20451, 20486), True, 'import pandas as pd\n'), ((20502, 20520), 'numpy.array', 'np.array', (['metafile'], {}), '(metafile)\n', (20510, 20520), True, 'import numpy as np\n'), ((1094, 1126), 'pandas.to_numeric', 'pd.to_numeric', (['df.iloc[start, 0]'], {}), '(df.iloc[start, 0])\n', (1107, 1126), True, 'import pandas as pd\n'), ((5388, 5427), 'pandas.read_csv', 'pd.read_csv', (['state_file'], {'delimiter': '"""\t"""'}), "(state_file, delimiter='\\t')\n", (5399, 5427), True, 'import pandas as pd\n'), ((5443, 5481), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {'delimiter': '"""\t"""'}), "(data_file, delimiter='\\t')\n", (5454, 5481), True, 'import pandas as pd\n'), ((5500, 5516), 
'numpy.array', 'np.array', (['states'], {}), '(states)\n', (5508, 5516), True, 'import numpy as np\n'), ((5532, 5546), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (5540, 5546), True, 'import numpy as np\n'), ((9105, 9144), 'pandas.read_csv', 'pd.read_csv', (['state_file'], {'delimiter': '"""\t"""'}), "(state_file, delimiter='\\t')\n", (9116, 9144), True, 'import pandas as pd\n'), ((9160, 9198), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {'delimiter': '"""\t"""'}), "(data_file, delimiter='\\t')\n", (9171, 9198), True, 'import pandas as pd\n'), ((9217, 9233), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (9225, 9233), True, 'import numpy as np\n'), ((9249, 9263), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (9257, 9263), True, 'import numpy as np\n'), ((10900, 10917), 'numpy.size', 'np.size', (['data_agg'], {}), '(data_agg)\n', (10907, 10917), True, 'import numpy as np\n'), ((10938, 10955), 'numpy.size', 'np.size', (['data_agg'], {}), '(data_agg)\n', (10945, 10955), True, 'import numpy as np\n'), ((10980, 11003), 'numpy.average', 'np.average', (['data_agg[i]'], {}), '(data_agg[i])\n', (10990, 11003), True, 'import numpy as np\n'), ((13173, 13212), 'pandas.read_csv', 'pd.read_csv', (['state_file'], {'delimiter': '"""\t"""'}), "(state_file, delimiter='\\t')\n", (13184, 13212), True, 'import pandas as pd\n'), ((13228, 13266), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {'delimiter': '"""\t"""'}), "(data_file, delimiter='\\t')\n", (13239, 13266), True, 'import pandas as pd\n'), ((13285, 13301), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (13293, 13301), True, 'import numpy as np\n'), ((13317, 13331), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (13325, 13331), True, 'import numpy as np\n'), ((14966, 14983), 'numpy.size', 'np.size', (['data_agg'], {}), '(data_agg)\n', (14973, 14983), True, 'import numpy as np\n'), ((15004, 15021), 'numpy.size', 'np.size', (['data_agg'], {}), '(data_agg)\n', (15011, 
15021), True, 'import numpy as np\n'), ((16639, 16678), 'pandas.read_csv', 'pd.read_csv', (['state_file'], {'delimiter': '"""\t"""'}), "(state_file, delimiter='\\t')\n", (16650, 16678), True, 'import pandas as pd\n'), ((16694, 16732), 'pandas.read_csv', 'pd.read_csv', (['data_file'], {'delimiter': '"""\t"""'}), "(data_file, delimiter='\\t')\n", (16705, 16732), True, 'import pandas as pd\n'), ((16751, 16767), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (16759, 16767), True, 'import numpy as np\n'), ((16783, 16797), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (16791, 16797), True, 'import numpy as np\n'), ((18634, 18654), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'i[:, 1]'], {}), '(t, i[:, 1])\n', (18642, 18654), True, 'import matplotlib.pyplot as plt\n'), ((24649, 24676), 'numpy.append', 'np.append', (['data_agg', '[data]'], {}), '(data_agg, [data])\n', (24658, 24676), True, 'import numpy as np\n'), ((1215, 1234), 'numpy.array', 'np.array', (['day_times'], {}), '(day_times)\n', (1223, 1234), True, 'import numpy as np\n'), ((5883, 5911), 'numpy.insert', 'np.insert', (['state_start', '(0)', '(0)'], {}), '(state_start, 0, 0)\n', (5892, 5911), True, 'import numpy as np\n'), ((5936, 5973), 'numpy.insert', 'np.insert', (['state_end', '(0)', 'states[0, 0]'], {}), '(state_end, 0, states[0, 0])\n', (5945, 5973), True, 'import numpy as np\n'), ((6177, 6197), 'numpy.size', 'np.size', (['state_start'], {}), '(state_start)\n', (6184, 6197), True, 'import numpy as np\n'), ((6657, 6676), 'numpy.size', 'np.size', (['data_start'], {}), '(data_start)\n', (6664, 6676), True, 'import numpy as np\n'), ((9600, 9628), 'numpy.insert', 'np.insert', (['state_start', '(0)', '(0)'], {}), '(state_start, 0, 0)\n', (9609, 9628), True, 'import numpy as np\n'), ((9653, 9690), 'numpy.insert', 'np.insert', (['state_end', '(0)', 'states[0, 0]'], {}), '(state_end, 0, states[0, 0])\n', (9662, 9690), True, 'import numpy as np\n'), ((9894, 9914), 'numpy.size', 'np.size', 
(['state_start'], {}), '(state_start)\n', (9901, 9914), True, 'import numpy as np\n'), ((10374, 10393), 'numpy.size', 'np.size', (['data_start'], {}), '(data_start)\n', (10381, 10393), True, 'import numpy as np\n'), ((11049, 11057), 'aide_design.shared.units.unit_registry', 'u', (['units'], {}), '(units)\n', (11050, 11057), True, 'from aide_design.shared.units import unit_registry as u\n'), ((13668, 13696), 'numpy.insert', 'np.insert', (['state_start', '(0)', '(0)'], {}), '(state_start, 0, 0)\n', (13677, 13696), True, 'import numpy as np\n'), ((13721, 13758), 'numpy.insert', 'np.insert', (['state_end', '(0)', 'states[0, 0]'], {}), '(state_end, 0, states[0, 0])\n', (13730, 13758), True, 'import numpy as np\n'), ((13962, 13982), 'numpy.size', 'np.size', (['state_start'], {}), '(state_start)\n', (13969, 13982), True, 'import numpy as np\n'), ((14442, 14461), 'numpy.size', 'np.size', (['data_start'], {}), '(data_start)\n', (14449, 14461), True, 'import numpy as np\n'), ((17134, 17162), 'numpy.insert', 'np.insert', (['state_start', '(0)', '(0)'], {}), '(state_start, 0, 0)\n', (17143, 17162), True, 'import numpy as np\n'), ((17187, 17224), 'numpy.insert', 'np.insert', (['state_end', '(0)', 'states[0, 0]'], {}), '(state_end, 0, states[0, 0])\n', (17196, 17224), True, 'import numpy as np\n'), ((17428, 17448), 'numpy.size', 'np.size', (['state_start'], {}), '(state_start)\n', (17435, 17448), True, 'import numpy as np\n'), ((17908, 17927), 'numpy.size', 'np.size', (['data_start'], {}), '(data_start)\n', (17915, 17927), True, 'import numpy as np\n'), ((20839, 20858), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (20852, 20858), False, 'import os\n'), ((21351, 21386), 'datetime.datetime.strptime', 'datetime.strptime', (['day1', '"""%m-%d-%Y"""'], {}), "(day1, '%m-%d-%Y')\n", (21368, 21386), False, 'from datetime import datetime, timedelta\n'), ((21418, 21453), 'datetime.datetime.strptime', 'datetime.strptime', (['day1', '"""%m/%d/%y"""'], {}), "(day1, 
'%m/%d/%y')\n", (21435, 21453), False, 'from datetime import datetime, timedelta\n'), ((2634, 2675), 'pandas.to_numeric', 'pd.to_numeric', (['df.iloc[start:end, column]'], {}), '(df.iloc[start:end, column])\n', (2647, 2675), True, 'import pandas as pd\n'), ((2842, 2850), 'aide_design.shared.units.unit_registry', 'u', (['units'], {}), '(units)\n', (2843, 2850), True, 'from aide_design.shared.units import unit_registry as u\n'), ((2899, 2907), 'aide_design.shared.units.unit_registry', 'u', (['units'], {}), '(units)\n', (2900, 2907), True, 'from aide_design.shared.units import unit_registry as u\n'), ((6256, 6275), 'numpy.size', 'np.size', (['data[:, 0]'], {}), '(data[:, 0])\n', (6263, 6275), True, 'import numpy as np\n'), ((7412, 7420), 'aide_design.shared.units.unit_registry', 'u', (['units'], {}), '(units)\n', (7413, 7420), True, 'from aide_design.shared.units import unit_registry as u\n'), ((9973, 9992), 'numpy.size', 'np.size', (['data[:, 0]'], {}), '(data[:, 0])\n', (9980, 9992), True, 'import numpy as np\n'), ((14041, 14060), 'numpy.size', 'np.size', (['data[:, 0]'], {}), '(data[:, 0])\n', (14048, 14060), True, 'import numpy as np\n'), ((17507, 17526), 'numpy.size', 'np.size', (['data[:, 0]'], {}), '(data[:, 0])\n', (17514, 17526), True, 'import numpy as np\n'), ((21903, 21920), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (21912, 21920), False, 'from datetime import datetime, timedelta\n'), ((24709, 24735), 'numpy.vstack', 'np.vstack', (['(ids, data_agg)'], {}), '((ids, data_agg))\n', (24718, 24735), True, 'import numpy as np\n'), ((2799, 2840), 'pandas.to_numeric', 'pd.to_numeric', (['df.iloc[start:end, column]'], {}), '(df.iloc[start:end, column])\n', (2812, 2840), True, 'import pandas as pd\n'), ((7015, 7042), 'numpy.size', 'np.size', (['data_agg[-1][:, 0]'], {}), '(data_agg[-1][:, 0])\n', (7022, 7042), True, 'import numpy as np\n'), ((10662, 10686), 'numpy.size', 'np.size', (['data_agg[-1][:]'], {}), '(data_agg[-1][:])\n', 
(10669, 10686), True, 'import numpy as np\n'), ((14730, 14754), 'numpy.size', 'np.size', (['data_agg[-1][:]'], {}), '(data_agg[-1][:])\n', (14737, 14754), True, 'import numpy as np\n'), ((18266, 18293), 'numpy.size', 'np.size', (['data_agg[-1][:, 0]'], {}), '(data_agg[-1][:, 0])\n', (18273, 18293), True, 'import numpy as np\n'), ((21946, 21978), 'os.path.join', 'os.path.join', (['basepath', 'paths[i]'], {}), '(basepath, paths[i])\n', (21958, 21978), False, 'import os\n'), ((5759, 5783), 'numpy.size', 'np.size', (['state_start_idx'], {}), '(state_start_idx)\n', (5766, 5783), True, 'import numpy as np\n'), ((7081, 7098), 'numpy.vstack', 'np.vstack', (['(t, c)'], {}), '((t, c))\n', (7090, 7098), True, 'import numpy as np\n'), ((7152, 7169), 'numpy.vstack', 'np.vstack', (['(t, c)'], {}), '((t, c))\n', (7161, 7169), True, 'import numpy as np\n'), ((9476, 9500), 'numpy.size', 'np.size', (['state_start_idx'], {}), '(state_start_idx)\n', (9483, 9500), True, 'import numpy as np\n'), ((13544, 13568), 'numpy.size', 'np.size', (['state_start_idx'], {}), '(state_start_idx)\n', (13551, 13568), True, 'import numpy as np\n'), ((15089, 15097), 'aide_design.shared.units.unit_registry', 'u', (['units'], {}), '(units)\n', (15090, 15097), True, 'from aide_design.shared.units import unit_registry as u\n'), ((15225, 15233), 'aide_design.shared.units.unit_registry', 'u', (['units'], {}), '(units)\n', (15226, 15233), True, 'from aide_design.shared.units import unit_registry as u\n'), ((17010, 17034), 'numpy.size', 'np.size', (['state_start_idx'], {}), '(state_start_idx)\n', (17017, 17034), True, 'import numpy as np\n'), ((18332, 18349), 'numpy.vstack', 'np.vstack', (['(t, c)'], {}), '((t, c))\n', (18341, 18349), True, 'import numpy as np\n'), ((18403, 18420), 'numpy.vstack', 'np.vstack', (['(t, c)'], {}), '((t, c))\n', (18412, 18420), True, 'import numpy as np\n')] |
from __future__ import annotations
from typing import List, Tuple, Union
import numpy as np
# now just for the 2x2 mat or 1x2 vec
class Matrix():
    """Small fixed-size linear-algebra helper backed by a numpy array.

    Currently intended only for 2x2 matrices (``data_type='mat'``) and
    2-element column vectors (``data_type='vec'``); most of the specialised
    methods assert those shapes.
    """
    def __init__(self,
                 arr: Union[List[float], np.ndarray],
                 data_type: str = 'mat',
                 row: int = 2,
                 col: int = 2):
        """Build a matrix/vector from a flat row-major list or array.

        Parameters
        ----------
        arr : list of float or np.ndarray
            Element values in row-major order.
        data_type : str
            'mat' for a ``row x col`` matrix, 'vec' for a ``row x 1`` column
            vector (``col`` is ignored for vectors).
        row, col : int
            Target shape.
        """
        self._data_type: str = data_type
        # vectors are always stored as column vectors (row x 1)
        self._val: np.ndarray = np.array(arr).reshape(
            row, 1 if data_type == 'vec' else col)

    # unary operator
    def __neg__(self) -> Matrix:
        return Matrix(-self._val, self._data_type)

    def __pos__(self) -> Matrix:
        return Matrix(self._val, self._data_type)

    def __invert__(self):
        raise NotImplementedError

    # binary operator
    def __add__(self, other: Union[float, int, Matrix]) -> Matrix:
        """Element-wise addition with a scalar or another Matrix."""
        if isinstance(other, float) or isinstance(other, int):
            return Matrix(self._val + other, self._data_type)
        else:
            return Matrix(self._val + other._val, self._data_type)

    def __sub__(self, other: Union[float, int, Matrix]) -> Matrix:
        """Element-wise subtraction of a scalar or another Matrix."""
        if isinstance(other, float) or isinstance(other, int):
            return Matrix(self._val - other, self._data_type)
        else:
            return Matrix(self._val - other._val, self._data_type)

    def __mul__(self, other: Union[float, int, Matrix]) -> Matrix:
        """Scalar multiplication, or the matrix product for Matrix operands."""
        if isinstance(other, float) or isinstance(other, int):
            return Matrix(self._val * other, self._data_type)
        else:
            # matrix product: inner dimensions must agree
            assert self._val.shape[1] == other._val.shape[0]
            return Matrix(self._val @ other._val, other._data_type)

    def __truediv__(self, other: float) -> Matrix:
        """Scalar division; rejects (near-)zero divisors."""
        assert not np.isclose(other, 0)
        return Matrix(self._val / other, self._data_type)

    def __floordiv__(self, other):
        raise NotImplementedError

    def __mod__(self, other):
        raise NotImplementedError

    def __pow__(self, other):
        raise NotImplementedError

    def __rshift__(self, other):
        raise NotImplementedError

    def __lshift__(self, other):
        raise NotImplementedError

    def __and__(self, other):
        raise NotImplementedError

    def __or__(self, other):
        raise NotImplementedError

    def __xor__(self, other):
        raise NotImplementedError

    # comparsion operator
    def __lt__(self, other):
        raise NotImplementedError

    def __gt__(self, other):
        raise NotImplementedError

    def __le__(self, other):
        raise NotImplementedError

    def __ge__(self, other):
        raise NotImplementedError

    def __eq__(self, other) -> bool:
        """Approximate element-wise equality (all entries np.isclose)."""
        return np.isclose(self._val, other._val).all()

    def __ne__(self, other) -> bool:
        return not np.isclose(self._val, other._val).all()

    # assignment operator
    def __isub__(self, other: Union[float, int, Matrix]) -> Matrix:
        """In-place subtraction; mutates this object and returns it."""
        if isinstance(other, float) or isinstance(other, int):
            self._val -= other
        else:
            self._val -= other._val
        return self

    def __iadd__(self, other: Union[float, int, Matrix]) -> Matrix:
        """In-place addition; mutates this object and returns it."""
        if isinstance(other, float) or isinstance(other, int):
            self._val += other
        else:
            self._val += other._val
        return self

    def __imul__(self, other: Union[float, int, Matrix]) -> Matrix:
        """In-place scalar multiply, or in-place matrix product."""
        if isinstance(other, float) or isinstance(other, int):
            self._val *= other
        else:
            assert self._val.shape[1] == other._val.shape[0]
            self._val = self._val @ other._val
        return self

    def __idiv__(self, other: float) -> Matrix:
        # NOTE(review): ``__idiv__`` is a Python 2 hook that Python 3 never
        # calls -- ``/=`` dispatches to __itruediv__ and falls back to
        # __truediv__ (rebinding instead of mutating). Kept as-is to avoid
        # changing observable aliasing behavior; confirm intent before
        # renaming it to __itruediv__.
        assert not np.isclose(other, 0)
        self._val /= other
        return self

    def __ifloordiv__(self, other):
        raise NotImplementedError

    def __imod__(self, other):
        raise NotImplementedError

    def __ipow__(self, other):
        raise NotImplementedError

    def __irshift__(self, other):
        raise NotImplementedError

    def __ilshift__(self, other):
        raise NotImplementedError

    def __iand__(self, other):
        raise NotImplementedError

    def __ior__(self, other):
        raise NotImplementedError

    def __ixor__(self, other):
        raise NotImplementedError

    def __str__(self) -> str:
        """One text line per matrix row."""
        res: str = ''
        for i in self._val:
            res += str(i) + '\n'
        return res

    @property
    def x(self) -> float:
        '''extern interface for the 2d vector's x pos

        Returns
        -------
        float
            x pos of the vector
        '''
        # [0, 0] is valid for both the (2, 1) and (1, 2) layouts
        return self._val[0, 0]

    @x.setter
    def x(self, val: float):
        self._val[0, 0] = val

    @property
    def y(self) -> float:
        '''extern interface for the 2d vector's y pos

        Returns
        -------
        float
            y pos of the vector
        '''
        if self._val.shape == (2, 1):
            return self._val[1, 0]
        elif self._val.shape == (1, 2):
            return self._val[0, 1]
        else:
            raise ValueError

    @y.setter
    def y(self, val: float):
        # FIX: mirror the getter -- previously only the (2, 1) layout was
        # handled and a (1, 2) vector raised IndexError.
        if self._val.shape == (2, 1):
            self._val[1, 0] = val
        elif self._val.shape == (1, 2):
            self._val[0, 1] = val
        else:
            raise ValueError

    @property
    def shape(self) -> Tuple[int, ...]:
        return self._val.shape

    @property
    def size(self) -> int:
        return self._val.size

    @property
    def row1(self) -> Matrix:
        """First row of a 2x2 matrix, as a vector."""
        assert self._val.shape == (2, 2)
        return Matrix(self._val[0], 'vec')

    @property
    def row2(self) -> Matrix:
        """Second row of a 2x2 matrix, as a vector."""
        assert self._val.shape == (2, 2)
        return Matrix(self._val[1], 'vec')

    def reshape(self, row: int, col: int) -> Matrix:
        """Reshape the underlying array in place and return self."""
        self._val = self._val.reshape(row, col)
        return self

    def value(self, row: int = 0, col: int = 0) -> float:
        """Return the entry at (row, col) of a 2x2 matrix."""
        assert self._val.shape == (2, 2)
        # FIX: bounds were off by one (``<= shape`` admitted an index one
        # past the end, deferring the failure to an IndexError).
        assert 0 <= row < self._val.shape[0]
        assert 0 <= col < self._val.shape[1]
        return self._val[row, col]

    def determinant(self) -> float:
        """Determinant of a 2x2 matrix."""
        assert self._val.shape == (2, 2)
        return np.linalg.det(self._val)

    def transpose(self) -> Matrix:
        """Transpose in place and return self."""
        self._val = self._val.T
        return self

    def invert(self) -> Matrix:
        """Invert a 2x2 matrix in place and return self."""
        assert self._val.shape == (2, 2)
        self._val = np.linalg.inv(self._val)
        return self

    def skew_symmetric_mat(self, vec: Matrix) -> Matrix:
        """Return the matrix [[0, -vec.y], [vec.x, 0]] built from ``vec``."""
        assert self._val.shape == (2, 2)
        return Matrix([0, -vec._val[1, 0], vec._val[0, 0], 0])

    def identity_mat(self) -> Matrix:
        """Return a fresh 2x2 identity matrix."""
        assert self._val.shape == (2, 2)
        return Matrix([1, 0, 0, 1])

    def len_square(self) -> float:
        """Sum of squared entries (squared Euclidean norm)."""
        return np.square(self._val).sum()

    def len(self) -> float:
        """Euclidean norm."""
        return np.sqrt(self.len_square())

    def theta(self) -> float:
        """Polar angle (radians) of a 2d column vector."""
        assert self._val.shape == (2, 1)
        assert not np.isclose(self._val[0, 0], 0)
        return np.arctan2(self._val[1, 0], self._val[0, 0])

    def set_value(self, val: Union[List[float], Matrix]) -> Matrix:
        """Overwrite the entries from a flat list or another Matrix."""
        if isinstance(val, list):
            self._val = np.array(val).reshape(self._val.shape)
        elif isinstance(val, Matrix):
            self._val = val._val
        return self

    def clear(self) -> Matrix:
        """Zero all entries in place and return self."""
        if self._val.shape == (2, 2):
            self.set_value([0.0, 0.0, 0.0, 0.0])
        else:
            self.set_value([0.0, 0.0])
        return self

    def negate(self) -> Matrix:
        """Negate in place and return self."""
        self._val = -self._val
        return self

    def negative(self) -> Matrix:
        """Return a negated copy (self is untouched)."""
        return Matrix(-self._val, self._data_type)

    def swap(self, other: Matrix) -> Matrix:
        """Exchange contents with ``other`` (both objects mutate)."""
        assert self._data_type == other._data_type
        assert self._val.shape == other._val.shape
        self._val, other._val = other._val, self._val
        return self

    def normalize(self) -> Matrix:
        """Scale in place to unit norm and return self."""
        assert not np.isclose(self.len(), 0)
        self._val /= self.len()
        return self

    def normal(self) -> Matrix:
        """Return a unit-norm copy (self is untouched)."""
        assert not np.isclose(self.len(), 0)
        return Matrix(self._val / self.len(), self._data_type)

    def is_origin(self) -> bool:
        """True when a 2d column vector is (approximately) the zero vector."""
        assert self._val.shape == (2, 1)
        return np.isclose(self._val, [0, 0]).all()

    def dot(self, other: Matrix) -> float:
        """Dot product of two 2d column vectors."""
        assert self._val.shape == (2, 1)
        assert other._val.shape == (2, 1)
        return np.dot(self._val.T, other._val)[0, 0]

    def cross(self, other: Matrix) -> float:
        """Scalar (z) cross product of two 2d column vectors."""
        assert self._val.shape == (2, 1)
        assert other._val.shape == (2, 1)
        # NOTE: same as the cross_product method
        return np.cross(self._val.reshape(2), other._val.reshape(2)).tolist()

    def perpendicular(self) -> Matrix:
        """Return the 90-degree counter-clockwise rotation (-y, x)."""
        assert self._val.shape == (2, 1)
        return Matrix([-self._val[1, 0], self._val[0, 0]], self._data_type)

    @staticmethod
    def dot_product(veca: Matrix, vecb: Matrix) -> float:
        """Dot product of two 2d column vectors."""
        assert veca._val.shape == (2, 1)
        assert vecb._val.shape == (2, 1)
        return np.dot(veca._val.T, vecb._val)[0, 0]

    @staticmethod
    def cross_product(veca: Matrix, vecb: Matrix) -> float:
        """Scalar (z) cross product of two 2d column vectors."""
        assert veca._val.shape == (2, 1)
        assert vecb._val.shape == (2, 1)
        # NOTE: just hack this impl to output scalar val otherwise vector
        # to pass mypy check
        return np.cross(veca._val.reshape(2), vecb._val.reshape(2)).tolist()

    @staticmethod
    def cross_product2(lhs: Union[Matrix, float], rhs: Union[Matrix,
                                                             float]) -> Matrix:
        """Cross product of a scalar with a vector (either operand order)."""
        # FIX: accept int scalars as well as float (isinstance(x, float)
        # rejects plain Python ints).
        if isinstance(lhs, (int, float)) and isinstance(rhs, Matrix):
            assert rhs._val.shape == (2, 1)
            return Matrix([-rhs.y, rhs.x], 'vec') * lhs
        elif isinstance(lhs, Matrix) and isinstance(rhs, (int, float)):
            assert lhs._val.shape == (2, 1)
            return Matrix([lhs.y, -lhs.x], 'vec') * rhs
        else:
            raise TypeError

    @staticmethod
    def rotate_mat(radian: float) -> Matrix:
        """Return the 2x2 rotation matrix for ``radian`` (counter-clockwise)."""
        res: List[float] = []
        cos_val: float = np.cos(radian)
        sin_val: float = np.sin(radian)
        res.append(cos_val)
        res.append(-sin_val)
        res.append(sin_val)
        res.append(cos_val)
        return Matrix(res)
| [
"numpy.isclose",
"numpy.linalg.det",
"numpy.square",
"numpy.array",
"numpy.dot",
"numpy.linalg.inv",
"numpy.arctan2",
"numpy.cos",
"numpy.sin"
] | [((5993, 6017), 'numpy.linalg.det', 'np.linalg.det', (['self._val'], {}), '(self._val)\n', (6006, 6017), True, 'import numpy as np\n'), ((6200, 6224), 'numpy.linalg.inv', 'np.linalg.inv', (['self._val'], {}), '(self._val)\n', (6213, 6224), True, 'import numpy as np\n'), ((6809, 6853), 'numpy.arctan2', 'np.arctan2', (['self._val[1, 0]', 'self._val[0, 0]'], {}), '(self._val[1, 0], self._val[0, 0])\n', (6819, 6853), True, 'import numpy as np\n'), ((9900, 9914), 'numpy.cos', 'np.cos', (['radian'], {}), '(radian)\n', (9906, 9914), True, 'import numpy as np\n'), ((9940, 9954), 'numpy.sin', 'np.sin', (['radian'], {}), '(radian)\n', (9946, 9954), True, 'import numpy as np\n'), ((1706, 1726), 'numpy.isclose', 'np.isclose', (['other', '(0)'], {}), '(other, 0)\n', (1716, 1726), True, 'import numpy as np\n'), ((3653, 3673), 'numpy.isclose', 'np.isclose', (['other', '(0)'], {}), '(other, 0)\n', (3663, 3673), True, 'import numpy as np\n'), ((6763, 6793), 'numpy.isclose', 'np.isclose', (['self._val[0, 0]', '(0)'], {}), '(self._val[0, 0], 0)\n', (6773, 6793), True, 'import numpy as np\n'), ((8239, 8270), 'numpy.dot', 'np.dot', (['self._val.T', 'other._val'], {}), '(self._val.T, other._val)\n', (8245, 8270), True, 'import numpy as np\n'), ((8864, 8894), 'numpy.dot', 'np.dot', (['veca._val.T', 'vecb._val'], {}), '(veca._val.T, vecb._val)\n', (8870, 8894), True, 'import numpy as np\n'), ((404, 417), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (412, 417), True, 'import numpy as np\n'), ((2650, 2683), 'numpy.isclose', 'np.isclose', (['self._val', 'other._val'], {}), '(self._val, other._val)\n', (2660, 2683), True, 'import numpy as np\n'), ((6574, 6594), 'numpy.square', 'np.square', (['self._val'], {}), '(self._val)\n', (6583, 6594), True, 'import numpy as np\n'), ((8061, 8090), 'numpy.isclose', 'np.isclose', (['self._val', '[0, 0]'], {}), '(self._val, [0, 0])\n', (8071, 8090), True, 'import numpy as np\n'), ((2747, 2780), 'numpy.isclose', 'np.isclose', (['self._val', 
'other._val'], {}), '(self._val, other._val)\n', (2757, 2780), True, 'import numpy as np\n'), ((6981, 6994), 'numpy.array', 'np.array', (['val'], {}), '(val)\n', (6989, 6994), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# OPTIMIZE: using caching improves speed significantly at the cost of memory. I
# need to see which is preferable in higher-level tests.
from functools import cached_property
import numpy
from numba import njit as numba_speedup
class Lattice:
    """
    A 3D crystal lattice stored as a 3x3 matrix (one lattice vector per row).

    Convenience properties expose the usual cell parameters -- the vector
    lengths (a, b, c) and the inter-vector angles (alpha, beta, gamma) --
    and results are cached after the first access.

    Background reading:
        http://gisaxs.com/index.php/Unit_cell
        http://gisaxs.com/index.php/Lattices
    """

    def __init__(self, matrix):
        """
        Store the lattice matrix. Each row of the 3x3 input is one lattice
        vector in xyz. For example, a cubic cell with 1 Angstrom vectors at
        90 degrees from each other would be

            matrix = [[1, 0, 0],
                      [0, 1, 0],
                      [0, 0, 1]]
        """
        # keep an internal numpy copy so the math below is vectorized
        self.matrix = numpy.array(matrix)

    @cached_property
    def lengths(self):
        """
        Lengths of the three lattice vectors, returned as (a, b, c).
        """
        # Equivalent to numpy.linalg.norm(matrix, axis=1), but that call is
        # not available inside the numba-compiled helper, so the row-wise
        # sqrt(x**2 + y**2 + z**2) is computed by hand there. The result is
        # cached after the first access.
        return self._lengths_fast(self.matrix)

    @staticmethod
    @numba_speedup(cache=True)
    def _lengths_fast(matrix):
        # Internal helper -- call lattice.lengths rather than this directly.
        squared = matrix ** 2
        return numpy.sqrt(numpy.sum(squared, axis=1))

    @property
    def a(self):
        """
        Length of the lattice vector "a" (in Angstroms).
        """
        return self.lengths[0]

    @property
    def b(self):
        """
        Length of the lattice vector "b" (in Angstroms).
        """
        return self.lengths[1]

    @property
    def c(self):
        """
        Length of the lattice vector "c" (in Angstroms).
        """
        return self.lengths[2]

    @cached_property
    def angles(self):
        """
        Angles between the lattice vectors, returned as
        (alpha, beta, gamma) in degrees.
        """
        # alpha, beta, gamma are the angles between the b&c, a&c and a&b
        # vectors respectively. All three arccos-of-dot-product evaluations
        # happen inside the numba-compiled helper below; the result is
        # cached after the first access.
        return self._angles_fast(self.matrix, self.lengths)

    @staticmethod
    @numba_speedup(cache=True)
    def _angles_fast(matrix, lengths):
        # Internal helper -- call lattice.angles rather than this directly.
        # Row-index pairs: alpha = (b, c), beta = (a, c), gamma = (a, b).
        pairs = [(1, 2), (0, 2), (0, 1)]
        degrees = []
        for first, second in pairs:
            # angle = arccos(v1 . v2 / (|v1| |v2|)), via unit vectors
            unit_first = matrix[first] / lengths[first]
            unit_second = matrix[second] / lengths[second]
            cosine = numpy.dot(unit_first, unit_second)
            degrees.append(numpy.degrees(numpy.arccos(cosine)))
        return numpy.array(degrees)

    @property
    def alpha(self):
        """
        The angle between the lattice vectors b and c (in degrees).
        """
        return self.angles[0]

    @property
    def beta(self):
        """
        The angle between the lattice vectors a and c (in degrees).
        """
        return self.angles[1]

    @property
    def gamma(self):
        """
        The angle between the lattice vectors a and b (in degrees).
        """
        return self.angles[2]
| [
"numpy.arccos",
"numba.njit",
"numpy.array",
"numpy.dot",
"numpy.sum",
"numpy.degrees"
] | [((1774, 1799), 'numba.njit', 'numba_speedup', ([], {'cache': '(True)'}), '(cache=True)\n', (1787, 1799), True, 'from numba import njit as numba_speedup\n'), ((3248, 3273), 'numba.njit', 'numba_speedup', ([], {'cache': '(True)'}), '(cache=True)\n', (3261, 3273), True, 'from numba import njit as numba_speedup\n'), ((1123, 1142), 'numpy.array', 'numpy.array', (['matrix'], {}), '(matrix)\n', (1134, 1142), False, 'import numpy\n'), ((4587, 4606), 'numpy.array', 'numpy.array', (['angles'], {}), '(angles)\n', (4598, 4606), False, 'import numpy\n'), ((2113, 2143), 'numpy.sum', 'numpy.sum', (['(matrix ** 2)'], {'axis': '(1)'}), '(matrix ** 2, axis=1)\n', (2122, 2143), False, 'import numpy\n'), ((4347, 4386), 'numpy.dot', 'numpy.dot', (['unit_vector_1', 'unit_vector_2'], {}), '(unit_vector_1, unit_vector_2)\n', (4356, 4386), False, 'import numpy\n'), ((4407, 4432), 'numpy.arccos', 'numpy.arccos', (['dot_product'], {}), '(dot_product)\n', (4419, 4432), False, 'import numpy\n'), ((4506, 4526), 'numpy.degrees', 'numpy.degrees', (['angle'], {}), '(angle)\n', (4519, 4526), False, 'import numpy\n')] |
import sqlite3
from tqdm import tqdm
import numpy as np
import array
import sys
import math
import os
import multiprocessing
import shutil
import pandas as pd
from scipy.signal import savgol_filter
class Reload:
    """Load acoustic-emission (AE) hit features and transient waveforms from
    Vallen sqlite databases (*.pridb / *.tradb) or a PAC export folder."""
    def __init__(self, path_pri, path_tra, fold):
        # path_pri: *.pridb feature database; path_tra: *.tradb waveform
        # database; fold: filename prefix used by export_feature().
        self.path_pri = path_pri
        self.path_tra = path_tra
        self.fold = fold
    def sqlite_read(self, path):
        """
        Return the number of records stored in a Vallen sqlite database file.
        """
        mydb = sqlite3.connect(path)  # connect to the database
        # Decode stored text as GBK, silently dropping undecodable bytes.
        mydb.text_factory = lambda x: str(x, 'gbk', 'ignore')
        cur = mydb.cursor()  # cursor used to execute the SQL statements
        # Fetch every table name first.
        cur.execute("SELECT name FROM sqlite_master WHERE type='table'")
        Tables = cur.fetchall()  # list of 1-tuples of table names
        # The record count lives in a fixed row of a fixed metadata table,
        # whose position differs between the two database flavours.
        if path[-5:] == 'pridb':
            cur.execute("SELECT * FROM {}".format(Tables[3][0]))
            res = cur.fetchall()[-2][1]
        elif path[-5:] == 'tradb':
            cur.execute("SELECT * FROM {}".format(Tables[1][0]))
            res = cur.fetchall()[-3][1]
        # NOTE(review): for any other file extension `res` is unbound and the
        # next line raises NameError -- confirm callers only pass pridb/tradb.
        return int(res)
    def read_with_time(self, time):
        """Read pridb hits and bucket them by the time windows in `time`.

        time : increasing sequence of bucket edges; a hit with Time in
               [time[j], time[j+1]) goes into bucket j.
        Returns (t, chan_1, chan_2, chan_3, chan_4) where t is the list of
        buckets and chan_k are per-channel arrays of hit rows.
        """
        conn_pri = sqlite3.connect(self.path_pri)
        result_pri = conn_pri.execute(
            "Select SetID, Time, Chan, Thr, Amp, RiseT, Dur, Eny, RMS, Counts, TRAI FROM view_ae_data")
        chan_1, chan_2, chan_3, chan_4 = [], [], [], []
        t = [[] for _ in range(len(time) - 1)]
        N_pri = self.sqlite_read(self.path_pri)
        for _ in tqdm(range(N_pri)):
            i = result_pri.fetchone()
            # Keep hits with Counts >= 6 and a positive TRAI only.
            if i[-2] is not None and i[-2] >= 6 and i[-1] > 0:
                for idx, chan in zip(np.arange(1, 5), [chan_1, chan_2, chan_3, chan_4]):
                    if i[2] == idx:
                        chan.append(i)
                        for j in range(len(t)):
                            if time[j] <= i[1] < time[j + 1]:
                                t[j].append(i)
                                break
                        break
        chan_1 = np.array(chan_1)
        chan_2 = np.array(chan_2)
        chan_3 = np.array(chan_3)
        chan_4 = np.array(chan_4)
        return t, chan_1, chan_2, chan_3, chan_4
    def read_vallen_data(self, lower=2, t_cut=float('inf'), mode='all'):
        """Read waveforms and/or hit features from the Vallen databases.

        lower : minimum Counts (exclusive) for a hit to be kept.
        t_cut : records whose time exceeds this cutoff are skipped.
        mode  : 'all', 'tra only' or 'pri only'.
        Returns (data_tra, data_pri, chan_1, chan_2, chan_3, chan_4).
        """
        data_tra, data_pri, chan_1, chan_2, chan_3, chan_4 = [], [], [], [], [], []
        if mode == 'all' or mode == 'tra only':
            conn_tra = sqlite3.connect(self.path_tra)
            result_tra = conn_tra.execute(
                "Select Time, Chan, Thr, SampleRate, Samples, TR_mV, Data, TRAI FROM view_tr_data")
            N_tra = self.sqlite_read(self.path_tra)
            for _ in tqdm(range(N_tra), ncols=80):
                i = result_tra.fetchone()
                if i[0] > t_cut:
                    continue
                data_tra.append(i)
        if mode == 'all' or mode == 'pri only':
            conn_pri = sqlite3.connect(self.path_pri)
            result_pri = conn_pri.execute(
                "Select SetID, Time, Chan, Thr, Amp, RiseT, Dur, Eny, RMS, Counts, TRAI FROM view_ae_data")
            N_pri = self.sqlite_read(self.path_pri)
            for _ in tqdm(range(N_pri), ncols=80):
                i = result_pri.fetchone()
                if i[0] > t_cut:
                    continue
                # Keep hits with Counts > lower and a positive TRAI.
                if i[-2] is not None and i[-2] > lower and i[-1] > 0:
                    data_pri.append(i)
                    if i[2] == 1:
                        chan_1.append(i)
                    if i[2] == 2:
                        chan_2.append(i)
                    elif i[2] == 3:
                        chan_3.append(i)
                    elif i[2] == 4:
                        chan_4.append(i)
        # Sort waveforms by TRAI so list position tracks the hit TRAIs.
        data_tra = sorted(data_tra, key=lambda x: x[-1])
        data_pri = np.array(data_pri)
        chan_1 = np.array(chan_1)
        chan_2 = np.array(chan_2)
        chan_3 = np.array(chan_3)
        chan_4 = np.array(chan_4)
        return data_tra, data_pri, chan_1, chan_2, chan_3, chan_4
    def read_pac_data(self, path, lower=2):
        """Read features exported by a PAC system from a folder.

        NOTE(review): this method looks unfinished -- `N_tra`, `result_tra`
        and `result_pri` are never defined here and `data_pri` is rebound to
        an array before being appended to, so calling it raises NameError.
        """
        os.chdir(path)
        dir_features = os.listdir(path)[0]
        data_tra, data_pri, chan_1, chan_2, chan_3, chan_4 = [], [], [], [], [], []
        with open(dir_features, 'r') as f:
            data_pri = np.array([j.strip(', ') for i in f.readlines()[1:] for j in i.strip("\n")])
        for _ in tqdm(range(N_tra), ncols=80):
            i = result_tra.fetchone()
            data_tra.append(i)
        for _ in tqdm(range(N_pri), ncols=80):
            i = result_pri.fetchone()
            if i[-2] is not None and i[-2] > lower and i[-1] > 0:
                data_pri.append(i)
                if i[2] == 1:
                    chan_1.append(i)
                if i[2] == 2:
                    chan_2.append(i)
                elif i[2] == 3:
                    chan_3.append(i)
                elif i[2] == 4:
                    chan_4.append(i)
        data_tra = sorted(data_tra, key=lambda x: x[-1])
        data_pri = np.array(data_pri)
        chan_1 = np.array(chan_1)
        chan_2 = np.array(chan_2)
        chan_3 = np.array(chan_3)
        chan_4 = np.array(chan_4)
        return data_tra, data_pri, chan_1, chan_2, chan_3, chan_4
    def export_feature(self, t, time):
        """Write each time bucket produced by read_with_time() to a text file
        named '<fold>-<t0>-<t1>.txt', one hit per line."""
        for i in range(len(time) - 1):
            with open(self.fold + '-%d-%d.txt' % (time[i], time[i + 1]), 'w') as f:
                f.write('SetID, TRAI, Time, Chan, Thr, Amp, RiseT, Dur, Eny, RMS, Counts\n')
                # ID, Time(s), Chan, Thr(μV), Thr(dB), Amp(μV), Amp(dB), RiseT(s), Dur(s), Eny(aJ), RMS(μV), Counts, Frequency(Hz)
                # NOTE(review): the inner loop reuses the name `i`, shadowing
                # the bucket index; it works only because `t[i]` is evaluated
                # before the rebinding.
                for i in t[i]:
                    f.write('{}, {}, {:.8f}, {}, {:.7f}, {:.7f}, {:.2f}, {:.2f}, {:.7f}, {:.7f}, {}\n'.format(
                        i[0], i[-1], i[1], i[2], i[3], i[4], i[5], i[6], i[7], i[8], i[9]))
class Export:
    """Export the transient waveform of every hit in one channel to text
    files under ./waveform, optionally parallelised across processes."""
    def __init__(self, chan, data_tra, features_path):
        # chan: array of hit rows for one channel (last column is TRAI)
        # data_tra: waveform records, sorted by TRAI
        # features_path: feature-file name, reused to build output file names
        self.data_tra = data_tra
        self.features_path = features_path
        self.chan = chan
    def find_idx(self):
        # Collect the TRAI of every waveform record so a hit's waveform can
        # still be located when TRAIs are not contiguous (1-based) indices.
        Res = []
        for i in self.data_tra:
            Res.append(i[-1])
        Res = np.array(Res)
        return Res
    def detect_folder(self):
        # Create ./waveform, asking interactively before overwriting an
        # existing export; 'no' aborts the whole program.
        tar = './waveform'
        if not os.path.exists(tar):
            os.mkdir(tar)
        else:
            print("=" * 46 + " Warning " + "=" * 45)
            while True:
                ans = input(
                    "The exported data file has been detected. Do you want to overwrite it: (Enter 'yes' or 'no') ")
                if ans.strip() == 'yes':
                    shutil.rmtree(tar)
                    os.mkdir(tar)
                    break
                elif ans.strip() == 'no':
                    sys.exit(0)
                print("Please enter 'yes' or 'no' to continue!")
    def export_waveform(self, chan, thread_id=0, status='normal'):
        """Write one text file per hit in `chan` containing its waveform.

        status='normal' prepares ./waveform first; worker processes pass
        'accelerate' so the folder is only prepared once by the parent.
        """
        if status == 'normal':
            self.detect_folder()
        Res = self.find_idx()
        pbar = tqdm(chan, ncols=80)
        for i in pbar:
            trai = i[-1]
            # Fast path: assume data_tra is densely indexed by TRAI-1;
            # fall back to an explicit TRAI search when that fails.
            try:
                j = self.data_tra[int(trai - 1)]
            except IndexError:
                try:
                    idx = np.where(Res == trai)[0][0]
                    j = self.data_tra[idx]
                except IndexError:
                    print('Error 1: TRAI:{} in Channel is not found in data_tra!'.format(trai))
                    continue
            if j[-1] != trai:
                try:
                    idx = np.where(Res == trai)[0][0]
                    j = self.data_tra[idx]
                except IndexError:
                    print('Error 2: TRAI:{} in Channel is not found in data_tra!'.format(trai))
                    continue
            # Raw samples are int16; scaled by TR_mV * 1000 to microvolts.
            sig = np.multiply(array.array('h', bytes(j[-2])), j[-3] * 1000)
            with open('./waveform/' + self.features_path[:-4] + '_{:.0f}_{:.8f}.txt'.format(trai, j[0]), 'w') as f:
                f.write('Amp(uV)\n')
                for a in sig:
                    f.write('{}\n'.format(a))
            pbar.set_description("Process: %s | Exporting: %s" % (thread_id, int(trai)))
    def accelerate_export(self, N=4):
        """Split self.chan into N contiguous slices and export them in
        parallel worker processes; returns the AsyncResult handles."""
        # check existing file
        self.detect_folder()
        # Multiprocessing acceleration
        each_core = int(math.ceil(self.chan.shape[0] / float(N)))
        pool = multiprocessing.Pool(processes=N)
        result = []
        for idx, i in enumerate(range(0, self.chan.shape[0], each_core)):
            result.append(pool.apply_async(self.export_waveform, (self.chan[i:i + each_core], idx + 1, 'accelerate',)))
        pool.close()
        pool.join()
        print('Finished export of waveforms!')
        return result
def material_status(component, status):
    """Return hand-picked sample indices and TRAIs for a material/selection.

    component : 'pure' or 'electrolysis'.
    status    : selection strategy -- 'random' for both materials,
                plus 'amp' and 'eny' for 'electrolysis'.

    Returns (idx_select_1, idx_select_2, TRAI_select_1, TRAI_select_2).

    Bug fix: unknown (component, status) combinations previously fell
    through every branch and raised UnboundLocalError at the return; they
    now raise a descriptive ValueError.  The unused idx_same_amp_* /
    TRAI_same_amp_* locals of the ('pure', 'random') branch were removed.
    """
    table = {
        ('pure', 'random'): (
            [95, 60],                                       # -0.264, -0.022
            [105, 94, 95, 109, 102],                        # 0.508, 0.729, 1.022, 1.174, 1.609
            [124104, 76892],
            [4117396, 4115821, 4115822, 4117632, 4117393],
        ),
        ('electrolysis', 'random'): (
            [13, 75, 79, 72, 71],                           # 0.303, 0.409, 0.534, 0.759, 1.026
            [50, 148, 51, 252, 10],                         # 0.115, 0.275, 0.297, 0.601, 1.024
            [2949, 14166, 14815, 14140, 14090],
            [3067, 11644, 3079, 28583, 1501],
        ),
        ('electrolysis', 'amp'): (
            [16, 26, 87, 34, 22],
            [90, 23, 48, 50, 29],
            [3932, 7412, 16349, 9001, 6300],
            [4619, 2229, 2977, 3014, 2345],
        ),
        ('electrolysis', 'eny'): (
            [160, 141, 57, 37, 70],
            [79, 229, 117, 285, 59],
            [26465, 23930, 11974, 9379, 13667],
            [4012, 22499, 7445, 34436, 3282],
        ),
    }
    try:
        idx_select_1, idx_select_2, TRAI_select_1, TRAI_select_2 = table[(component, status)]
    except KeyError:
        raise ValueError('Unknown combination: component=%r, status=%r' % (component, status)) from None
    return idx_select_1, idx_select_2, TRAI_select_1, TRAI_select_2
def validation(k):
    """Recompute hit features (amplitude, rise time, duration, energy,
    counts) from the raw waveform of record k and print them so they can be
    cross-checked against the values stored in the database.

    NOTE(review): relies on a module-level `data_tra` that is not defined at
    this file's top level -- confirm the caller creates it first.
    """
    # Time, Amp, RiseTime, Dur, Eny, Counts, TRAI
    i = data_tra[k]
    # Raw samples are int16, scaled by TR_mV * 1000 to microvolts.
    sig = np.multiply(array.array('h', bytes(i[-2])), i[-3] * 1000)
    # Sample instants: start time plus (k / sample_rate) for each sample.
    time = np.linspace(i[0], i[0] + pow(i[-5], -1) * (i[-4] - 1), i[-4])
    thr = i[2]
    # The hit is the span between the first and last threshold crossing.
    valid_wave_idx = np.where(abs(sig) >= thr)[0]
    valid_time = time[valid_wave_idx[0]:(valid_wave_idx[-1] + 1)]
    start = time[valid_wave_idx[0]]
    end = time[valid_wave_idx[-1]]
    duration = (end - start) * pow(10, 6)  # seconds -> microseconds
    max_idx = np.argmax(abs(sig))
    amplitude = max(abs(sig))
    rise_time = (time[max_idx] - start) * pow(10, 6)
    valid_data = sig[valid_wave_idx[0]:(valid_wave_idx[-1] + 1)]
    # Energy: sum of squared samples scaled by 1e6 / threshold column.
    energy = np.sum(np.multiply(pow(valid_data, 2), pow(10, 6) / i[3]))
    RMS = math.sqrt(energy / duration)
    # Count falling edges through the threshold.
    count, idx = 0, 1
    N = len(valid_data)
    for idx in range(1, N):
        if valid_data[idx - 1] >= thr > valid_data[idx]:
            count += 1
    # while idx < N:
    #     if min(valid_data[idx - 1], valid_data[idx]) <= thr < max((valid_data[idx - 1], valid_data[idx])):
    #         count += 1
    #         idx += 2
    #         continue
    #     idx += 1
    print(i[0], amplitude, rise_time, duration, energy / pow(10, 4), count, i[-1])
def val_TRAI(data_pri, TRAI):
    """For every TRAI, print the stored Vallen features and then the values
    recomputed from the raw waveform (via validation) for comparison."""
    # Columns: Time, Amp, RiseTime, Dur, Eny, Counts, TRAI
    for trai in TRAI:
        vallen = data_pri[trai - 1]
        print('-' * 80)
        fields = (vallen[1], vallen[4], vallen[5], vallen[6],
                  vallen[-4], vallen[-2], vallen[-1])
        print('{:.8f} {} {} {} {} {:.0f} {:.0f}'.format(*fields))
        validation(trai - 1)
def save_E_T(Time, Eny, cls_1_KKM, cls_2_KKM, time, displace, smooth_load, strain, smooth_stress):
    """Dump the energy-vs-time data of the two KKM populations and the raw
    tensile curve to three CSV files in the current directory.

    Time, Eny            : arrays of hit times and energies.
    cls_1_KKM, cls_2_KKM : index arrays selecting population 1 / 2 hits.
    time .. smooth_stress: columns of the tensile test curve.

    Bug fix: the body previously indexed an undefined name `cls_KKM`
    (raising NameError); it now uses the `cls_1_KKM` / `cls_2_KKM` index
    arrays that are actually passed in.
    """
    df_1 = pd.DataFrame({'time_pop1': Time[cls_1_KKM], 'energy_pop1': Eny[cls_1_KKM]})
    df_2 = pd.DataFrame({'time_pop2': Time[cls_2_KKM], 'energy_pop2': Eny[cls_2_KKM]})
    df_3 = pd.DataFrame(
        {'time': time, 'displace': displace, 'load': smooth_load, 'strain': strain, 'stress': smooth_stress})
    df_1.to_csv('E-T_electrolysis_pop1.csv')
    df_2.to_csv('E-T_electrolysis_pop2.csv')
    df_3.to_csv('E-T_electrolysis_RawData.csv')
def load_stress(path_curve):
    """Load a tensile-test curve CSV (gbk encoded), drop the units row and
    duplicated strain readings, and return the columns with strain/stress
    sorted by increasing strain.

    Returns (time, displace, load, strain, stress) as numpy arrays.
    """
    raw = pd.read_csv(path_curve, encoding='gbk')
    # Row 0 holds the measurement units; everything else is numeric.
    data = raw.drop(index=[0]).astype('float32')
    data_drop = data.drop_duplicates(['拉伸应变 (应变 1)'])
    time, displace, load, strain, stress = (
        np.array(data_drop.iloc[:, col]) for col in range(5))
    order = np.argsort(strain)
    return time, displace, load, strain[order], stress[order]
def smooth_curve(time, stress, window_length=99, polyorder=1, epoch=200, curoff=(2500, 25000)):
    """Repeatedly Savitzky-Golay-filter a stress curve and stitch together
    the raw head/tail, a lightly-smoothed front section and the
    heavily-smoothed middle.

    time, stress  : matching 1-D arrays; `time` must contain values below
                    curoff[0] and above curoff[1].
    window_length,
    polyorder     : savgol_filter parameters.
    epoch         : number of additional smoothing passes.
    curoff        : (front_cutoff, rest_cutoff) time values delimiting the
                    lightly / heavily smoothed regions.

    Bug fixes: `front` was only assigned on pass 5, so epoch <= 5 raised
    NameError -- it now falls back to the first smoothing pass; the mutable
    default list `curoff` was replaced by an equivalent tuple.
    """
    y_smooth = savgol_filter(stress, window_length, polyorder, mode='nearest')
    front = y_smooth  # fallback snapshot, overwritten on the 6th pass
    for i in range(epoch):
        if i == 5:
            # Snapshot after 5 extra passes: a lightly smoothed curve used
            # for the front section (unchanged from the original logic).
            front = y_smooth
        y_smooth = savgol_filter(y_smooth, window_length, polyorder, mode='nearest')
    front_idx = np.where(time < curoff[0])[0][-1]
    rest_idx = np.where(time > curoff[1])[0][0]
    # Raw first 40 samples, light smoothing up to front_idx, heavy smoothing
    # up to rest_idx, raw data afterwards.
    res = np.concatenate((stress[:40], front[40:front_idx], y_smooth[front_idx:rest_idx], stress[rest_idx:]))
    return res
def filelist_convert(data_path, tar=None):
file_list = os.listdir(data_path)
if tar:
tar += '.txt'
else:
tar = data_path.split('/')[-1] + '.txt'
if tar in file_list:
exist_idx = np.where(np.array(file_list) == tar)[0][0]
file_list.pop(exist_idx)
file_idx = np.array([np.array(i[:-4].split('_')[1:]).astype('int64') for i in file_list])
return file_list, file_idx | [
"os.path.exists",
"os.listdir",
"sys.exit",
"sqlite3.connect",
"pandas.read_csv",
"numpy.where",
"tqdm.tqdm",
"math.sqrt",
"scipy.signal.savgol_filter",
"shutil.rmtree",
"numpy.argsort",
"numpy.array",
"os.chdir",
"multiprocessing.Pool",
"numpy.concatenate",
"os.mkdir",
"pandas.DataF... | [((10978, 11006), 'math.sqrt', 'math.sqrt', (['(energy / duration)'], {}), '(energy / duration)\n', (10987, 11006), False, 'import math\n'), ((11953, 12030), 'pandas.DataFrame', 'pd.DataFrame', (["{'time_pop1': Time[cls_KKM[0]], 'energy_pop1': Eny[cls_KKM[0]]}"], {}), "({'time_pop1': Time[cls_KKM[0]], 'energy_pop1': Eny[cls_KKM[0]]})\n", (11965, 12030), True, 'import pandas as pd\n'), ((12042, 12119), 'pandas.DataFrame', 'pd.DataFrame', (["{'time_pop2': Time[cls_KKM[1]], 'energy_pop2': Eny[cls_KKM[1]]}"], {}), "({'time_pop2': Time[cls_KKM[1]], 'energy_pop2': Eny[cls_KKM[1]]})\n", (12054, 12119), True, 'import pandas as pd\n'), ((12131, 12249), 'pandas.DataFrame', 'pd.DataFrame', (["{'time': time, 'displace': displace, 'load': smooth_load, 'strain': strain,\n 'stress': smooth_stress}"], {}), "({'time': time, 'displace': displace, 'load': smooth_load,\n 'strain': strain, 'stress': smooth_stress})\n", (12143, 12249), True, 'import pandas as pd\n'), ((12574, 12604), 'numpy.array', 'np.array', (['data_drop.iloc[:, 0]'], {}), '(data_drop.iloc[:, 0])\n', (12582, 12604), True, 'import numpy as np\n'), ((12620, 12650), 'numpy.array', 'np.array', (['data_drop.iloc[:, 1]'], {}), '(data_drop.iloc[:, 1])\n', (12628, 12650), True, 'import numpy as np\n'), ((12662, 12692), 'numpy.array', 'np.array', (['data_drop.iloc[:, 2]'], {}), '(data_drop.iloc[:, 2])\n', (12670, 12692), True, 'import numpy as np\n'), ((12706, 12736), 'numpy.array', 'np.array', (['data_drop.iloc[:, 3]'], {}), '(data_drop.iloc[:, 3])\n', (12714, 12736), True, 'import numpy as np\n'), ((12750, 12780), 'numpy.array', 'np.array', (['data_drop.iloc[:, 4]'], {}), '(data_drop.iloc[:, 4])\n', (12758, 12780), True, 'import numpy as np\n'), ((12796, 12814), 'numpy.argsort', 'np.argsort', (['strain'], {}), '(strain)\n', (12806, 12814), True, 'import numpy as np\n'), ((13036, 13099), 'scipy.signal.savgol_filter', 'savgol_filter', (['stress', 'window_length', 'polyorder'], {'mode': '"""nearest"""'}), 
"(stress, window_length, polyorder, mode='nearest')\n", (13049, 13099), False, 'from scipy.signal import savgol_filter\n'), ((13371, 13475), 'numpy.concatenate', 'np.concatenate', (['(stress[:40], front[40:front_idx], y_smooth[front_idx:rest_idx], stress[\n rest_idx:])'], {}), '((stress[:40], front[40:front_idx], y_smooth[front_idx:\n rest_idx], stress[rest_idx:]))\n', (13385, 13475), True, 'import numpy as np\n'), ((13547, 13568), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (13557, 13568), False, 'import os\n'), ((456, 477), 'sqlite3.connect', 'sqlite3.connect', (['path'], {}), '(path)\n', (471, 477), False, 'import sqlite3\n'), ((1113, 1143), 'sqlite3.connect', 'sqlite3.connect', (['self.path_pri'], {}), '(self.path_pri)\n', (1128, 1143), False, 'import sqlite3\n'), ((1982, 1998), 'numpy.array', 'np.array', (['chan_1'], {}), '(chan_1)\n', (1990, 1998), True, 'import numpy as np\n'), ((2016, 2032), 'numpy.array', 'np.array', (['chan_2'], {}), '(chan_2)\n', (2024, 2032), True, 'import numpy as np\n'), ((2050, 2066), 'numpy.array', 'np.array', (['chan_3'], {}), '(chan_3)\n', (2058, 2066), True, 'import numpy as np\n'), ((2084, 2100), 'numpy.array', 'np.array', (['chan_4'], {}), '(chan_4)\n', (2092, 2100), True, 'import numpy as np\n'), ((3744, 3762), 'numpy.array', 'np.array', (['data_pri'], {}), '(data_pri)\n', (3752, 3762), True, 'import numpy as np\n'), ((3780, 3796), 'numpy.array', 'np.array', (['chan_1'], {}), '(chan_1)\n', (3788, 3796), True, 'import numpy as np\n'), ((3814, 3830), 'numpy.array', 'np.array', (['chan_2'], {}), '(chan_2)\n', (3822, 3830), True, 'import numpy as np\n'), ((3848, 3864), 'numpy.array', 'np.array', (['chan_3'], {}), '(chan_3)\n', (3856, 3864), True, 'import numpy as np\n'), ((3882, 3898), 'numpy.array', 'np.array', (['chan_4'], {}), '(chan_4)\n', (3890, 3898), True, 'import numpy as np\n'), ((4018, 4032), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (4026, 4032), False, 'import os\n'), ((4952, 4970), 
'numpy.array', 'np.array', (['data_pri'], {}), '(data_pri)\n', (4960, 4970), True, 'import numpy as np\n'), ((4988, 5004), 'numpy.array', 'np.array', (['chan_1'], {}), '(chan_1)\n', (4996, 5004), True, 'import numpy as np\n'), ((5022, 5038), 'numpy.array', 'np.array', (['chan_2'], {}), '(chan_2)\n', (5030, 5038), True, 'import numpy as np\n'), ((5056, 5072), 'numpy.array', 'np.array', (['chan_3'], {}), '(chan_3)\n', (5064, 5072), True, 'import numpy as np\n'), ((5090, 5106), 'numpy.array', 'np.array', (['chan_4'], {}), '(chan_4)\n', (5098, 5106), True, 'import numpy as np\n'), ((6084, 6097), 'numpy.array', 'np.array', (['Res'], {}), '(Res)\n', (6092, 6097), True, 'import numpy as np\n'), ((6929, 6949), 'tqdm.tqdm', 'tqdm', (['chan'], {'ncols': '(80)'}), '(chan, ncols=80)\n', (6933, 6949), False, 'from tqdm import tqdm\n'), ((8294, 8327), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'N'}), '(processes=N)\n', (8314, 8327), False, 'import multiprocessing\n'), ((13195, 13260), 'scipy.signal.savgol_filter', 'savgol_filter', (['y_smooth', 'window_length', 'polyorder'], {'mode': '"""nearest"""'}), "(y_smooth, window_length, polyorder, mode='nearest')\n", (13208, 13260), False, 'from scipy.signal import savgol_filter\n'), ((2379, 2409), 'sqlite3.connect', 'sqlite3.connect', (['self.path_tra'], {}), '(self.path_tra)\n', (2394, 2409), False, 'import sqlite3\n'), ((2866, 2896), 'sqlite3.connect', 'sqlite3.connect', (['self.path_pri'], {}), '(self.path_pri)\n', (2881, 2896), False, 'import sqlite3\n'), ((4056, 4072), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4066, 4072), False, 'import os\n'), ((6189, 6208), 'os.path.exists', 'os.path.exists', (['tar'], {}), '(tar)\n', (6203, 6208), False, 'import os\n'), ((6222, 6235), 'os.mkdir', 'os.mkdir', (['tar'], {}), '(tar)\n', (6230, 6235), False, 'import os\n'), ((13279, 13305), 'numpy.where', 'np.where', (['(time < curoff[0])'], {}), '(time < curoff[0])\n', (13287, 13305), True, 'import numpy as 
np\n'), ((13328, 13354), 'numpy.where', 'np.where', (['(time > curoff[1])'], {}), '(time > curoff[1])\n', (13336, 13354), True, 'import numpy as np\n'), ((1613, 1628), 'numpy.arange', 'np.arange', (['(1)', '(5)'], {}), '(1, 5)\n', (1622, 1628), True, 'import numpy as np\n'), ((6534, 6552), 'shutil.rmtree', 'shutil.rmtree', (['tar'], {}), '(tar)\n', (6547, 6552), False, 'import shutil\n'), ((6573, 6586), 'os.mkdir', 'os.mkdir', (['tar'], {}), '(tar)\n', (6581, 6586), False, 'import os\n'), ((12435, 12474), 'pandas.read_csv', 'pd.read_csv', (['path_curve'], {'encoding': '"""gbk"""'}), "(path_curve, encoding='gbk')\n", (12446, 12474), True, 'import pandas as pd\n'), ((6675, 6686), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6683, 6686), False, 'import sys\n'), ((13715, 13734), 'numpy.array', 'np.array', (['file_list'], {}), '(file_list)\n', (13723, 13734), True, 'import numpy as np\n'), ((7450, 7471), 'numpy.where', 'np.where', (['(Res == trai)'], {}), '(Res == trai)\n', (7458, 7471), True, 'import numpy as np\n'), ((7142, 7163), 'numpy.where', 'np.where', (['(Res == trai)'], {}), '(Res == trai)\n', (7150, 7163), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# coding: utf-8
#libraries
import keras
import tensorflow as tf
from keras import backend as K
import cv2
import os
import numpy as np
from keras.optimizers import Adam
from keras.models import model_from_json, load_model
from keras.layers import Input, Dense
from keras.models import Model,Sequential
from sklearn.model_selection import train_test_split
from keras.layers import Convolution2D as Conv2D
from keras.layers.convolutional import Deconv2D as Conv2DTranspose
from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization,Input
from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU,Activation
from keras.callbacks import ModelCheckpoint
from keras.losses import mse, binary_crossentropy
from keras.callbacks import EarlyStopping
# --- Runtime configuration, executed at import time ---------------------
# NOTE(review): this TerminateOnNaN callback is created and immediately
# discarded -- it is never passed to model.fit, so it has no effect.
keras.callbacks.TerminateOnNaN()
seed = 7
np.random.seed(seed)  # fix the NumPy RNG for reproducibility
from keras.callbacks import CSVLogger
from keras.callbacks import Callback, LearningRateScheduler
# config = tf.ConfigProto( device_count = {'GPU': 1 , 'GPU': 2} )
# sess = tf.Session(config=config)
# keras.backend.set_session(sess)
os.environ["CUDA_VISIBLE_DEVICES"]="1"  # run the script on GPU 1 only
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # silence TensorFlow INFO logs
#os.environ["CUDA_VISIBLE_DEVICES"]="1,2"#Setting the script to run on GPU:1,2
import random
import os
import sys
import cv2
import csv
import glob
import numpy as np
import time
from sklearn.utils import shuffle
# path to the USB data drive (one sub-folder per recorded run)
USBPath = "/home/scope/Carla/autopilot_Carla_ad/sample_data/"
# list of folders used in training
trainingFolders = ["run3","run4"]
# Only parameters that have to be changed per experiment:
Working_directory = "/home/scope/Carla/autopilot_Carla_ad/leaderboard/team_code/detector_code/trial1/"  # working directory
Working_folder = 'new-B-1.2'  # experiment name
Working_path = Working_directory + Working_folder + '/'
trainfolder = 'train_reconstruction_result'  # folder for reconstruction results
# Shared CSV logger callback; train() appends the Keras loss history to it.
data = CSVLogger(Working_path + 'kerasloss.csv', append=True, separator=';')
#Load complete input images without shuffling
def load_images(paths):
    """Load every PNG under each folder in *paths*, resized to 224x224 and
    scaled to [0, 1]; prints the total count and returns the image list."""
    images = []
    total = 0
    for folder in paths:
        total += len(glob.glob1(folder, '*.png'))
        for file_name in glob.glob(folder + '*.png'):
            frame = cv2.imread(file_name)
            frame = cv2.resize(frame, (224, 224))
            images.append(frame / 255.)
    print("Total number of images:%d" % (total,))
    return images
def createFolderPaths(folders):
    """Map each run-folder name to its rgb_right_detector/ path on the USB
    drive (see module-level USBPath)."""
    return [USBPath + folder + '/' + 'rgb_right_detector' + '/'
            for folder in folders]
def load_training_images():
    # Resolve the rgb_right_detector folder of every training run and load
    # all PNGs they contain (see load_images for the preprocessing applied).
    paths = createFolderPaths(trainingFolders)
    return load_images(paths)
def load_data():
    """Load all training images, shuffle them, and split into a training
    array plus a 200-image test array, both shaped (-1, H, W, C)."""
    images = shuffle(load_training_images())
    split = len(images) - 200  # last 200 shuffled images form the test set
    train_arr = np.array(images[:split])
    test_arr = np.array(images[split:])
    train_arr = np.reshape(train_arr, [-1, train_arr.shape[1], train_arr.shape[2], train_arr.shape[3]])
    test_arr = np.reshape(test_arr, [-1, test_arr.shape[1], test_arr.shape[2], test_arr.shape[3]])
    return (train_arr, test_arr)
#Sampling function used by the VAE
def sample_func(args):
    """Reparameterization trick used by the VAE: draw z = mu + sigma * eps
    with eps ~ N(0, 1), given args = (z_mean, z_log_var)."""
    mu, log_var = args
    sample_shape = (K.shape(mu)[0], K.int_shape(mu)[1])
    # random_normal defaults to mean 0, stddev 1
    eps = K.random_normal(shape=sample_shape)
    return mu + K.exp(0.5 * log_var) * eps
#CNN-VAE model. Only important part is the N_latent variable which holds the latent space data.
def CreateModels(n_latent=100, sample_enc=sample_func, beta=1.2, C=0):
    """Build the convolutional beta-VAE: a 4-stage conv encoder down to an
    n_latent-dimensional Gaussian, and a mirrored deconv decoder.

    n_latent   : size of the latent space.
    sample_enc : NOTE(review) -- accepted but unused; the Lambda layer below
                 references the module-level sample_func directly.
    beta, C    : weights of the KL term in the loss: beta * (KL - C).
    Returns (autoencoder, encoder, decoder, z_log_var).
    """
    # NOTE(review): `model` is never used afterwards.
    model = Sequential()
    # ---- Encoder: 224x224x3 -> conv/pool stages -> dense -> latent ----
    input_img = Input(shape=(224,224,3), name='image')
    x = Conv2D(128, (3, 3), use_bias=False, padding='same')(input_img)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(64, (3, 3), padding='same',use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(32, (3, 3), padding='same',use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(16, (3, 3), padding='same',use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Flatten()(x)
    x = Dense(2048)(x)
    x = LeakyReLU(0.1)(x)
    x = Dense(1000)(x)
    x = LeakyReLU(0.1)(x)
    x = Dense(250)(x)
    x = LeakyReLU(0.1)(x)
    # x = Dense(50)(x)
    # x = LeakyReLU(0.1)(x)
    # Latent Gaussian parameters and the sampled latent vector z.
    z_mean = Dense(n_latent, name='z_mean')(x)
    z_log_var = Dense(n_latent, name='z_log_var')(x)
    z = Lambda(sample_func, output_shape=(n_latent,), name='z')([z_mean, z_log_var])
    encoder = Model(input_img, [z_mean, z_log_var, z], name='encoder')
    #encoder.summary()
    # ---- Decoder: latent -> dense -> 14x14x16 -> upsample back to image ----
    latent_inputs = Input(shape=(n_latent,), name='z_sampling')
    # x = Dense(50)(latent_inputs)
    # x = LeakyReLU(0.1)(x)
    x = Dense(250)(latent_inputs)
    x = LeakyReLU(0.1)(x)
    x = Dense(1000)(x)
    x = LeakyReLU(0.1)(x)
    x = Dense(2048)(x)
    x = LeakyReLU(0.1)(x)
    x = Dense(3136)(x)
    x = LeakyReLU(0.1)(x)
    x = Reshape((14, 14, 16))(x)  # 3136 = 14 * 14 * 16
    x = Conv2D(16, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = UpSampling2D((2,2))(x)
    x = Conv2D(32, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = UpSampling2D((2,2))(x)
    x = Conv2D(64, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = UpSampling2D((2,2))(x)
    x = Conv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = UpSampling2D((2,2))(x)
    x = Conv2D(3, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    decoded = Activation('sigmoid')(x)  # pixel values back in [0, 1]
    decoder = Model(latent_inputs, decoded)
    #decoder.summary()
    # End-to-end VAE: image -> sampled z (index 2 of encoder outputs) -> image.
    outputs = decoder(encoder(input_img)[2])
    autoencoder = Model(input_img,outputs)
    #autoencoder.summary()
    def vae_loss(true, pred):
        # Pixel-wise MSE reconstruction term, scaled to the full image size.
        rec_loss = mse(K.flatten(true), K.flatten(pred))
        rec_loss *= 224*224*3
        # KL divergence of N(z_mean, exp(z_log_var)) against N(0, 1).
        kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        vae_loss = K.mean(rec_loss + beta*(kl_loss-C))
        return vae_loss
    #autoencoder.add_loss(vae_loss)
    def lr_scheduler(epoch):  # learning-rate scheduler to adjust learning rate
        lr = 1e-6
        if epoch > 50:
            print("New learning rate")
            lr = 1e-8
        if epoch > 75:
            print("New learning rate")
            lr = 1e-8
        return lr
    # NOTE(review): `scheduler` and `adam` are built but never passed to
    # compile/fit -- the string 'adam' below selects a default Adam instead.
    scheduler = LearningRateScheduler(lr_scheduler)
    #Define adam optimizer
    adam = keras.optimizers.Adam(lr=1e-6, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    autoencoder.compile(optimizer='adam',loss=vae_loss, metrics=[vae_loss])
    #Define adam optimizer
    #adam = keras.optimizers.Adam(lr=0.000001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    #autoencoder.compile(optimizer='rmsprop',loss=vae_loss, metrics=[vae_loss])
    #autoencoder.compile(optimizer='adam',loss=vae_loss, metrics=[vae_loss])
    return autoencoder, encoder,decoder, z_log_var
#Train function to fit the data to the model
def train(X, autoencoder):
    """Fit the VAE on X = (X_train, X_test) with checkpointing, early
    stopping and CSV loss logging (module-level `data` callback).

    Bug fix: the EarlyStopping instance was previously created and
    immediately discarded (a stray trailing comma turned the statement into
    an unused tuple), so early stopping never took effect; it is now
    registered in the callback list.
    """
    X_train, X_test = X
    filePath = Working_path + 'weights.best.hdf5'  # checkpoint weights
    checkpoint = ModelCheckpoint(filePath, monitor='vae_loss', verbose=1, save_best_only=True, mode='min')
    early_stop = EarlyStopping(monitor='vae_loss', patience=10, verbose=0)
    callbacks_list = [checkpoint, early_stop, data]
    autoencoder.fit(X_train, X_train, epochs=75, batch_size=16, shuffle=True,
                    validation_data=(X_test, X_test), callbacks=callbacks_list, verbose=2)
#Save the autoencoder model
def SaveAutoencoderModel(autoencoder):
    """Persist the autoencoder: architecture as JSON, weights as HDF5."""
    with open(Working_path + 'auto_model.json', "w") as json_file:
        json_file.write(autoencoder.to_json())
    autoencoder.save_weights(Working_path + 'auto_model.h5')
    print("Saved Autoencoder model to disk")
#Save the encoder model
def SaveEncoderModel(encoder):
    """Persist the encoder: architecture as JSON, weights as HDF5."""
    with open(Working_path + 'en_model.json', "w") as json_file:
        json_file.write(encoder.to_json())
    encoder.save_weights(Working_path + 'en_model.h5')
    print("Saved Encoder model to disk")
#Test the trained models on a different test data
def test(autoencoder, encoder, test):
    """Run the trained models on *test*; return the inputs and
    reconstructions rescaled back to [0, 255] plus the latent encodings."""
    reconstructed = autoencoder.predict(test)
    latent = encoder.predict(test)
    res_x = test.copy() * 255
    res_y = reconstructed.copy() * 255
    return res_x, res_y, latent
#Save the reconstructed test data in a separate folder.
#For this create a folder named results in the directory you are working in.
def savedata(test_in, test_out, test_encoded, Working_path, trainfolder):
os.makedirs(Working_path + trainfolder + '/', exist_ok=True)
for i in range(len(test_in)):
test_in = np.reshape(test_in,[-1, 224,224,3])#Reshape the data
test_out = np.reshape(test_out,[-1, 224,224,3])#Reshape the data
cv2.imwrite(Working_path + trainfolder + '/' + str(i) +'_in.png', test_in[i])
cv2.imwrite(Working_path + trainfolder + '/' + str(i) +'_out.png', test_out[i])
if __name__ == '__main__':
    # Training entry point: load data, build and train the VAE, export the
    # reconstructions of the held-out images, then persist both models.
    print("loading image")
    inp = load_data()
    print("created model")
    autoencoder,encoder,decoder,z_log_var = CreateModels()# Running the autoencoder model
    print("training model")
    train(inp,autoencoder)#Train the model with the data
    print("testing model")
    test_in, test_out, test_encoded = test(autoencoder, encoder, inp[1])#Test the trained model with new data
    print("save model performance")
    savedata(test_in, test_out, test_encoded, Working_path, trainfolder)#Save the data
    print("save encoder model")
    SaveEncoderModel(encoder)
    SaveAutoencoderModel(autoencoder)#Save the autoencoder and encoder models
| [
"keras.backend.shape",
"keras.backend.sum",
"keras.callbacks.TerminateOnNaN",
"keras.backend.flatten",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.reshape",
"keras.backend.square",
"numpy.random.seed",
"keras.models.Model",
"keras.callbacks.EarlyStopping",
"keras.backend.exp",
"g... | [((818, 850), 'keras.callbacks.TerminateOnNaN', 'keras.callbacks.TerminateOnNaN', ([], {}), '()\n', (848, 850), False, 'import keras\n'), ((860, 880), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (874, 880), True, 'import numpy as np\n'), ((1916, 1985), 'keras.callbacks.CSVLogger', 'CSVLogger', (["(Working_path + 'kerasloss.csv')"], {'append': '(True)', 'separator': '""";"""'}), "(Working_path + 'kerasloss.csv', append=True, separator=';')\n", (1925, 1985), False, 'from keras.callbacks import CSVLogger\n'), ((2918, 2936), 'sklearn.utils.shuffle', 'shuffle', (['csv_input'], {}), '(csv_input)\n', (2925, 2936), False, 'from sklearn.utils import shuffle\n'), ((3078, 3169), 'numpy.reshape', 'np.reshape', (['img_train', '[-1, img_train.shape[1], img_train.shape[2], img_train.shape[3]]'], {}), '(img_train, [-1, img_train.shape[1], img_train.shape[2],\n img_train.shape[3]])\n', (3088, 3169), True, 'import numpy as np\n'), ((3179, 3267), 'numpy.reshape', 'np.reshape', (['img_test', '[-1, img_test.shape[1], img_test.shape[2], img_test.shape[3]]'], {}), '(img_test, [-1, img_test.shape[1], img_test.shape[2], img_test.\n shape[3]])\n', (3189, 3267), True, 'import numpy as np\n'), ((3686, 3721), 'keras.backend.random_normal', 'K.random_normal', ([], {'shape': '(batch, dim)'}), '(shape=(batch, dim))\n', (3701, 3721), True, 'from keras import backend as K\n'), ((3955, 3967), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3965, 3967), False, 'from keras.models import Model, Sequential\n'), ((3984, 4024), 'keras.layers.Input', 'Input', ([], {'shape': '(224, 224, 3)', 'name': '"""image"""'}), "(shape=(224, 224, 3), name='image')\n", (3989, 4024), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((5125, 5181), 'keras.models.Model', 'Model', (['input_img', '[z_mean, z_log_var, z]'], {'name': '"""encoder"""'}), "(input_img, [z_mean, z_log_var, z], name='encoder')\n", (5130, 5181), False, 'from 
keras.models import Model, Sequential\n'), ((5226, 5269), 'keras.layers.Input', 'Input', ([], {'shape': '(n_latent,)', 'name': '"""z_sampling"""'}), "(shape=(n_latent,), name='z_sampling')\n", (5231, 5269), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((6331, 6360), 'keras.models.Model', 'Model', (['latent_inputs', 'decoded'], {}), '(latent_inputs, decoded)\n', (6336, 6360), False, 'from keras.models import Model, Sequential\n'), ((6448, 6473), 'keras.models.Model', 'Model', (['input_img', 'outputs'], {}), '(input_img, outputs)\n', (6453, 6473), False, 'from keras.models import Model, Sequential\n'), ((7173, 7208), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['lr_scheduler'], {}), '(lr_scheduler)\n', (7194, 7208), False, 'from keras.callbacks import Callback, LearningRateScheduler\n'), ((7247, 7348), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(1e-06)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': 'None', 'decay': '(0.0)', 'amsgrad': '(False)'}), '(lr=1e-06, beta_1=0.9, beta_2=0.999, epsilon=None,\n decay=0.0, amsgrad=False)\n', (7268, 7348), False, 'import keras\n'), ((7953, 8047), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filePath'], {'monitor': '"""vae_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), "(filePath, monitor='vae_loss', verbose=1, save_best_only=\n True, mode='min')\n", (7968, 8047), False, 'from keras.callbacks import ModelCheckpoint\n'), ((9902, 9962), 'os.makedirs', 'os.makedirs', (["(Working_path + trainfolder + '/')"], {'exist_ok': '(True)'}), "(Working_path + trainfolder + '/', exist_ok=True)\n", (9913, 9962), False, 'import os\n'), ((2213, 2238), 'glob.glob', 'glob.glob', (["(path + '*.png')"], {}), "(path + '*.png')\n", (2222, 2238), False, 'import glob\n'), ((3561, 3576), 'keras.backend.shape', 'K.shape', (['z_mean'], {}), '(z_mean)\n', (3568, 3576), True, 'from keras import backend as K\n'), 
((3590, 3609), 'keras.backend.int_shape', 'K.int_shape', (['z_mean'], {}), '(z_mean)\n', (3601, 3609), True, 'from keras import backend as K\n'), ((4031, 4082), 'keras.layers.Convolution2D', 'Conv2D', (['(128)', '(3, 3)'], {'use_bias': '(False)', 'padding': '"""same"""'}), "(128, (3, 3), use_bias=False, padding='same')\n", (4037, 4082), True, 'from keras.layers import Convolution2D as Conv2D\n'), ((4103, 4123), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4121, 4123), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4135, 4149), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (4144, 4149), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((4161, 4197), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'padding': '"""same"""'}), "((2, 2), padding='same')\n", (4173, 4197), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4210, 4260), 'keras.layers.Convolution2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(64, (3, 3), padding='same', use_bias=False)\n", (4216, 4260), True, 'from keras.layers import Convolution2D as Conv2D\n'), ((4271, 4291), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4289, 4291), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4303, 4317), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (4312, 4317), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((4329, 4365), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'padding': '"""same"""'}), "((2, 2), padding='same')\n", (4341, 4365), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4378, 4428), 
'keras.layers.Convolution2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(32, (3, 3), padding='same', use_bias=False)\n", (4384, 4428), True, 'from keras.layers import Convolution2D as Conv2D\n'), ((4439, 4459), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4457, 4459), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4471, 4485), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (4480, 4485), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((4497, 4533), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'padding': '"""same"""'}), "((2, 2), padding='same')\n", (4509, 4533), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4546, 4596), 'keras.layers.Convolution2D', 'Conv2D', (['(16)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(16, (3, 3), padding='same', use_bias=False)\n", (4552, 4596), True, 'from keras.layers import Convolution2D as Conv2D\n'), ((4607, 4627), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4625, 4627), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4639, 4653), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (4648, 4653), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((4665, 4701), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'padding': '"""same"""'}), "((2, 2), padding='same')\n", (4677, 4701), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4714, 4723), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4721, 4723), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, 
Activation\n'), ((4735, 4746), 'keras.layers.Dense', 'Dense', (['(2048)'], {}), '(2048)\n', (4740, 4746), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4758, 4772), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (4767, 4772), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((4784, 4795), 'keras.layers.Dense', 'Dense', (['(1000)'], {}), '(1000)\n', (4789, 4795), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4807, 4821), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (4816, 4821), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((4833, 4843), 'keras.layers.Dense', 'Dense', (['(250)'], {}), '(250)\n', (4838, 4843), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4855, 4869), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (4864, 4869), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((4938, 4968), 'keras.layers.Dense', 'Dense', (['n_latent'], {'name': '"""z_mean"""'}), "(n_latent, name='z_mean')\n", (4943, 4968), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((4988, 5021), 'keras.layers.Dense', 'Dense', (['n_latent'], {'name': '"""z_log_var"""'}), "(n_latent, name='z_log_var')\n", (4993, 5021), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((5033, 5088), 'keras.layers.Lambda', 'Lambda', (['sample_func'], {'output_shape': '(n_latent,)', 'name': '"""z"""'}), "(sample_func, output_shape=(n_latent,), name='z')\n", (5039, 5088), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((5341, 5351), 
'keras.layers.Dense', 'Dense', (['(250)'], {}), '(250)\n', (5346, 5351), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((5375, 5389), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (5384, 5389), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((5401, 5412), 'keras.layers.Dense', 'Dense', (['(1000)'], {}), '(1000)\n', (5406, 5412), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((5424, 5438), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (5433, 5438), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((5450, 5461), 'keras.layers.Dense', 'Dense', (['(2048)'], {}), '(2048)\n', (5455, 5461), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((5473, 5487), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (5482, 5487), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((5499, 5510), 'keras.layers.Dense', 'Dense', (['(3136)'], {}), '(3136)\n', (5504, 5510), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((5522, 5536), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (5531, 5536), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((5549, 5570), 'keras.layers.Reshape', 'Reshape', (['(14, 14, 16)'], {}), '((14, 14, 16))\n', (5556, 5570), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((5583, 5633), 'keras.layers.Convolution2D', 'Conv2D', (['(16)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(16, (3, 3), padding='same', use_bias=False)\n", (5589, 
5633), True, 'from keras.layers import Convolution2D as Conv2D\n'), ((5645, 5665), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5663, 5665), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((5677, 5691), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (5686, 5691), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((5703, 5723), 'keras.layers.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (5715, 5723), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((5735, 5785), 'keras.layers.Convolution2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(32, (3, 3), padding='same', use_bias=False)\n", (5741, 5785), True, 'from keras.layers import Convolution2D as Conv2D\n'), ((5797, 5817), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5815, 5817), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((5829, 5843), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (5838, 5843), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((5855, 5875), 'keras.layers.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (5867, 5875), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((5887, 5937), 'keras.layers.Convolution2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(64, (3, 3), padding='same', use_bias=False)\n", (5893, 5937), True, 'from keras.layers import Convolution2D as Conv2D\n'), ((5949, 5969), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5967, 5969), False, 'from keras.layers import Lambda, Input, 
Dense, MaxPooling2D, BatchNormalization, Input\n'), ((5981, 5995), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (5990, 5995), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((6007, 6027), 'keras.layers.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (6019, 6027), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((6039, 6090), 'keras.layers.Convolution2D', 'Conv2D', (['(128)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(128, (3, 3), padding='same', use_bias=False)\n", (6045, 6090), True, 'from keras.layers import Convolution2D as Conv2D\n'), ((6102, 6122), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6120, 6122), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((6134, 6148), 'keras.layers.LeakyReLU', 'LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (6143, 6148), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((6160, 6180), 'keras.layers.UpSampling2D', 'UpSampling2D', (['(2, 2)'], {}), '((2, 2))\n', (6172, 6180), False, 'from keras.layers import UpSampling2D, Dropout, Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((6192, 6241), 'keras.layers.Convolution2D', 'Conv2D', (['(3)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)'}), "(3, (3, 3), padding='same', use_bias=False)\n", (6198, 6241), True, 'from keras.layers import Convolution2D as Conv2D\n'), ((6253, 6273), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6271, 6273), False, 'from keras.layers import Lambda, Input, Dense, MaxPooling2D, BatchNormalization, Input\n'), ((6291, 6312), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (6301, 6312), False, 'from keras.layers import UpSampling2D, Dropout, 
Flatten, Reshape, RepeatVector, LeakyReLU, Activation\n'), ((6706, 6729), 'keras.backend.sum', 'K.sum', (['kl_loss'], {'axis': '(-1)'}), '(kl_loss, axis=-1)\n', (6711, 6729), True, 'from keras import backend as K\n'), ((6773, 6812), 'keras.backend.mean', 'K.mean', (['(rec_loss + beta * (kl_loss - C))'], {}), '(rec_loss + beta * (kl_loss - C))\n', (6779, 6812), True, 'from keras import backend as K\n'), ((8047, 8104), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""vae_loss"""', 'patience': '(10)', 'verbose': '(0)'}), "(monitor='vae_loss', patience=10, verbose=0)\n", (8060, 8104), False, 'from keras.callbacks import EarlyStopping\n'), ((10015, 10053), 'numpy.reshape', 'np.reshape', (['test_in', '[-1, 224, 224, 3]'], {}), '(test_in, [-1, 224, 224, 3])\n', (10025, 10053), True, 'import numpy as np\n'), ((10087, 10126), 'numpy.reshape', 'np.reshape', (['test_out', '[-1, 224, 224, 3]'], {}), '(test_out, [-1, 224, 224, 3])\n', (10097, 10126), True, 'import numpy as np\n'), ((2138, 2163), 'glob.glob1', 'glob.glob1', (['path', '"""*.png"""'], {}), "(path, '*.png')\n", (2148, 2163), False, 'import glob\n'), ((2256, 2271), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (2266, 2271), False, 'import cv2\n'), ((2290, 2317), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (2300, 2317), False, 'import cv2\n'), ((3742, 3764), 'keras.backend.exp', 'K.exp', (['(0.5 * z_log_var)'], {}), '(0.5 * z_log_var)\n', (3747, 3764), True, 'from keras import backend as K\n'), ((6554, 6569), 'keras.backend.flatten', 'K.flatten', (['true'], {}), '(true)\n', (6563, 6569), True, 'from keras import backend as K\n'), ((6571, 6586), 'keras.backend.flatten', 'K.flatten', (['pred'], {}), '(pred)\n', (6580, 6586), True, 'from keras import backend as K\n'), ((6671, 6687), 'keras.backend.exp', 'K.exp', (['z_log_var'], {}), '(z_log_var)\n', (6676, 6687), True, 'from keras import backend as K\n'), ((6652, 6668), 'keras.backend.square', 'K.square', 
(['z_mean'], {}), '(z_mean)\n', (6660, 6668), True, 'from keras import backend as K\n')] |
#! /usr/bin/env python
from __future__ import absolute_import
from pybedtools import BedTool
import argparse
import numpy as np
parser = argparse.ArgumentParser(
    description="""
Given two or more bed files, ``multiBedSumary.py`` computes the sum of overlapping intervals in every genomic region. The default output of ``multiBedSumary.py`` (a compressed numpy array, .npz) can be used from various tools of the deepseq2 package such as ``plotCorrelation`` or ``plotPCA`` for visualization and diagnostic purposes.
""")
parser.add_argument('--regions', '-r',
                    help='BED file containing all regions that should be considered.',
                    required=True)
parser.add_argument('--bedfiles', '-b',
                    help='List of bed files, separated by spaces.',
                    nargs='+',
                    required=True)
parser.add_argument('--outFileName', '-out',
                    help='File name to save the compressed matrix file (npz format) '
                         'needed by the "plotHeatmap" and "plotProfile" tools.',
                    required=True)
parser.add_argument('--labels', '-l',
                    help='User defined labels instead of default labels from '
                         'file names. '
                         'Multiple labels have to be separated by spaces, e.g., '
                         '--labels sample1 sample2 sample3',
                    nargs='+')
args = parser.parse_args()

# Count, for every reference interval, the overlaps contributed by each bed file.
annot = BedTool().annotate(i=args.regions,
                              files=args.bedfiles,
                              counts=True)

# Load the count columns (one per bed file) into a numpy array; the first
# three columns of the annotated output are chrom/start/end and are skipped.
counts = np.loadtxt(annot.fn, usecols=list(range(3, 3 + len(args.bedfiles))))

# Combine matrix and labels, save as an .npz archive.
labels = args.bedfiles if args.labels is None else args.labels
np.savez(args.outFileName, labels=np.array(labels), matrix=counts)
| [
"pybedtools.BedTool",
"numpy.array",
"argparse.ArgumentParser"
] | [((139, 531), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\n\nGiven two or mote bed files, ``multiBedSumary.py`` computes the sum of overlapping intervals in every genomic region. The default output of ``multiBedSumary.py`` (a compressed numpy array, .npz) can be used from various tools of the deepseq2 package such as ``plotCorrelation`` or ``plotPCA`` for visualization and diagnostic purposes.\n\n"""'}), '(description=\n """\n\nGiven two or mote bed files, ``multiBedSumary.py`` computes the sum of overlapping intervals in every genomic region. The default output of ``multiBedSumary.py`` (a compressed numpy array, .npz) can be used from various tools of the deepseq2 package such as ``plotCorrelation`` or ``plotPCA`` for visualization and diagnostic purposes.\n\n"""\n )\n', (162, 531), False, 'import argparse\n'), ((1569, 1578), 'pybedtools.BedTool', 'BedTool', ([], {}), '()\n', (1576, 1578), False, 'from pybedtools import BedTool\n'), ((1963, 1979), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (1971, 1979), True, 'import numpy as np\n')] |
"""
Data handling for images and reflectance
"""
from gzopen import gzopen
import logging
import logging.config
import yaml
import copy as cp
import re
import ntpath
import h5py
import datetime
import numpy as np
import time
import io
import glob
import pandas as pd
import matplotlib.pyplot as plt
class wafer():
    """
    Base class representing a single wafer sample.

    Holds sample identification fields, a configured logger, and a
    low-pass smoothing helper shared by the data-handling subclasses.
    """
    def __init__(self):
        # Sample identification, filled in by users/subclasses.
        self.sample_id = None
        self.sample_description = None
        # Logger for this class hierarchy, at INFO level.
        self.logger = logging.getLogger("Wafer")
        self.logger.setLevel(logging.INFO)
        # Format an integer with an explicit leading "+" for positive values.
        self.score = lambda i: ("+" if i > 0 else "") + str(i)

    def fft_smoothing(self, d, param):
        """
        Low-pass filter a 1D signal via a truncated real FFT.

        d is a 1D vector; param is the cutoff frequency: every component
        at index >= int(param) is zeroed before the inverse transform.
        """
        spectrum = np.fft.rfft(d)
        cutoff = int(param)
        spectrum[cutoff:] = 0.
        return np.fft.irfft(spectrum, len(d))
class LSA(wafer):
    """
    Subclass to handle reading, writing, and logging LSA process data.

    File names produced by image_name()/spec_name() encode the stripe
    coordinates, dwell time, and peak temperature from the process log.
    """
    def __init__(self):
        super(LSA, self).__init__()
        self.logger = logging.getLogger("WaferLSA")
        self.data_type = "LSA"
        # File-name prefixes for image ("b...") and spectrum ("s...") files.
        self.img_prefix = "b"
        self.spec_prefix = "s"

    def coord_format(self, d, l):
        """
        Format a numeric value as an explicitly signed, zero-padded integer
        string of l digits (e.g. d=5, l=2 -> "+05"; d=-3, l=2 -> "-03").
        """
        q = str(int(abs(d))).zfill(l)
        if d >= 0.:
            q = "+" + q
        else:
            q = "-" + q
        return q

    def _stripe_stem(self, stripe):
        """Common '<x>_<y>_<dwell>_<Tpeak>' stem shared by all file names."""
        return "_".join([self.coord_format(stripe["x"], 2),
                         self.coord_format(stripe["y"], 2),
                         self.coord_format(stripe["dwell"], 5),
                         self.coord_format(stripe["Tpeak"], 4)])

    def read(self, fn):
        """
        Read the position, dwell, and peak temperature of a processed wafer
        from a CSV process log (one stripe per row, header row skipped).

        Returns a list of dicts with keys "pos", "x", "y", "dwell", "Tpeak",
        and, when a fifth column is present, "Power".
        """
        self.logger.info("Reading %s", fn)
        data_list = []
        data = pd.read_csv(fn, delimiter=',', skiprows=1, header=None).values
        for i in range(data.shape[0]):
            data_dict = {"pos" : data[i, 0:2].tolist(),
                         "x" : data[i, 0],
                         "y" : data[i, 1],
                         "dwell": data[i, 2],
                         "Tpeak": data[i, 3]}
            try:
                data_dict["Power"] = data[i, 4]
            except IndexError:
                # BUGFIX: was a bare except; older logs simply lack a
                # power column, which raises IndexError on the lookup.
                self.logger.debug("No power value available")
            data_list.append(data_dict)
        return data_list

    def image_name(self, stripe, suffix = None):
        """
        Return the (presumable) image file name for a stripe, given the
        information from the LSA process log.
        """
        name = self.img_prefix + self._stripe_stem(stripe)
        if suffix is not None:
            name += "_"
            name += suffix
        name += ".bmp"
        return name

    def spec_name(self, stripe, suffix = None):
        """
        Return the (presumable) spectrum file name for a stripe, given the
        information from the LSA process log.
        """
        name = self.spec_prefix + self._stripe_stem(stripe)
        if suffix is not None:
            name += "_"
            name += suffix
        name += ".csv"
        return name

    def spec_name_old(self, stripe):
        """
        Return the legacy spectrum file name for a stripe: dwell and Tpeak
        are zero-padded WITHOUT an explicit sign, unlike spec_name().
        """
        name = self.spec_prefix
        name += self.coord_format(stripe["x"], 2)
        name += "_"
        name += self.coord_format(stripe["y"], 2)
        name += "_"
        name += str(int(stripe["dwell"])).zfill(5)
        name += "_"
        name += str(int(stripe["Tpeak"])).zfill(4)
        name += ".csv"
        return name
class spec(wafer):
    """
    Subclass to handle reading, writing, and logging spectroscopy data.

    Reads stripe-scan and reference (mirror/blank) spectra in both the old
    single-line and new multi-line comment-header formats, writes them back
    in the genplot CSV format, and normalizes spectra against references.
    """
    def __init__(self):
        super(spec, self).__init__()
        self.logger = logging.getLogger("WaferSpec")
        self.data_type = "Spec"
        # Interpolate over a known dead detector pixel when reading spectra.
        self.dead_fix = True
        self.dead_pixel = 1388
        # FFT low-pass cutoff used by normalize() via fft_smoothing().
        self.smooth = 15

    def user_coord(self, data_dict):
        """
        Given a stripe of spectra, split them into individual spectra with
        user (stage) coordinates.

        Returns a list of dicts, one per scan line, each carrying position,
        dwell, Tpeak, the spectrum column, and the wavelength axis.
        """
        spect_dict = []
        xmin = data_dict["Meta"]["Position"]["Range"][0][0] + data_dict["Meta"]["Position"]["Center"][0]
        xmax = data_dict["Meta"]["Position"]["Range"][1][0] + data_dict["Meta"]["Position"]["Center"][0]
        ymin = data_dict["Meta"]["Position"]["Range"][0][1] + data_dict["Meta"]["Position"]["Center"][1]
        ymax = data_dict["Meta"]["Position"]["Range"][1][1] + data_dict["Meta"]["Position"]["Center"][1]
        nlines = data_dict["Meta"]["Position"]["Scan lines"]
        x_list = np.linspace(xmin, xmax, nlines).tolist()
        # BUGFIX: the y coordinates previously interpolated from ymin to
        # xmax instead of ymax.
        y_list = np.linspace(ymin, ymax, nlines).tolist()
        for i, (x, y) in enumerate(zip(x_list, y_list)):
            d = {"pos" : [x, y],
                 "x" : x,
                 "y" : y,
                 "Tpeak" : data_dict["Meta"]["Tpeak"],
                 "dwell" : data_dict["Meta"]["dwell"],
                 "Spec" : data_dict["Spec"][:, i],
                 "Wavelengths" : data_dict["Wavelengths"]
                 }
            spect_dict.append(d)
        return spect_dict

    def init_meta(self):
        """
        Return a fresh metadata dict with every known field set to None.
        """
        meta = {
            "Timestamp": None,
            "Position": None,
            "Ocean Optics Spectrometer": None,
            "Collection": None,
            "Tpeak": None,
            "dwell": None
        }
        return meta

    def read_old_meta(self, fn, meta):
        """
        Parse metadata from the single-line (old-format) file header and
        store it under meta["Position"].
        """
        position = {}
        with gzopen(fn) as f:
            first_line = f.readline().strip().split(' ')
            first_line = [elem.replace("(","").replace(")","") for elem in first_line]
            position['Center'] = [float(i) for i in first_line[3].split(',')]
            rng = []
            rng.append([float(i) for i in first_line[7].split(',')])
            rng.append([float(i) for i in first_line[11].split(',')])
            position['Range'] = rng
            position['Scan lines'] = int(first_line[14])
            # BUGFIX: was `if position in locals():`, which raises TypeError
            # (a dict is unhashable); store the parsed block when non-empty.
            if position:
                meta["Position"] = position
        return meta

    def read_new_meta(self, fn, meta):
        """
        Parse metadata from the multi-line (new-format) comment header:
        Position, Ocean Optics Spectrometer, and Collection blocks plus the
        timestamp. Only blocks actually present are written into meta.
        """
        position = None
        spectrometer = None
        collection = None
        with gzopen(fn) as f:
            for line in f:
                if line.startswith('/'):
                    # A section header starts the corresponding dict.
                    if "Position" in line:
                        position = {}
                    if "Ocean Optics Spectrometer" in line:
                        spectrometer = {}
                    if "Collection" in line:
                        collection = {}
                    if "Timestamp" in line:
                        timestamp = int(line.strip().split()[2])
                        meta["Timestamp"] = timestamp
                    if "Center" in line:
                        position['Center'] = [float(i) for i in line.strip().split()[-1].replace("(", "").replace(")", "").split(',')]
                    if "Range" in line:
                        rng = []
                        l = line.strip().split()[-2:]
                        l = [elem.replace("(","").replace(")","") for elem in l]
                        rng.append([float(i) for i in l[0].split(',')])
                        rng.append([float(i) for i in l[1].split(',')])
                        position['Range'] = rng
                    if "Scan lines" in line:
                        position['Scan lines'] = int(line.strip().split()[-1])
                    if "Focus" in line:
                        position['Focus'] = float(line.strip().split()[-1])
                    if "Model" in line:
                        spectrometer["Model"] = line.strip().split()[-1]
                    if "S/N" in line:
                        spectrometer["S/N"] = line.strip().split()[-1]
                    if "API" in line:
                        spectrometer["API"] = line.strip().split()[-1]
                    if "Shutter" in line:
                        spectrometer["Shutter"] = float(line.strip().split()[-2])
                    if "Averages" in line:
                        collection["Averages"] = int(line.strip().split()[-1])
                    if "Dark pixel correction" in line:
                        collection["Dark pixel correction"] = str(line.strip().split()[-1])
                    if "Non-linear correction" in line:
                        collection["Non-linear correction"] = str(line.strip().split()[-1])
        if position is not None:
            meta["Position"] = position
        if spectrometer is not None:
            meta["Ocean Optics Spectrometer"] = spectrometer
        if collection is not None:
            meta["Collection"] = collection
        return meta

    def write_csv_meta_header(self, meta):
        """
        Return the genplot-style comment header for a stripe ("Block scan")
        file, including the Position block.
        """
        meta_string = "/* Block scan\n"
        dt = time.ctime(int(meta["Timestamp"]))
        key = "Timestamp" ; meta_string += "/* %s: %s %s\n" %(key, meta[key], dt)
        key = "Position" ; meta_string += "/* %s:\n" %(key)
        key_1 = "Center" ; meta_string += "/* %s: (%12.8f, %12.8f)\n" %(key_1, meta[key][key_1][0], meta[key][key_1][1])
        key_1 = "Range" ; meta_string += "/* %s: (%12.8f, %12.8f) (%12.8f, %12.8f)\n" %(key_1, meta[key][key_1][0][0], meta[key][key_1][0][1], meta[key][key_1][1][0], meta[key][key_1][1][1])
        key_1 = 'Scan lines'; meta_string += "/* %s: %8d \n" %(key_1, meta[key][key_1])
        key_1 = "Focus" ; meta_string += "/* %s: %12.8f\n" %(key_1, meta[key][key_1])
        key = "Ocean Optics Spectrometer" ; meta_string += "/* %s:\n" %(key)
        key_1 = 'Model' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        key_1 = 'S/N' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        key_1 = 'API' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        key_1 = 'Shutter' ; meta_string += "/* %s: %15.8f\n" %(key_1, meta[key][key_1])
        key = "Collection" ; meta_string += "/* %s:\n" %(key)
        key_1 = 'Averages' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        key_1 = 'Dark pixel correction' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        key_1 = 'Non-linear correction' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        return meta_string

    def write_csv_meta_header_reference(self, meta):
        """
        Return the genplot-style comment header for a reference
        (mirror/blank) spectrum file; no Position block is written.
        """
        meta_string = "/* OceanOptics spectrum\n"
        dt = time.ctime(int(meta["Timestamp"]))
        key = "Timestamp" ; meta_string += "/* %s: %s %s\n" %(key, meta[key], dt)
        key = "Ocean Optics Spectrometer" ; meta_string += "/* %s:\n" %(key)
        key_1 = 'Model' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        key_1 = 'S/N' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        key_1 = 'API' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        key_1 = 'Shutter' ; meta_string += "/* %s: %15.8f\n" %(key_1, meta[key][key_1])
        key = "Collection" ; meta_string += "/* %s:\n" %(key)
        key_1 = 'Averages' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        key_1 = 'Dark pixel correction' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        key_1 = 'Non-linear correction' ; meta_string += "/* %s: %s\n" %(key_1, meta[key][key_1])
        meta_string += "/* wavelength [nm] , raw\n"
        return meta_string

    def write_csv_data(self, wavelengths, spectra_data_list):
        """
        Return a CSV data string with wavelengths in the first column
        followed by one column per measured spectrum.
        """
        data_string = "/* Data\n"
        wl = np.array(wavelengths).flatten()
        sp = np.array(spectra_data_list)
        # NOTE: an earlier branch that transposed sp when
        # wl.shape[0] == sp.shape[0] was dead code (its result was always
        # overwritten by the branch below) and has been removed.
        if sp.ndim == 1:
            data_list = [wl.tolist()] + [sp.tolist()]
        else:
            data_list = [wl.tolist()] + sp.tolist()
        data_array = np.array(data_list).T
        s = io.StringIO()
        np.savetxt(s, data_array, delimiter=',', fmt='%f')
        data_string += s.getvalue()
        return data_string

    def write_csv_spec(self, fn_data, meta, wavelengths, spectra_data_list):
        """
        Write a stripe spectra CSV file (header + data).
        """
        with open(fn_data, 'w') as f:
            f.write(self.write_csv_meta_header(meta))
            f.write(self.write_csv_data(wavelengths, spectra_data_list))

    def write_csv_spec_reference(self, fn_data, meta, wavelengths, spectra_data_list):
        """
        Write a reference (mirror or blank) spectra CSV file (header + data).
        """
        with open(fn_data, 'w') as f:
            f.write(self.write_csv_meta_header_reference(meta))
            f.write(self.write_csv_data(wavelengths, spectra_data_list))

    def _is_new_format(self, fn_data):
        """True if the comment header contains a 'correction' line (new format)."""
        with gzopen(fn_data) as f:
            for line in f:
                if line.startswith('/') and "correction" in line:
                    return True
        return False

    def read_spec(self, fn_data):
        """
        Read a lasgo-type spectroscopy scan file.

        Returns {"Wavelengths": 1D array, "Spec": 2D array (pixel x line),
        "Meta": metadata dict}.
        """
        meta = self.init_meta()
        self.logger.info("Reading file %s", fn_data)
        self.newversion = self._is_new_format(fn_data)
        if self.newversion:
            self.logger.debug("Reading new file format")
            meta = self.read_new_meta(fn_data, meta)
            data = pd.read_csv(fn_data, delimiter=',', skiprows=17, header=None).values
        else:
            self.logger.debug("Reading old file format")
            meta = self.read_old_meta(fn_data, meta)
            data = pd.read_csv(fn_data, delimiter=',', skiprows=1, header=None).values
        self.logger.info("Read file %s", fn_data)
        # Parse Tpeak/dwell/coordinates encoded in the file name.
        try:
            fn_base = ntpath.basename(fn_data)
            fn_meta = fn_base.split("_")
            # The last part is the peak temperature in C.
            meta["Tpeak"] = float(fn_meta[-1].split(".")[0])
            # The second-to-last part is the dwell time in microseconds.
            meta["dwell"] = float(fn_meta[-2])
            # Cross-check the coordinates against the comment-block metadata.
            dy = float(fn_meta[-3])
            dx = float(re.sub('[^0-9,+,-]','',fn_meta[-4].split("/")[-1]))
            dd = np.linalg.norm(np.array([dx, dy]) - np.array(meta['Position']["Center"]))
            if dd > 0.0001:
                self.logger.warning("The coordinate from the metadata and the filename disagree %f", dd)
        except (ValueError, IndexError, KeyError, TypeError):
            # BUGFIX: was a bare except; only swallow parsing failures.
            self.logger.warning("Could not retrieve metadata from filename")
        # Getting rid of a dead pixel by averaging its neighbours.
        if self.dead_fix:
            data[self.dead_pixel, 1:] = (data[self.dead_pixel-1, 1:] + data[self.dead_pixel+1, 1:]) * 0.5
            self.logger.info("Eliminated dead pixel at %d", self.dead_pixel)
        # First column is the wavelength axis; the rest are spectra.
        wl = data[:,0]
        data = np.array(data)[:,1:]
        data_dict = {"Wavelengths": wl, "Spec": data, "Meta": meta}
        return data_dict

    def read_mirror_blank(self, fn_data):
        """
        Read a lasgo-type reference spectrum file (mirror or blank).

        Returns {"Wavelengths": 1D array, "Spec": 1D array, "Meta": dict}.
        """
        meta = self.init_meta()
        self.logger.info("Reading file %s", fn_data)
        self.newversion = self._is_new_format(fn_data)
        if self.newversion:
            self.logger.debug("Reading new file format")
            meta = self.read_new_meta(fn_data, meta)
            data = pd.read_csv(fn_data, delimiter=',', skiprows=13, header=None).values
        else:
            self.logger.debug("Reading old file format")
            meta = self.read_old_meta(fn_data, meta)
            data = pd.read_csv(fn_data, delimiter=',', skiprows=1, header=None).values
        self.logger.info("Read file %s", fn_data)
        # Getting rid of a dead pixel by averaging its neighbours.
        if self.dead_fix:
            data[self.dead_pixel,1] = (data[self.dead_pixel-1, 1]+data[self.dead_pixel+1, 1]) * 0.5
            self.logger.info("Eliminated dead pixel at %d", self.dead_pixel)
        wl = data[:,0]
        data = np.array(data)[:,1]
        data_dict = {"Wavelengths": wl, "Spec": data, "Meta": meta}
        return data_dict

    def write_mirror_blank(self, fn_data):
        # Placeholder; references are written via write_csv_spec_reference().
        return

    def normalize(self, data, mirror, blank):
        """
        Normalize raw spectra against FFT-smoothed mirror and blank
        references: (data - blank) / (mirror - blank), columnwise.
        """
        scaling = 1.
        # Smooth references and data with the configured low-pass cutoff.
        mirror_s = self.fft_smoothing(mirror[:], self.smooth)
        blank_s = self.fft_smoothing(blank[:], self.smooth)
        data_s = cp.deepcopy(data)
        if len(data.shape) == 1:
            data_s[:] = self.fft_smoothing(data[:], self.smooth)
        else:
            for i in range(data.shape[1]):
                data_s[:,i] = self.fft_smoothing(data[:,i], self.smooth)
        # Normalization denominator: reference dynamic range per pixel.
        norm = mirror_s[:] - blank_s[:]
        normal_data = ((data_s - blank_s[:].reshape(-1, 1))/norm.reshape(-1, 1)) * scaling
        return normal_data

    def convert_spectrometer_dict(self, spectrometer_dict):
        """
        Map the spectrometer driver dict to the header's "Ocean Optics
        Spectrometer" block; returns (info, raw_dict).
        """
        data_dict = spectrometer_dict
        info_o = {}
        info_o["Model"] = data_dict['model']
        info_o["S/N"] = data_dict['serial']
        info_o["API"] = "None"
        info_o["Shutter"] = data_dict['ms_integrate']
        return info_o, data_dict

    def convert_spec_dict(self, spectrum_dict):
        """
        Map the spectrum driver dict to the header's "Collection" block;
        returns (info, raw_dict).
        """
        data_dict = spectrum_dict
        info_c = {}
        info_c["Averages"] = data_dict['num_average']
        if data_dict['use_dark_pixel'] == 1:
            info_c["Dark pixel correction"] = "On"
        else:
            info_c["Dark pixel correction"] = "Off"
        if data_dict['use_nl_correct'] == 1:
            info_c["Non-linear correction"] = "On"
        else:
            info_c["Non-linear correction"] = "Off"
        return info_c, data_dict

    def convert_scan_info(self, zone, pos_list, focus_list):
        """
        Build the "Position" block for a stripe scan: center from the zone,
        range relative to the center, line count, and average focus.
        """
        info_p = {}
        info_p["Center"] = [zone["x"], zone["y"]]
        info_p["Range"] = [[pos_list[0][0] - info_p["Center"][0], pos_list[0][1] - info_p["Center"][1]],
                           [pos_list[-1][0] - info_p["Center"][0], pos_list[-1][1] - info_p["Center"][1]]]
        info_p["Scan lines"] = len(pos_list)
        info_p["Focus"] = np.average(focus_list)
        return info_p

    def convert_meta(self, pos_list, focus_list, spectrometer_dict, spectrum_dict, zone = None):
        """
        Return complete meta information to save in the stripe format.
        The Position block is included only when a zone is given.
        """
        meta = {}
        info_o, raw_spectrometer_info = self.convert_spectrometer_dict(spectrometer_dict)
        info_c, raw_spec_info = self.convert_spec_dict(spectrum_dict)
        meta["Timestamp"] = raw_spec_info['timestamp']
        if zone is not None:
            info_p = self.convert_scan_info(zone, pos_list, focus_list)
            meta["Position"] = info_p
        meta["Ocean Optics Spectrometer"] = info_o
        meta["Collection"] = info_c
        return meta

    def convert_meta_reference(self, spectrometer_dict, spectrum_dict):
        """
        Return complete meta information to save in the reference (mirror or
        blank) format; no Position block.
        """
        meta = {}
        info_o, raw_spectrometer_info = self.convert_spectrometer_dict(spectrometer_dict)
        info_c, raw_spec_info = self.convert_spec_dict(spectrum_dict)
        meta["Timestamp"] = raw_spec_info['timestamp']
        meta["Ocean Optics Spectrometer"] = info_o
        meta["Collection"] = info_c
        return meta
| [
"logging.getLogger",
"ntpath.basename",
"pandas.read_csv",
"numpy.average",
"numpy.fft.rfft",
"numpy.array",
"numpy.linspace",
"gzopen.gzopen",
"numpy.savetxt",
"copy.deepcopy",
"io.StringIO",
"time.time"
] | [((480, 506), 'logging.getLogger', 'logging.getLogger', (['"""Wafer"""'], {}), "('Wafer')\n", (497, 506), False, 'import logging\n'), ((772, 786), 'numpy.fft.rfft', 'np.fft.rfft', (['d'], {}), '(d)\n', (783, 786), True, 'import numpy as np\n'), ((1052, 1081), 'logging.getLogger', 'logging.getLogger', (['"""WaferLSA"""'], {}), "('WaferLSA')\n", (1069, 1081), False, 'import logging\n'), ((4161, 4191), 'logging.getLogger', 'logging.getLogger', (['"""WaferSpec"""'], {}), "('WaferSpec')\n", (4178, 4191), False, 'import logging\n'), ((12987, 13014), 'numpy.array', 'np.array', (['spectra_data_list'], {}), '(spectra_data_list)\n', (12995, 13014), True, 'import numpy as np\n'), ((13316, 13329), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (13327, 13329), False, 'import io\n'), ((13338, 13388), 'numpy.savetxt', 'np.savetxt', (['s', 'data_array'], {'delimiter': '""","""', 'fmt': '"""%f"""'}), "(s, data_array, delimiter=',', fmt='%f')\n", (13348, 13388), True, 'import numpy as np\n'), ((18718, 18729), 'time.time', 'time.time', ([], {}), '()\n', (18727, 18729), False, 'import time\n'), ((18890, 18907), 'copy.deepcopy', 'cp.deepcopy', (['data'], {}), '(data)\n', (18901, 18907), True, 'import copy as cp\n'), ((19299, 19318), 'copy.deepcopy', 'cp.deepcopy', (['data_s'], {}), '(data_s)\n', (19310, 19318), True, 'import copy as cp\n'), ((19532, 19543), 'time.time', 'time.time', ([], {}), '()\n', (19541, 19543), False, 'import time\n'), ((21089, 21111), 'numpy.average', 'np.average', (['focus_list'], {}), '(focus_list)\n', (21099, 21111), True, 'import numpy as np\n'), ((1634, 1689), 'pandas.read_csv', 'pd.read_csv', (['fn'], {'delimiter': '""","""', 'skiprows': '(1)', 'header': 'None'}), "(fn, delimiter=',', skiprows=1, header=None)\n", (1645, 1689), True, 'import pandas as pd\n'), ((6111, 6121), 'gzopen.gzopen', 'gzopen', (['fn'], {}), '(fn)\n', (6117, 6121), False, 'from gzopen import gzopen\n'), ((6908, 6918), 'gzopen.gzopen', 'gzopen', (['fn'], {}), '(fn)\n', (6914, 
6918), False, 'from gzopen import gzopen\n'), ((13282, 13301), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (13290, 13301), True, 'import numpy as np\n'), ((14328, 14343), 'gzopen.gzopen', 'gzopen', (['fn_data'], {}), '(fn_data)\n', (14334, 14343), False, 'from gzopen import gzopen\n'), ((15483, 15507), 'ntpath.basename', 'ntpath.basename', (['fn_data'], {}), '(fn_data)\n', (15498, 15507), False, 'import ntpath\n'), ((16612, 16626), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (16620, 16626), True, 'import numpy as np\n'), ((17047, 17062), 'gzopen.gzopen', 'gzopen', (['fn_data'], {}), '(fn_data)\n', (17053, 17062), False, 'from gzopen import gzopen\n'), ((18461, 18475), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (18469, 18475), True, 'import numpy as np\n'), ((4987, 5018), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nlines'], {}), '(xmin, xmax, nlines)\n', (4998, 5018), True, 'import numpy as np\n'), ((5045, 5076), 'numpy.linspace', 'np.linspace', (['ymin', 'xmax', 'nlines'], {}), '(ymin, xmax, nlines)\n', (5056, 5076), True, 'import numpy as np\n'), ((12942, 12963), 'numpy.array', 'np.array', (['wavelengths'], {}), '(wavelengths)\n', (12950, 12963), True, 'import numpy as np\n'), ((14902, 14963), 'pandas.read_csv', 'pd.read_csv', (['fn_data'], {'delimiter': '""","""', 'skiprows': '(17)', 'header': 'None'}), "(fn_data, delimiter=',', skiprows=17, header=None)\n", (14913, 14963), True, 'import pandas as pd\n'), ((15284, 15344), 'pandas.read_csv', 'pd.read_csv', (['fn_data'], {'delimiter': '""","""', 'skiprows': '(1)', 'header': 'None'}), "(fn_data, delimiter=',', skiprows=1, header=None)\n", (15295, 15344), True, 'import pandas as pd\n'), ((17640, 17701), 'pandas.read_csv', 'pd.read_csv', (['fn_data'], {'delimiter': '""","""', 'skiprows': '(13)', 'header': 'None'}), "(fn_data, delimiter=',', skiprows=13, header=None)\n", (17651, 17701), True, 'import pandas as pd\n'), ((18041, 18101), 'pandas.read_csv', 
'pd.read_csv', (['fn_data'], {'delimiter': '""","""', 'skiprows': '(1)', 'header': 'None'}), "(fn_data, delimiter=',', skiprows=1, header=None)\n", (18052, 18101), True, 'import pandas as pd\n'), ((16022, 16040), 'numpy.array', 'np.array', (['[dx, dy]'], {}), '([dx, dy])\n', (16030, 16040), True, 'import numpy as np\n'), ((16040, 16076), 'numpy.array', 'np.array', (["meta['Position']['Center']"], {}), "(meta['Position']['Center'])\n", (16048, 16076), True, 'import numpy as np\n')] |
"""
=================================
Find Photodiode On and Off Events
=================================
In this example, we use ``pd-parser`` to find photodiode events and
align both the onset of the deflection and the cessation to
to behavior.
"""
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
###############################################################################
# Simulate data and use it to make a raw object
#
# We'll make an `mne.io.Raw` object so that we can save out some random
# data with a photodiode event channel in it in fif format (a commonly used
# electrophysiology data format).
import os.path as op
import numpy as np
import mne
from mne.utils import _TempDir
import pd_parser
from pd_parser.parse_pd import _load_data
import matplotlib.pyplot as plt
import matplotlib.cm as cm
out_dir = _TempDir()
# simulate photodiode data
np.random.seed(29)
n_events = 300
# let's make our photodiode events on random uniform from 0.5 to 1 second
n_secs_on = np.random.random(n_events) * 0.5 + 0.5
prop_corrupted = 0.01
raw, beh, events, corrupted_indices = \
pd_parser.simulate_pd_data(n_events=n_events, n_secs_on=n_secs_on,
prop_corrupted=prop_corrupted)
# make fake electrophysiology data
info = mne.create_info(['ch1', 'ch2', 'ch3'], raw.info['sfreq'],
['seeg'] * 3)
raw2 = mne.io.RawArray(np.random.random((3, raw.times.size)) * 1e-6, info)
raw2.info['lowpass'] = raw.info['lowpass'] # these must match to combine
raw.add_channels([raw2])
# bids needs these data fields
raw.info['dig'] = None
raw.info['line_freq'] = 60
# add some offsets to the behavior so it's a bit more realistic
offsets = np.random.randn(n_events) * 0.01
beh['time'] = np.array(beh['time']) + offsets
# save to disk as required by ``pd-parser``
fname = op.join(out_dir, 'sub-1_task-mytask_raw.fif')
raw.save(fname)
###############################################################################
# Find the photodiode events relative to the behavioral timing of interest
#
# This function will use the default parameters to find and align the
# photodiode events, excluding events that were off.
# One percent of the 300 events (3) were corrupted as shown in the plots and
# some were too far off from large offsets that we're going to exclude them.
pd_parser.parse_pd(fname, pd_event_name='Stim On', beh=beh,
pd_ch_names=['pd'], beh_key='time',
max_len=1.5) # none are on longer than 1.5 seconds
###############################################################################
# Find cessations of the photodiode deflections
#
# Another piece of information in the photodiode channel is the cessation of
# the events. Let's find those and add them to the events.
pd_parser.add_pd_off_events(fname, off_event_name='Stim Off', max_len=1.5)
###############################################################################
# Check recovered event lengths and compare to the simulation ground truth
#
# Let's load in the on and off events and plot their difference compared to
# the ``n_secs_on`` event lengths we used to simulate.
# The plot below show the differences between the simulated
# deflection event lengths on the x axis scattered against the
# recovered event lengths on the y axis. The identity line (the line with 1:1
# correspondance) is not shown as it would occlude the plotted data; the
# the lengths are recovered within 1 millisecond. Note that the colors are
# arbitrary and are only used to increase contrast and ease of visualization.
annot, pd_ch_names, beh = _load_data(fname)
raw.set_annotations(annot)
events, event_id = mne.events_from_annotations(raw)
on_events = events[events[:, 2] == event_id['Stim On']]
off_events = events[events[:, 2] == event_id['Stim Off']]
recovered = (off_events[:, 0] - on_events[:, 0]) / raw.info['sfreq']
not_corrupted = [s != 'n/a' for s in beh['pd_parser_sample']]
ground_truth_not_corrupted = n_secs_on[not_corrupted]
fig, ax = plt.subplots()
ax.scatter(ground_truth_not_corrupted, recovered,
s=1, color=cm.rainbow(np.linspace(0, 1, len(recovered))))
ax.set_title('Photodiode offset eventfidelity of recovery')
ax.set_xlabel('ground truth duration (s)')
ax.set_ylabel('recovered duration (s)')
print('Mean difference in the recovered from simulated length is {:.3f} '
'milliseconds'.format(
1000 * abs(ground_truth_not_corrupted - recovered).mean()))
| [
"mne.utils._TempDir",
"mne.create_info",
"pd_parser.parse_pd._load_data",
"pd_parser.add_pd_off_events",
"pd_parser.simulate_pd_data",
"numpy.random.random",
"os.path.join",
"mne.events_from_annotations",
"numpy.array",
"numpy.random.seed",
"matplotlib.pyplot.subplots",
"numpy.random.randn",
... | [((835, 845), 'mne.utils._TempDir', '_TempDir', ([], {}), '()\n', (843, 845), False, 'from mne.utils import _TempDir\n'), ((874, 892), 'numpy.random.seed', 'np.random.seed', (['(29)'], {}), '(29)\n', (888, 892), True, 'import numpy as np\n'), ((1099, 1200), 'pd_parser.simulate_pd_data', 'pd_parser.simulate_pd_data', ([], {'n_events': 'n_events', 'n_secs_on': 'n_secs_on', 'prop_corrupted': 'prop_corrupted'}), '(n_events=n_events, n_secs_on=n_secs_on,\n prop_corrupted=prop_corrupted)\n', (1125, 1200), False, 'import pd_parser\n'), ((1271, 1342), 'mne.create_info', 'mne.create_info', (["['ch1', 'ch2', 'ch3']", "raw.info['sfreq']", "(['seeg'] * 3)"], {}), "(['ch1', 'ch2', 'ch3'], raw.info['sfreq'], ['seeg'] * 3)\n", (1286, 1342), False, 'import mne\n'), ((1828, 1873), 'os.path.join', 'op.join', (['out_dir', '"""sub-1_task-mytask_raw.fif"""'], {}), "(out_dir, 'sub-1_task-mytask_raw.fif')\n", (1835, 1873), True, 'import os.path as op\n'), ((2326, 2439), 'pd_parser.parse_pd', 'pd_parser.parse_pd', (['fname'], {'pd_event_name': '"""Stim On"""', 'beh': 'beh', 'pd_ch_names': "['pd']", 'beh_key': '"""time"""', 'max_len': '(1.5)'}), "(fname, pd_event_name='Stim On', beh=beh, pd_ch_names=[\n 'pd'], beh_key='time', max_len=1.5)\n", (2344, 2439), False, 'import pd_parser\n'), ((2780, 2854), 'pd_parser.add_pd_off_events', 'pd_parser.add_pd_off_events', (['fname'], {'off_event_name': '"""Stim Off"""', 'max_len': '(1.5)'}), "(fname, off_event_name='Stim Off', max_len=1.5)\n", (2807, 2854), False, 'import pd_parser\n'), ((3598, 3615), 'pd_parser.parse_pd._load_data', '_load_data', (['fname'], {}), '(fname)\n', (3608, 3615), False, 'from pd_parser.parse_pd import _load_data\n'), ((3662, 3694), 'mne.events_from_annotations', 'mne.events_from_annotations', (['raw'], {}), '(raw)\n', (3689, 3694), False, 'import mne\n'), ((4006, 4020), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4018, 4020), True, 'import matplotlib.pyplot as plt\n'), ((1696, 1721), 
'numpy.random.randn', 'np.random.randn', (['n_events'], {}), '(n_events)\n', (1711, 1721), True, 'import numpy as np\n'), ((1743, 1764), 'numpy.array', 'np.array', (["beh['time']"], {}), "(beh['time'])\n", (1751, 1764), True, 'import numpy as np\n'), ((994, 1020), 'numpy.random.random', 'np.random.random', (['n_events'], {}), '(n_events)\n', (1010, 1020), True, 'import numpy as np\n'), ((1389, 1426), 'numpy.random.random', 'np.random.random', (['(3, raw.times.size)'], {}), '((3, raw.times.size))\n', (1405, 1426), True, 'import numpy as np\n')] |
import unittest
import os
import numpy as np
from astropy import constants
import lal
import matplotlib.pyplot as plt
import bilby
from bilby.core import utils
class TestConstants(unittest.TestCase):
    """Check bilby's physical constants against lal exactly and astropy loosely."""

    def test_speed_of_light(self):
        self.assertEqual(utils.speed_of_light, lal.C_SI)
        rel_diff = abs(utils.speed_of_light - constants.c.value) / utils.speed_of_light
        self.assertLess(rel_diff, 1e-16)

    def test_parsec(self):
        self.assertEqual(utils.parsec, lal.PC_SI)
        rel_diff = abs(utils.parsec - constants.pc.value) / utils.parsec
        self.assertLess(rel_diff, 1e-11)

    def test_solar_mass(self):
        self.assertEqual(utils.solar_mass, lal.MSUN_SI)
        rel_diff = abs(utils.solar_mass - constants.M_sun.value) / utils.solar_mass
        self.assertLess(rel_diff, 1e-4)

    def test_radius_of_earth(self):
        self.assertEqual(bilby.core.utils.radius_of_earth, lal.REARTH_SI)
        rel_diff = (
            abs(utils.radius_of_earth - constants.R_earth.value)
            / utils.radius_of_earth
        )
        self.assertLess(rel_diff, 1e-5)

    def test_gravitational_constant(self):
        self.assertEqual(bilby.core.utils.gravitational_constant, lal.G_SI)
class TestFFT(unittest.TestCase):
    """Spectral-peak and round-trip checks for bilby's nfft/infft helpers."""

    def setUp(self):
        self.sampling_frequency = 10

    def tearDown(self):
        del self.sampling_frequency

    def test_nfft_sine_function(self):
        # A pure sine should put its spectral peak at the injected frequency.
        freq_in = 2.7324
        duration = 100
        times = utils.create_time_series(self.sampling_frequency, duration)
        strain = np.sin(2 * np.pi * times * freq_in + 0.4)
        spectrum, freqs = bilby.core.utils.nfft(strain, self.sampling_frequency)
        peak_freq = freqs[np.argmax(np.abs(spectrum))]
        self.assertAlmostEqual(freq_in, peak_freq, places=1)

    def test_nfft_infft(self):
        # infft must invert nfft up to numerical precision.
        strain = np.random.normal(0, 1, 10)
        spectrum, _ = bilby.core.utils.nfft(strain, self.sampling_frequency)
        recovered = bilby.core.utils.infft(spectrum, self.sampling_frequency)
        self.assertTrue(np.allclose(strain, recovered))
class TestInferParameters(unittest.TestCase):
    """Parameter-name introspection for plain functions and bound methods."""

    def setUp(self):
        def source_function(freqs, a, b, *args, **kwargs):
            return None

        class TestClass:
            def test_method(self, a, b, *args, **kwargs):
                pass

        class TestClass2:
            def test_method(self, freqs, a, b, *args, **kwargs):
                pass

        self.source1 = source_function
        self.source2 = TestClass().test_method
        self.source3 = TestClass2().test_method

    def tearDown(self):
        del self.source1
        del self.source2

    def test_args_kwargs_handling(self):
        # The leading ``freqs`` argument and *args/**kwargs must be stripped.
        self.assertListEqual(
            ["a", "b"], utils.infer_parameters_from_function(self.source1)
        )

    def test_self_handling(self):
        self.assertListEqual(["a", "b"], utils.infer_args_from_method(self.source2))

    def test_self_handling_method_as_function(self):
        self.assertListEqual(
            ["a", "b"], utils.infer_parameters_from_function(self.source3)
        )
class TestTimeAndFrequencyArrays(unittest.TestCase):
    """Round-trip consistency between arrays and (sampling rate, duration)."""

    def setUp(self):
        self.start_time = 1.3
        self.sampling_frequency = 5
        self.duration = 1.6
        self.frequency_array = utils.create_frequency_series(
            sampling_frequency=self.sampling_frequency, duration=self.duration
        )
        self.time_array = utils.create_time_series(
            sampling_frequency=self.sampling_frequency,
            duration=self.duration,
            starting_time=self.start_time,
        )

    def tearDown(self):
        for attr in (
            "start_time",
            "sampling_frequency",
            "duration",
            "frequency_array",
            "time_array",
        ):
            delattr(self, attr)

    def test_create_time_array(self):
        expected = np.array([1.3, 1.5, 1.7, 1.9, 2.1, 2.3, 2.5, 2.7])
        actual = utils.create_time_series(
            sampling_frequency=self.sampling_frequency,
            duration=self.duration,
            starting_time=self.start_time,
        )
        self.assertTrue(np.allclose(expected, actual))

    def test_create_frequency_array(self):
        expected = np.array([0.0, 0.625, 1.25, 1.875, 2.5])
        actual = utils.create_frequency_series(
            sampling_frequency=self.sampling_frequency, duration=self.duration
        )
        self.assertTrue(np.allclose(expected, actual))

    def test_get_sampling_frequency_from_time_array(self):
        recovered = utils.get_sampling_frequency_and_duration_from_time_array(
            self.time_array
        )[0]
        self.assertEqual(self.sampling_frequency, recovered)

    def test_get_sampling_frequency_from_time_array_unequally_sampled(self):
        # Breaking the uniform spacing must be rejected.
        self.time_array[-1] += 0.0001
        with self.assertRaises(ValueError):
            utils.get_sampling_frequency_and_duration_from_time_array(
                self.time_array
            )

    def test_get_duration_from_time_array(self):
        recovered = utils.get_sampling_frequency_and_duration_from_time_array(
            self.time_array
        )[1]
        self.assertEqual(self.duration, recovered)

    def test_get_start_time_from_time_array(self):
        self.assertEqual(self.start_time, self.time_array[0])

    def test_get_sampling_frequency_from_frequency_array(self):
        recovered = utils.get_sampling_frequency_and_duration_from_frequency_array(
            self.frequency_array
        )[0]
        self.assertEqual(self.sampling_frequency, recovered)

    def test_get_sampling_frequency_from_frequency_array_unequally_sampled(self):
        self.frequency_array[-1] += 0.0001
        with self.assertRaises(ValueError):
            utils.get_sampling_frequency_and_duration_from_frequency_array(
                self.frequency_array
            )

    def test_get_duration_from_frequency_array(self):
        recovered = utils.get_sampling_frequency_and_duration_from_frequency_array(
            self.frequency_array
        )[1]
        self.assertEqual(self.duration, recovered)

    def test_consistency_time_array_to_time_array(self):
        # Recover the parameters from the array, rebuild it, and compare.
        sampling_frequency, duration = (
            utils.get_sampling_frequency_and_duration_from_time_array(self.time_array)
        )
        rebuilt = utils.create_time_series(
            sampling_frequency=sampling_frequency,
            duration=duration,
            starting_time=self.time_array[0],
        )
        self.assertTrue(np.allclose(self.time_array, rebuilt))

    def test_consistency_frequency_array_to_frequency_array(self):
        sampling_frequency, duration = (
            utils.get_sampling_frequency_and_duration_from_frequency_array(
                self.frequency_array
            )
        )
        rebuilt = utils.create_frequency_series(
            sampling_frequency=sampling_frequency, duration=duration
        )
        self.assertTrue(np.allclose(self.frequency_array, rebuilt))

    def test_illegal_sampling_frequency_and_duration(self):
        # Incompatible (rate, duration) pairs must raise the dedicated error.
        with self.assertRaises(utils.IllegalDurationAndSamplingFrequencyException):
            utils.create_time_series(
                sampling_frequency=7.7, duration=1.3, starting_time=0
            )
class TestReflect(unittest.TestCase):
    """Check that utils.reflect folds values into [0, 1] by reflection.

    Based on the cases below, values in [1, 2] and [-1, 0] map back
    mirrored, while values in [2, 3] and [-2, -1] map back unmirrored.
    """

    # NOTE: np.testing.assert_allclose raises AssertionError on mismatch and
    # always returns None, so the original
    # ``self.assertTrue(... is None)`` wrapper was a no-op that could never
    # fail on its own; the direct call below is equivalent and clearer.

    def test_in_range(self):
        # Values already inside [0, 1] are unchanged.
        xprime = np.array([0.1, 0.5, 0.9])
        x = np.array([0.1, 0.5, 0.9])
        np.testing.assert_allclose(utils.reflect(xprime), x)

    def test_in_one_to_two(self):
        xprime = np.array([1.1, 1.5, 1.9])
        x = np.array([0.9, 0.5, 0.1])
        np.testing.assert_allclose(utils.reflect(xprime), x)

    def test_in_two_to_three(self):
        xprime = np.array([2.1, 2.5, 2.9])
        x = np.array([0.1, 0.5, 0.9])
        np.testing.assert_allclose(utils.reflect(xprime), x)

    def test_in_minus_one_to_zero(self):
        xprime = np.array([-0.9, -0.5, -0.1])
        x = np.array([0.9, 0.5, 0.1])
        np.testing.assert_allclose(utils.reflect(xprime), x)

    def test_in_minus_two_to_minus_one(self):
        xprime = np.array([-1.9, -1.5, -1.1])
        x = np.array([0.1, 0.5, 0.9])
        np.testing.assert_allclose(utils.reflect(xprime), x)
class TestLatexPlotFormat(unittest.TestCase):
    """Smoke tests for the latex_plot_format decorator under various settings."""

    def setUp(self):
        self.x = np.linspace(0, 1)
        self.y = np.sin(self.x)
        self.filename = "test_plot.png"

    def tearDown(self):
        if os.path.isfile(self.filename):
            os.remove(self.filename)

    def _plot_and_check(self, **decorator_kwargs):
        # Decorate a trivial plotting function, run it with the requested
        # keyword overrides, and verify the figure reached the disk.
        @bilby.core.utils.latex_plot_format
        def plot():
            fig, ax = plt.subplots()
            ax.plot(self.x, self.y)
            fig.savefig(self.filename)

        plot(**decorator_kwargs)
        self.assertTrue(os.path.isfile(self.filename))

    def test_default(self):
        self._plot_and_check()

    def test_mathedefault_one(self):
        self._plot_and_check(BILBY_MATHDEFAULT=1)

    def test_mathedefault_zero(self):
        self._plot_and_check(BILBY_MATHDEFAULT=0)

    def test_matplotlib_style(self):
        self._plot_and_check(BILBY_STYLE="fivethirtyeight")

    def test_user_style(self):
        self._plot_and_check(BILBY_STYLE="test/test.mplstyle")
class TestUnsortedInterp2d(unittest.TestCase):
    """Behavioral checks for bilby's UnsortedInterp2d wrapper."""

    def setUp(self):
        self.xx = np.linspace(0, 1, 10)
        self.yy = np.linspace(0, 1, 10)
        self.zz = np.random.random((10, 10))
        self.interpolant = bilby.core.utils.UnsortedInterp2d(
            self.xx, self.yy, self.zz
        )

    def tearDown(self):
        pass

    def test_returns_float_for_floats(self):
        self.assertIsInstance(self.interpolant(0.5, 0.5), float)

    def test_returns_none_for_floats_outside_range(self):
        # Scalar queries outside the grid yield None, not extrapolation.
        self.assertIsNone(self.interpolant(0.5, -0.5))
        self.assertIsNone(self.interpolant(-0.5, 0.5))

    def test_returns_float_for_float_and_array(self):
        # Mixing a scalar with an array (either order) yields an ndarray.
        self.assertIsInstance(self.interpolant(0.5, np.random.random(10)), np.ndarray)
        self.assertIsInstance(self.interpolant(np.random.random(10), 0.5), np.ndarray)
        self.assertIsInstance(
            self.interpolant(np.random.random(10), np.random.random(10)), np.ndarray
        )

    def test_raises_for_mismatched_arrays(self):
        with self.assertRaises(ValueError):
            self.interpolant(np.random.random(10), np.random.random(20))

    def test_returns_fill_in_correct_place(self):
        x_data = np.random.random(10)
        y_data = np.random.random(10)
        x_data[3] = -1
        # The out-of-range x at index 3 must come back as the NaN fill value.
        self.assertTrue(np.isnan(self.interpolant(x_data, y_data)[3]))
if __name__ == "__main__":
unittest.main()
| [
"bilby.core.utils.get_sampling_frequency_and_duration_from_time_array",
"bilby.core.utils.infer_args_from_method",
"bilby.core.utils.reflect",
"numpy.array",
"bilby.core.utils.infft",
"numpy.sin",
"unittest.main",
"os.remove",
"numpy.random.random",
"numpy.linspace",
"numpy.random.normal",
"nu... | [((12385, 12400), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12398, 12400), False, 'import unittest\n'), ((1445, 1504), 'bilby.core.utils.create_time_series', 'utils.create_time_series', (['self.sampling_frequency', 'duration'], {}), '(self.sampling_frequency, duration)\n', (1469, 1504), False, 'from bilby.core import utils\n'), ((1535, 1587), 'numpy.sin', 'np.sin', (['(2 * np.pi * times * injected_frequency + 0.4)'], {}), '(2 * np.pi * times * injected_frequency + 0.4)\n', (1541, 1587), True, 'import numpy as np\n'), ((1636, 1702), 'bilby.core.utils.nfft', 'bilby.core.utils.nfft', (['time_domain_strain', 'self.sampling_frequency'], {}), '(time_domain_strain, self.sampling_frequency)\n', (1657, 1702), False, 'import bilby\n'), ((1950, 1976), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (1966, 1976), True, 'import numpy as np\n'), ((2014, 2080), 'bilby.core.utils.nfft', 'bilby.core.utils.nfft', (['time_domain_strain', 'self.sampling_frequency'], {}), '(time_domain_strain, self.sampling_frequency)\n', (2035, 2080), False, 'import bilby\n'), ((2136, 2208), 'bilby.core.utils.infft', 'bilby.core.utils.infft', (['frequency_domain_strain', 'self.sampling_frequency'], {}), '(frequency_domain_strain, self.sampling_frequency)\n', (2158, 2208), False, 'import bilby\n'), ((3039, 3089), 'bilby.core.utils.infer_parameters_from_function', 'utils.infer_parameters_from_function', (['self.source1'], {}), '(self.source1)\n', (3075, 3089), False, 'from bilby.core import utils\n'), ((3219, 3261), 'bilby.core.utils.infer_args_from_method', 'utils.infer_args_from_method', (['self.source2'], {}), '(self.source2)\n', (3247, 3261), False, 'from bilby.core import utils\n'), ((3410, 3460), 'bilby.core.utils.infer_parameters_from_function', 'utils.infer_parameters_from_function', (['self.source3'], {}), '(self.source3)\n', (3446, 3460), False, 'from bilby.core import utils\n'), ((3709, 3810), 
'bilby.core.utils.create_frequency_series', 'utils.create_frequency_series', ([], {'sampling_frequency': 'self.sampling_frequency', 'duration': 'self.duration'}), '(sampling_frequency=self.sampling_frequency,\n duration=self.duration)\n', (3738, 3810), False, 'from bilby.core import utils\n'), ((3855, 3982), 'bilby.core.utils.create_time_series', 'utils.create_time_series', ([], {'sampling_frequency': 'self.sampling_frequency', 'duration': 'self.duration', 'starting_time': 'self.start_time'}), '(sampling_frequency=self.sampling_frequency,\n duration=self.duration, starting_time=self.start_time)\n', (3879, 3982), False, 'from bilby.core import utils\n'), ((4271, 4321), 'numpy.array', 'np.array', (['[1.3, 1.5, 1.7, 1.9, 2.1, 2.3, 2.5, 2.7]'], {}), '([1.3, 1.5, 1.7, 1.9, 2.1, 2.3, 2.5, 2.7])\n', (4279, 4321), True, 'import numpy as np\n'), ((4343, 4470), 'bilby.core.utils.create_time_series', 'utils.create_time_series', ([], {'sampling_frequency': 'self.sampling_frequency', 'duration': 'self.duration', 'starting_time': 'self.start_time'}), '(sampling_frequency=self.sampling_frequency,\n duration=self.duration, starting_time=self.start_time)\n', (4367, 4470), False, 'from bilby.core import utils\n'), ((4663, 4703), 'numpy.array', 'np.array', (['[0.0, 0.625, 1.25, 1.875, 2.5]'], {}), '([0.0, 0.625, 1.25, 1.875, 2.5])\n', (4671, 4703), True, 'import numpy as np\n'), ((4730, 4831), 'bilby.core.utils.create_frequency_series', 'utils.create_frequency_series', ([], {'sampling_frequency': 'self.sampling_frequency', 'duration': 'self.duration'}), '(sampling_frequency=self.sampling_frequency,\n duration=self.duration)\n', (4759, 4831), False, 'from bilby.core import utils\n'), ((5058, 5132), 'bilby.core.utils.get_sampling_frequency_and_duration_from_time_array', 'utils.get_sampling_frequency_and_duration_from_time_array', (['self.time_array'], {}), '(self.time_array)\n', (5115, 5132), False, 'from bilby.core import utils\n'), ((5562, 5636), 
'bilby.core.utils.get_sampling_frequency_and_duration_from_time_array', 'utils.get_sampling_frequency_and_duration_from_time_array', (['self.time_array'], {}), '(self.time_array)\n', (5619, 5636), False, 'from bilby.core import utils\n'), ((6000, 6089), 'bilby.core.utils.get_sampling_frequency_and_duration_from_frequency_array', 'utils.get_sampling_frequency_and_duration_from_frequency_array', (['self.frequency_array'], {}), '(self.\n frequency_array)\n', (6062, 6089), False, 'from bilby.core import utils\n'), ((6598, 6687), 'bilby.core.utils.get_sampling_frequency_and_duration_from_frequency_array', 'utils.get_sampling_frequency_and_duration_from_frequency_array', (['self.frequency_array'], {}), '(self.\n frequency_array)\n', (6660, 6687), False, 'from bilby.core import utils\n'), ((6901, 6975), 'bilby.core.utils.get_sampling_frequency_and_duration_from_time_array', 'utils.get_sampling_frequency_and_duration_from_time_array', (['self.time_array'], {}), '(self.time_array)\n', (6958, 6975), False, 'from bilby.core import utils\n'), ((7045, 7169), 'bilby.core.utils.create_time_series', 'utils.create_time_series', ([], {'sampling_frequency': 'new_sampling_frequency', 'duration': 'new_duration', 'starting_time': 'new_start_time'}), '(sampling_frequency=new_sampling_frequency,\n duration=new_duration, starting_time=new_start_time)\n', (7069, 7169), False, 'from bilby.core import utils\n'), ((7435, 7524), 'bilby.core.utils.get_sampling_frequency_and_duration_from_frequency_array', 'utils.get_sampling_frequency_and_duration_from_frequency_array', (['self.frequency_array'], {}), '(self.\n frequency_array)\n', (7497, 7524), False, 'from bilby.core import utils\n'), ((7572, 7671), 'bilby.core.utils.create_frequency_series', 'utils.create_frequency_series', ([], {'sampling_frequency': 'new_sampling_frequency', 'duration': 'new_duration'}), '(sampling_frequency=new_sampling_frequency,\n duration=new_duration)\n', (7601, 7671), False, 'from bilby.core import utils\n'), ((8127, 
8152), 'numpy.array', 'np.array', (['[0.1, 0.5, 0.9]'], {}), '([0.1, 0.5, 0.9])\n', (8135, 8152), True, 'import numpy as np\n'), ((8165, 8190), 'numpy.array', 'np.array', (['[0.1, 0.5, 0.9]'], {}), '([0.1, 0.5, 0.9])\n', (8173, 8190), True, 'import numpy as np\n'), ((8329, 8354), 'numpy.array', 'np.array', (['[1.1, 1.5, 1.9]'], {}), '([1.1, 1.5, 1.9])\n', (8337, 8354), True, 'import numpy as np\n'), ((8367, 8392), 'numpy.array', 'np.array', (['[0.9, 0.5, 0.1]'], {}), '([0.9, 0.5, 0.1])\n', (8375, 8392), True, 'import numpy as np\n'), ((8533, 8558), 'numpy.array', 'np.array', (['[2.1, 2.5, 2.9]'], {}), '([2.1, 2.5, 2.9])\n', (8541, 8558), True, 'import numpy as np\n'), ((8571, 8596), 'numpy.array', 'np.array', (['[0.1, 0.5, 0.9]'], {}), '([0.1, 0.5, 0.9])\n', (8579, 8596), True, 'import numpy as np\n'), ((8742, 8770), 'numpy.array', 'np.array', (['[-0.9, -0.5, -0.1]'], {}), '([-0.9, -0.5, -0.1])\n', (8750, 8770), True, 'import numpy as np\n'), ((8783, 8808), 'numpy.array', 'np.array', (['[0.9, 0.5, 0.1]'], {}), '([0.9, 0.5, 0.1])\n', (8791, 8808), True, 'import numpy as np\n'), ((8959, 8987), 'numpy.array', 'np.array', (['[-1.9, -1.5, -1.1]'], {}), '([-1.9, -1.5, -1.1])\n', (8967, 8987), True, 'import numpy as np\n'), ((9000, 9025), 'numpy.array', 'np.array', (['[0.1, 0.5, 0.9]'], {}), '([0.1, 0.5, 0.9])\n', (9008, 9025), True, 'import numpy as np\n'), ((9198, 9215), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {}), '(0, 1)\n', (9209, 9215), True, 'import numpy as np\n'), ((9233, 9247), 'numpy.sin', 'np.sin', (['self.x'], {}), '(self.x)\n', (9239, 9247), True, 'import numpy as np\n'), ((9324, 9353), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (9338, 9353), False, 'import os\n'), ((10988, 11009), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (10999, 11009), True, 'import numpy as np\n'), ((11028, 11049), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (11039, 
11049), True, 'import numpy as np\n'), ((11068, 11094), 'numpy.random.random', 'np.random.random', (['(10, 10)'], {}), '((10, 10))\n', (11084, 11094), True, 'import numpy as np\n'), ((11122, 11182), 'bilby.core.utils.UnsortedInterp2d', 'bilby.core.utils.UnsortedInterp2d', (['self.xx', 'self.yy', 'self.zz'], {}), '(self.xx, self.yy, self.zz)\n', (11155, 11182), False, 'import bilby\n'), ((12199, 12219), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (12215, 12219), True, 'import numpy as np\n'), ((12237, 12257), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (12253, 12257), True, 'import numpy as np\n'), ((2255, 2310), 'numpy.allclose', 'np.allclose', (['time_domain_strain', 'new_time_domain_strain'], {}), '(time_domain_strain, new_time_domain_strain)\n', (2266, 2310), True, 'import numpy as np\n'), ((4538, 4582), 'numpy.allclose', 'np.allclose', (['expected_time_array', 'time_array'], {}), '(expected_time_array, time_array)\n', (4549, 4582), True, 'import numpy as np\n'), ((4874, 4928), 'numpy.allclose', 'np.allclose', (['expected_frequency_array', 'frequency_array'], {}), '(expected_frequency_array, frequency_array)\n', (4885, 4928), True, 'import numpy as np\n'), ((5381, 5455), 'bilby.core.utils.get_sampling_frequency_and_duration_from_time_array', 'utils.get_sampling_frequency_and_duration_from_time_array', (['self.time_array'], {}), '(self.time_array)\n', (5438, 5455), False, 'from bilby.core import utils\n'), ((6365, 6454), 'bilby.core.utils.get_sampling_frequency_and_duration_from_frequency_array', 'utils.get_sampling_frequency_and_duration_from_frequency_array', (['self.frequency_array'], {}), '(self.\n frequency_array)\n', (6427, 6454), False, 'from bilby.core import utils\n'), ((7237, 7281), 'numpy.allclose', 'np.allclose', (['self.time_array', 'new_time_array'], {}), '(self.time_array, new_time_array)\n', (7248, 7281), True, 'import numpy as np\n'), ((7714, 7768), 'numpy.allclose', 'np.allclose', 
(['self.frequency_array', 'new_frequency_array'], {}), '(self.frequency_array, new_frequency_array)\n', (7725, 7768), True, 'import numpy as np\n'), ((7931, 8010), 'bilby.core.utils.create_time_series', 'utils.create_time_series', ([], {'sampling_frequency': '(7.7)', 'duration': '(1.3)', 'starting_time': '(0)'}), '(sampling_frequency=7.7, duration=1.3, starting_time=0)\n', (7955, 8010), False, 'from bilby.core import utils\n'), ((9367, 9391), 'os.remove', 'os.remove', (['self.filename'], {}), '(self.filename)\n', (9376, 9391), False, 'import os\n'), ((9507, 9521), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9519, 9521), True, 'import matplotlib.pyplot as plt\n'), ((9636, 9665), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (9650, 9665), False, 'import os\n'), ((9791, 9805), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9803, 9805), True, 'import matplotlib.pyplot as plt\n'), ((9939, 9968), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (9953, 9968), False, 'import os\n'), ((10095, 10109), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10107, 10109), True, 'import matplotlib.pyplot as plt\n'), ((10243, 10272), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (10257, 10272), False, 'import os\n'), ((10398, 10412), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10410, 10412), True, 'import matplotlib.pyplot as plt\n'), ((10557, 10586), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (10571, 10586), False, 'import os\n'), ((10706, 10720), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10718, 10720), True, 'import matplotlib.pyplot as plt\n'), ((10868, 10897), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (10882, 10897), False, 'import os\n'), ((1775, 1806), 'numpy.abs', 'np.abs', 
(['frequency_domain_strain'], {}), '(frequency_domain_strain)\n', (1781, 1806), True, 'import numpy as np\n'), ((11643, 11663), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (11659, 11663), True, 'import numpy as np\n'), ((11747, 11767), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (11763, 11767), True, 'import numpy as np\n'), ((11856, 11876), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (11872, 11876), True, 'import numpy as np\n'), ((11878, 11898), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (11894, 11898), True, 'import numpy as np\n'), ((12074, 12094), 'numpy.random.random', 'np.random.random', (['(10)'], {}), '(10)\n', (12090, 12094), True, 'import numpy as np\n'), ((12096, 12116), 'numpy.random.random', 'np.random.random', (['(20)'], {}), '(20)\n', (12112, 12116), True, 'import numpy as np\n'), ((8242, 8263), 'bilby.core.utils.reflect', 'utils.reflect', (['xprime'], {}), '(xprime)\n', (8255, 8263), False, 'from bilby.core import utils\n'), ((8444, 8465), 'bilby.core.utils.reflect', 'utils.reflect', (['xprime'], {}), '(xprime)\n', (8457, 8465), False, 'from bilby.core import utils\n'), ((8648, 8669), 'bilby.core.utils.reflect', 'utils.reflect', (['xprime'], {}), '(xprime)\n', (8661, 8669), False, 'from bilby.core import utils\n'), ((8860, 8881), 'bilby.core.utils.reflect', 'utils.reflect', (['xprime'], {}), '(xprime)\n', (8873, 8881), False, 'from bilby.core import utils\n'), ((9077, 9098), 'bilby.core.utils.reflect', 'utils.reflect', (['xprime'], {}), '(xprime)\n', (9090, 9098), False, 'from bilby.core import utils\n')] |
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import scipy as sc
import pickle
import os
from . import preprocess
from scipy.sparse import vstack, csr_matrix, csc_matrix, lil_matrix
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import normalize
from . import builders
class Dataset(object):
    """In-memory container for the playlist/track recommendation tables.

    Holds the raw interaction table (``train``), the ``tracks`` and
    ``playlists`` catalogues and the ``target_tracks``/``target_playlists``
    subsets, and provides the normalisation steps that remap external ids
    to dense integer indices usable as matrix coordinates.
    """

    @staticmethod
    def load():
        """Read the five tab-separated CSV tables from data/ and wrap them."""
        train = pd.read_csv('data/train_final.csv', delimiter='\t')
        playlists = pd.read_csv('data/playlists_final.csv', delimiter='\t')
        target_playlists = pd.read_csv('data/target_playlists.csv', delimiter='\t')
        target_tracks = pd.read_csv('data/target_tracks.csv', delimiter='\t')
        tracks = pd.read_csv('data/tracks_final.csv', delimiter='\t')
        return Dataset(train, tracks, playlists, target_tracks, target_playlists)

    def __init__(self, train, tracks, playlists, target_tracks, target_playlists):
        # Raw DataFrames as loaded; call normalize() to remap ids in place.
        self.train = train
        self.tracks = tracks
        self.playlists = playlists
        self.target_tracks = target_tracks
        self.target_playlists = target_playlists

    def _normalize_train_dataset(self):
        # Build external-id -> dense-row-index maps and rewrite the train table.
        self.track_to_num = pd.Series(self.tracks.index)
        self.track_to_num.index = self.tracks['track_id_tmp']
        self.playlist_to_num = pd.Series(self.playlists.index)
        self.playlist_to_num.index = self.playlists['playlist_id_tmp']
        self.train['track_id'] = self.train['track_id'].apply(lambda x : self.track_to_num[x])
        self.train['playlist_id'] = self.train['playlist_id'].apply(lambda x : self.playlist_to_num[x])

    def _normalize_tracks(self):
        # Convert track id: keep the external id in track_id_tmp, use the
        # positional index as the new track_id.
        self.tracks['track_id_tmp'] = self.tracks['track_id']
        self.tracks['track_id'] = self.tracks.index
        self.num_to_tracks = pd.Series(self.tracks['track_id_tmp'])
        # NOTE(review): eval() on CSV content — safe only for trusted data files.
        self.tracks.tags = self.tracks.tags.apply(lambda s: np.array(eval(s), dtype=int))
        # Substitute each bad album (i.e. an illformed album such as -1, None, etc) with the 0 album
        def transform_album_1(alb):
            ar = eval(alb)
            if len(ar) == 0 or (len(ar) > 0 and (ar[0] == None or ar[0] == -1)):
                ar = [0]
            return ar[0]
        self.tracks.album = self.tracks.album.apply(lambda alb: transform_album_1(alb))
        # Substitute each 0 album with a brand new album
        last_album = self.tracks.album.max()
        class AlbumTransformer(object):
            # Stateful callable: hands out a fresh album id for every 0 album.
            def __init__(self, last_album):
                self.next_album_id = last_album
            def __call__(self, alb):
                if alb == 0:
                    alb = self.next_album_id
                    self.next_album_id += 1
                return alb
        # self.tracks.album = self.tracks.album.apply(lambda alb: transform_album_2(alb))
        self.tracks.album = self.tracks.album.apply(AlbumTransformer(last_album+1))

    def _normalize_playlists(self):
        # Same scheme as tracks: external id kept in playlist_id_tmp.
        self.playlists['playlist_id_tmp'] = self.playlists['playlist_id']
        self.playlists['playlist_id'] = self.playlists.index
        self.playlist_to_num = pd.Series(self.playlists.index)
        self.playlist_to_num.index = self.playlists['playlist_id_tmp']

    def _normalize_target_playlists(self):
        # Convert target playlist id
        self.target_playlists['playlist_id_tmp'] = self.target_playlists['playlist_id']
        self.target_playlists['playlist_id'] = self.target_playlists['playlist_id'].apply(lambda x : self.playlist_to_num[x])
        self.target_playlists = self.target_playlists.astype(int)

    def _normalize_target_tracks(self):
        # Convert target tracks id
        self.target_tracks['track_id_tmp'] = self.target_tracks['track_id']
        self.target_tracks['track_id'] = self.target_tracks['track_id'].apply(lambda x : self.track_to_num[x])
        self.target_tracks = self.target_tracks.astype(int)

    def _compute_mappings(self):
        # Create a dataframe that maps a playlist to the set of its tracks
        self.playlist_tracks = pd.DataFrame(self.train['playlist_id'].drop_duplicates())
        self.playlist_tracks.index = self.train['playlist_id'].unique()
        self.playlist_tracks['track_ids'] = self.train.groupby('playlist_id').apply(lambda x : x['track_id'].values)
        self.playlist_tracks = self.playlist_tracks.sort_values('playlist_id')
        # Create a dataframe that maps a track to the set of the playlists it appears into
        self.track_playlists = pd.DataFrame(self.train['track_id'].drop_duplicates())
        self.track_playlists.index = self.train['track_id'].unique()
        self.track_playlists['playlist_ids'] = self.train.groupby('track_id').apply(lambda x : x['playlist_id'].values)
        self.track_playlists = self.track_playlists.sort_values('track_id')

    def _add_owners(self):
        # Attach, per track, the owners of every playlist containing it.
        self.tracks['owners'] = self.track_playlists['playlist_ids'].apply(lambda x : self.playlists.loc[x]['owner'].values)
        null_owners = self.tracks[~self.tracks.owners.notnull()]
        for i in range(len(null_owners)):
            # NOTE(review): DataFrame.set_value was removed in pandas >= 1.0;
            # this call only works on old pandas — confirm pinned version.
            self.tracks.set_value(null_owners.track_id.iloc[i], 'owners', np.array([]))

    def split_holdout(self, test_size=1, min_playlist_tracks=13):
        """Carve a holdout test set out of train, keeping the originals in *_orig."""
        self.train_orig = self.train.copy()
        self.target_tracks_orig = self.target_tracks.copy()
        self.target_playlists_orig = self.target_playlists.copy()
        self.train, self.test, self.target_playlists, self.target_tracks = train_test_split(self.train, test_size, min_playlist_tracks, target_playlists=self.target_playlists_orig)
        self.target_playlists = self.target_playlists.astype(int)
        self.target_tracks = self.target_tracks.astype(int)
        self.train = self.train.astype(int)
        self.test = self.test.astype(int)

    def normalize(self):
        """Run the full normalisation pipeline (order matters)."""
        self._normalize_tracks()
        self._normalize_playlists()
        self._normalize_train_dataset()
        self._normalize_target_tracks()
        self._normalize_target_playlists()
        self._compute_mappings()
        self._add_owners()

    def build_urm(self, urm_builder=builders.URMBuilder(norm="no")):
        # NOTE(review): the default builder is constructed once at class
        # definition time and shared across calls — confirm URMBuilder is stateless.
        self.urm = urm_builder.build(self)
        self.urm = csr_matrix(self.urm)
def evaluate(test, recommendations, should_transform_test=True):
    """
    Compute the mean average precision (MAP) of `recommendations` against `test`.

    - "test" is:
        if should_transform_test == True: a dataframe with columns "playlist_id"
          and "track_id" (converted below via preprocess);
        else: an already-converted frame indexed by playlist_id with a
          "track_ids" column.
    - "recommendations" is a dataframe with "playlist_id" and "track_ids"
      (numpy.ndarray) columns.
    """
    if should_transform_test:
        # Transform "test" into a per-playlist structure:
        #   key: playlist_id
        #   value: list of track_ids
        test_df = preprocess.get_playlist_track_list2(test)
    else:
        test_df = test
    mean_ap = 0
    for _, row in recommendations.iterrows():
        pl_id = row['playlist_id']
        tracks = row['track_ids']
        correct = 0
        ap = 0
        for it, t in enumerate(tracks):
            # Average precision: accumulate precision@k at every hit position.
            if t in test_df.loc[pl_id]['track_ids']:
                correct += 1
                ap += correct / (it+1)
        if len(tracks) > 0:
            ap /= len(tracks)
        mean_ap += ap
    # NOTE(review): raises ZeroDivisionError when `recommendations` is empty —
    # confirm callers never pass an empty frame.
    return mean_ap / len(recommendations)
def train_test_split(train, test_size=0.3, min_playlist_tracks=7, target_playlists=None, n_test_tracks=5):
    """Split the interaction table into train/test parts by playlist.

    Parameters
    ----------
    train : pd.DataFrame
        Interaction table with 'playlist_id' and 'track_id' columns.
    test_size : float
        Fraction of the eligible playlists to hold out for testing.
    min_playlist_tracks : int
        Only playlists with at least this many tracks are eligible.
        (With 7, 28311 of 45649 playlists in the original data qualify.)
    target_playlists : pd.DataFrame or None
        If given, restrict the eligible playlists to this subset.
    n_test_tracks : int
        Number of tracks sampled per held-out playlist (was hard-coded to 5).
        Must be <= min_playlist_tracks or pandas .sample() raises.

    Returns
    -------
    (train, test, target_playlists, target_tracks) :
        Reduced train table, held-out rows, and one-column DataFrames with the
        chosen playlist and track ids.
    """
    if target_playlists is None:
        playlists = train.groupby('playlist_id').count()
    else:
        playlists = train[train.playlist_id.isin(target_playlists.playlist_id)].groupby('playlist_id').count()

    # Only playlists with at least "min_playlist_tracks" tracks are considered.
    to_choose_playlists = playlists[playlists['track_id'] >= min_playlist_tracks].index.values

    # Among these, "test_size * len(to_choose_playlists)" distinct playlists
    # are chosen for testing (numpy array of playlist_ids).
    target_playlists = np.random.choice(
        to_choose_playlists, replace=False,
        size=int(test_size * len(to_choose_playlists)))

    # Integer accumulators: the original float-dtype np.array([]) silently
    # promoted ids to float; int keeps .loc lookups exact.
    target_tracks = np.array([], dtype=int)
    indexes = np.array([], dtype=int)
    for p in target_playlists:
        # Sample n_test_tracks rows of this playlist; eligibility above
        # guarantees enough rows when n_test_tracks <= min_playlist_tracks.
        selected_df = train[train['playlist_id'] == p].sample(n_test_tracks)
        target_tracks = np.union1d(target_tracks, selected_df['track_id'].values)
        indexes = np.union1d(indexes, selected_df.index.values)

    test = train.loc[indexes].copy()
    train = train.drop(indexes)
    return train, test, pd.DataFrame(target_playlists, columns=['playlist_id']), pd.DataFrame(target_tracks, columns=['track_id'])
def dot_with_top(m1, m2, def_rows_g, top=-1, row_group=1, similarity="dot", shrinkage=0.000001, alpha=1):
    """
    Produces the product between matrices m1 and m2.
    Possible similarities: "dot", "cosine". By default it goes on "dot".
    NB: Shrinkage is not implemented...
    Code taken from
    https://stackoverflow.com/questions/29647326/sparse-matrix-dot-product-keeping-only-n-max-values-per-result-row
    and optimized for smart dot products.
    """
    m2_transposed = m2.transpose()  # only used by the "cosine-old" branch
    l2 = m2.sum(axis=0) # by cols
    if top > 0:
        final_rows = []
        row_id = 0
        # Process m1 in horizontal slices of `row_group` rows to bound memory.
        while row_id < m1.shape[0]:
            last_row = row_id + row_group if row_id + row_group <= m1.shape[0] else m1.shape[0]
            rows = m1[row_id:last_row]
            if rows.count_nonzero() > 0:
                if similarity == "cosine-old":
                    res_rows = cosine_similarity(rows, m2_transposed, dense_output=False)
                elif similarity == "cosine":
                    res_rows = csr_matrix((np.dot(rows,m2) / (np.sqrt(rows.sum(axis=1)) * np.sqrt(l2) + shrinkage)))
                elif similarity == "cosine-asym":
                    # Asymmetric cosine: numerator split with exponents alpha / (1-alpha).
                    res_rows = csr_matrix((np.dot(rows,m2) / (np.power(rows.sum(axis=1),alpha) * np.power(m2.sum(axis=0),(1-alpha)) + shrinkage)))
                elif similarity == "dot-old":
                    res_rows = rows.dot(m2)
                else:
                    # NOTE(review): .toarray() yields a dense ndarray, which has
                    # neither count_nonzero() nor .nnz — this default branch
                    # looks broken; confirm it is never taken.
                    res_rows = (np.dot(rows,m2) + shrinkage).toarray()
                if res_rows.count_nonzero() > 0:
                    for res_row in res_rows:
                        if res_row.nnz > top:
                            # Keep only the `top` largest entries of the row.
                            args_ids = np.argsort(res_row.data)[-top:]
                            data = res_row.data[args_ids]
                            cols = res_row.indices[args_ids]
                            final_rows.append(csr_matrix((data, (np.zeros(top), cols)), shape=res_row.shape))
                        else:
                            # Row already has <= top entries; keep them all.
                            args_ids = np.argsort(res_row.data)[-top:]
                            data = res_row.data[args_ids]
                            cols = res_row.indices[args_ids]
                            final_rows.append(csr_matrix((data, (np.zeros(len(args_ids)), cols)), shape=res_row.shape))
                            #print("Less than top: {0}".format(len(args_ids)))
                            #final_rows.append(def_rows_g[0])
                else:
                    print("Add empty 2")
                    for res_row in res_rows:
                        final_rows.append(def_rows_g[0])
            else:
                print("Add empty 3")
                final_rows.append(def_rows_g)
            row_id += row_group
            if row_id % row_group == 0:
                # Progress trace; always fires since row_id is a multiple of row_group.
                print(row_id)
        return vstack(final_rows, 'csr')
    return m1.dot(m2)
def from_num_to_id(df, row_num, column = 'track_id'):
    """Return the identifier stored in *column* for the row at position *row_num*."""
    record = df.iloc[row_num]
    return record[column]
def from_id_to_num(df, tr_id, column='track_id'):
    """Return the position of the first row whose *column* equals *tr_id*."""
    hits = np.flatnonzero(df[column].values == tr_id)
    return hits[0]
def from_prediction_matrix_to_dataframe(pred_matrix, dataset, keep_best=5, map_tracks=False):
    """Convert a sparse score matrix into a per-playlist recommendation frame.

    Parameters
    ----------
    pred_matrix : scipy sparse matrix
        One row per target playlist, one column per track, entries are scores.
    dataset : Dataset
        Provides target_playlists (order matches pred_matrix rows) and,
        if map_tracks, num_to_tracks to translate indices to external ids.
    keep_best : int
        Number of highest-scoring tracks kept per playlist.
    map_tracks : bool
        If True, map internal track indices back to external track ids.

    Returns
    -------
    pd.DataFrame indexed by playlist_id with a 'track_ids' ndarray column.
    """
    pred_matrix_csr = pred_matrix.tocsr()
    predictions = pd.DataFrame(dataset.target_playlists[:pred_matrix.shape[0]])
    predictions.index = dataset.target_playlists['playlist_id'][:pred_matrix.shape[0]]
    predictions['track_ids'] = [np.array([]) for _ in range(len(predictions))]
    for target_row, pl_id in enumerate(dataset.target_playlists.playlist_id[:pred_matrix.shape[0]]):
        # Walk the CSR row directly to avoid densifying the matrix.
        row_start = pred_matrix_csr.indptr[target_row]
        row_end = pred_matrix_csr.indptr[target_row + 1]
        row_columns = pred_matrix_csr.indices[row_start:row_end]
        row_data = pred_matrix_csr.data[row_start:row_end]
        # Highest scores first.
        best_indexes = row_data.argsort()[::-1][:keep_best]
        pred = row_columns[best_indexes]
        if map_tracks:
            pred = np.array([dataset.num_to_tracks[t] for t in pred])
        # Fix: Series.set_value was removed in pandas >= 1.0; .at is the
        # supported scalar setter and writes through to the frame.
        predictions.at[pl_id, 'track_ids'] = pred
    return predictions
def build_id_to_num_map(df, column):
    """Return a Series mapping each identifier in *column* to its row position."""
    return pd.Series(range(len(df)), index=df[column])
def build_num_to_id_map(df, column):
    """Return a Series mapping each row position to the identifier in *column*."""
    return df[column].set_axis(np.arange(len(df)))
| [
"pandas.Series",
"numpy.sqrt",
"numpy.union1d",
"pandas.read_csv",
"scipy.sparse.vstack",
"numpy.where",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.argsort",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"pandas.DataFrame",
"scipy.sparse.csr_matrix"
] | [((8274, 8286), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8282, 8286), True, 'import numpy as np\n'), ((8301, 8313), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8309, 8313), True, 'import numpy as np\n'), ((12314, 12375), 'pandas.DataFrame', 'pd.DataFrame', (['dataset.target_playlists[:pred_matrix.shape[0]]'], {}), '(dataset.target_playlists[:pred_matrix.shape[0]])\n', (12326, 12375), True, 'import pandas as pd\n'), ((13344, 13365), 'pandas.Series', 'pd.Series', (['df[column]'], {}), '(df[column])\n', (13353, 13365), True, 'import pandas as pd\n'), ((425, 476), 'pandas.read_csv', 'pd.read_csv', (['"""data/train_final.csv"""'], {'delimiter': '"""\t"""'}), "('data/train_final.csv', delimiter='\\t')\n", (436, 476), True, 'import pandas as pd\n'), ((497, 552), 'pandas.read_csv', 'pd.read_csv', (['"""data/playlists_final.csv"""'], {'delimiter': '"""\t"""'}), "('data/playlists_final.csv', delimiter='\\t')\n", (508, 552), True, 'import pandas as pd\n'), ((580, 636), 'pandas.read_csv', 'pd.read_csv', (['"""data/target_playlists.csv"""'], {'delimiter': '"""\t"""'}), "('data/target_playlists.csv', delimiter='\\t')\n", (591, 636), True, 'import pandas as pd\n'), ((661, 714), 'pandas.read_csv', 'pd.read_csv', (['"""data/target_tracks.csv"""'], {'delimiter': '"""\t"""'}), "('data/target_tracks.csv', delimiter='\\t')\n", (672, 714), True, 'import pandas as pd\n'), ((734, 786), 'pandas.read_csv', 'pd.read_csv', (['"""data/tracks_final.csv"""'], {'delimiter': '"""\t"""'}), "('data/tracks_final.csv', delimiter='\\t')\n", (745, 786), True, 'import pandas as pd\n'), ((1206, 1234), 'pandas.Series', 'pd.Series', (['self.tracks.index'], {}), '(self.tracks.index)\n', (1215, 1234), True, 'import pandas as pd\n'), ((1328, 1359), 'pandas.Series', 'pd.Series', (['self.playlists.index'], {}), '(self.playlists.index)\n', (1337, 1359), True, 'import pandas as pd\n'), ((1836, 1874), 'pandas.Series', 'pd.Series', (["self.tracks['track_id_tmp']"], {}), 
"(self.tracks['track_id_tmp'])\n", (1845, 1874), True, 'import pandas as pd\n'), ((3147, 3178), 'pandas.Series', 'pd.Series', (['self.playlists.index'], {}), '(self.playlists.index)\n', (3156, 3178), True, 'import pandas as pd\n'), ((6234, 6254), 'scipy.sparse.csr_matrix', 'csr_matrix', (['self.urm'], {}), '(self.urm)\n', (6244, 6254), False, 'from scipy.sparse import vstack, csr_matrix, csc_matrix, lil_matrix\n'), ((8691, 8733), 'numpy.union1d', 'np.union1d', (['target_tracks', 'selected_tracks'], {}), '(target_tracks, selected_tracks)\n', (8701, 8733), True, 'import numpy as np\n'), ((8752, 8797), 'numpy.union1d', 'np.union1d', (['indexes', 'selected_df.index.values'], {}), '(indexes, selected_df.index.values)\n', (8762, 8797), True, 'import numpy as np\n'), ((8893, 8948), 'pandas.DataFrame', 'pd.DataFrame', (['target_playlists'], {'columns': "['playlist_id']"}), "(target_playlists, columns=['playlist_id'])\n", (8905, 8948), True, 'import pandas as pd\n'), ((8950, 8999), 'pandas.DataFrame', 'pd.DataFrame', (['target_tracks'], {'columns': "['track_id']"}), "(target_tracks, columns=['track_id'])\n", (8962, 8999), True, 'import pandas as pd\n'), ((11824, 11849), 'scipy.sparse.vstack', 'vstack', (['final_rows', '"""csr"""'], {}), "(final_rows, 'csr')\n", (11830, 11849), False, 'from scipy.sparse import vstack, csr_matrix, csc_matrix, lil_matrix\n'), ((12495, 12507), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12503, 12507), True, 'import numpy as np\n'), ((12115, 12151), 'numpy.where', 'np.where', (['(df[column].values == tr_id)'], {}), '(df[column].values == tr_id)\n', (12123, 12151), True, 'import numpy as np\n'), ((13023, 13073), 'numpy.array', 'np.array', (['[dataset.num_to_tracks[t] for t in pred]'], {}), '([dataset.num_to_tracks[t] for t in pred])\n', (13031, 13073), True, 'import numpy as np\n'), ((5177, 5189), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5185, 5189), True, 'import numpy as np\n'), ((9919, 9977), 
'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['rows', 'm2_transposed'], {'dense_output': '(False)'}), '(rows, m2_transposed, dense_output=False)\n', (9936, 9977), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((10066, 10082), 'numpy.dot', 'np.dot', (['rows', 'm2'], {}), '(rows, m2)\n', (10072, 10082), True, 'import numpy as np\n'), ((10699, 10723), 'numpy.argsort', 'np.argsort', (['res_row.data'], {}), '(res_row.data)\n', (10709, 10723), True, 'import numpy as np\n'), ((11029, 11053), 'numpy.argsort', 'np.argsort', (['res_row.data'], {}), '(res_row.data)\n', (11039, 11053), True, 'import numpy as np\n'), ((10233, 10249), 'numpy.dot', 'np.dot', (['rows', 'm2'], {}), '(rows, m2)\n', (10239, 10249), True, 'import numpy as np\n'), ((10113, 10124), 'numpy.sqrt', 'np.sqrt', (['l2'], {}), '(l2)\n', (10120, 10124), True, 'import numpy as np\n'), ((10481, 10497), 'numpy.dot', 'np.dot', (['rows', 'm2'], {}), '(rows, m2)\n', (10487, 10497), True, 'import numpy as np\n'), ((10915, 10928), 'numpy.zeros', 'np.zeros', (['top'], {}), '(top)\n', (10923, 10928), True, 'import numpy as np\n')] |
import numpy as np
from math import sin, cos, atan2, tan, sqrt, pi
import matplotlib.pyplot as plt
import time
from bdsim.components import TransferBlock, FunctionBlock
from bdsim.graphics import GraphicsBlock
class MultiRotor(TransferBlock):
    """
    :blockname:`MULTIROTOR`

    .. table::
       :align: left

    +------------+---------+---------+
    | inputs     | outputs |  states |
    +------------+---------+---------+
    | 1          | 1       | 16      |
    +------------+---------+---------+
    | ndarray(4) | dict    |         |
    +------------+---------+---------+
    """

    nin = 1
    nout = 1

    # Flyer2dynamics lovingly coded by <NAME>, first coded 12/4/04
    # A simulation of idealised X-4 Flyer II flight dynamics.
    # version 2.0 2005 modified to be compatible with latest version of Matlab
    # version 3.0 2006 fixed rotation matrix problem
    # version 4.0 4/2/10, fixed rotor flapping rotation matrix bug, mirroring
    # version 5.0 8/8/11, simplified and restructured
    # version 6.0 25/10/13, fixed rotation matrix/inverse wronskian definitions, flapping cross-product bug
    #
    # New in version 2:
    #   - Generalised rotor thrust model
    #   - Rotor flapping model
    #   - Frame aerodynamic drag model
    #   - Frame aerodynamic surfaces model
    #   - Internal motor model
    #   - Much coolage
    #
    # Version 1.3
    #   - Rigid body dynamic model
    #   - Rotor gyroscopic model
    #   - External motor model
    #
    # ARGUMENTS
    #   u       Reference inputs                1x4
    #   tele    Enable telemetry (1 or 0)       1x1
    #   crash   Enable crash detection (1 or 0) 1x1
    #   init    Initial conditions              1x12
    #
    # INPUTS
    #   u = [N S E W]
    #   NSEW motor commands                     1x4
    #
    # CONTINUOUS STATES
    #   z      Position                         3x1   (x,y,z)
    #   v      Velocity                         3x1   (xd,yd,zd)
    #   n      Attitude                         3x1   (Y,P,R)
    #   o      Angular velocity                 3x1   (wx,wy,wz)
    #   w      Rotor angular velocity           4x1
    #
    # Notes: z-axis downward so altitude is -z(3)
    #
    # CONTINUOUS STATE MATRIX MAPPING
    #   x = [z1 z2 z3 n1 n2 n3 z1 z2 z3 o1 o2 o3 w1 w2 w3 w4]
    #
    # CONTINUOUS STATE EQUATIONS
    #   z` = v
    #   v` = g*e3 - (1/m)*T*R*e3
    #   I*o` = -o X I*o + G + torq
    #   R = f(n)
    #   n` = inv(W)*o

    def __init__(self, model, groundcheck=True, speedcheck=True, x0=None, **blockargs):
        r"""
        Create a multi-rotor dynamic model block.

        :param model: A dictionary of vehicle geometric and inertial properties
        :type model: dict
        :param groundcheck: Prevent vehicle moving below ground :math:`z>0`, defaults to True
        :type groundcheck: bool
        :param speedcheck: Check for non-positive rotor speed, defaults to True
        :type speedcheck: bool
        :param x0: Initial state, defaults to None
        :type x0: array_like(6) or array_like(12), optional
        :param blockargs: |BlockOptions|
        :type blockargs: dict
        :return: a MULTIROTOR block
        :rtype: MultiRotor instance

        Dynamic model of a multi-rotor flying robot, includes rotor flapping.

        **Block ports**

            :input ω: a vector of input rotor speeds in (radians/sec). These are,
                looking down, clockwise from the front rotor which lies on the x-axis.

            :output x: a dictionary signal with the following items:

                - ``x`` pose in the world frame as :math:`[x, y, z, \theta_Y, \theta_P, \theta_R]`
                - ``vb`` translational velocity in the world frame (metres/sec)
                - ``w`` angular rates in the world frame as yaw-pitch-roll rates (radians/second)
                - ``a1s`` longitudinal flapping angles (radians)
                - ``b1s`` lateral flapping angles (radians)

        **Model parameters**

        The dynamic model is a dict with the following key/value pairs.

        ===========   ==========================================
        key           description
        ===========   ==========================================
        ``nrotors``   Number of rotors (even integer)
        ``J``         Flyer rotational inertia matrix (3x3)
        ``h``         Height of rotors above CoG
        ``d``         Length of flyer arms
        ``nb``        Number of blades per rotor
        ``r``         Rotor radius
        ``c``         Blade chord
        ``e``         Flapping hinge offset
        ``Mb``        Rotor blade mass
        ``Mc``        Estimated hub clamp mass
        ``ec``        Blade root clamp displacement
        ``Ib``        Rotor blade rotational inertia
        ``Ic``        Estimated root clamp inertia
        ``mb``        Static blade moment
        ``Ir``        Total rotor inertia
        ``Ct``        Non-dim. thrust coefficient
        ``Cq``        Non-dim. torque coefficient
        ``sigma``     Rotor solidity ratio
        ``thetat``    Blade tip angle
        ``theta0``    Blade root angle
        ``theta1``    Blade twist angle
        ``theta75``   3/4 blade angle
        ``thetai``    Blade ideal root approximation
        ``a``         Lift slope gradient
        ``A``         Rotor disc area
        ``gamma``     Lock number
        ===========   ==========================================

        .. note::
            - SI units are used.
            - Based on MATLAB code developed by <NAME> 2004.

        :References:
            - Design, Construction and Control of a Large Quadrotor micro air vehicle.
              P.Pounds, `PhD thesis <https://openresearch-repository.anu.edu.au/handle/1885/146543>`_
              Australian National University, 2007.

        :seealso: :class:`MultiRotorMixer` :class:`MultiRotorPlot`
        """
        if model is None:
            raise ValueError('no model provided')

        super().__init__(nin=1, nout=1, **blockargs)
        self.type = 'quadrotor'

        try:
            nrotors = model['nrotors']
        except KeyError:
            raise RuntimeError('vehicle model does not contain nrotors')
        assert nrotors % 2 == 0, 'Must have an even number of rotors'

        self.nstates = 12
        # Accept several initial-state lengths and zero-pad the derivatives.
        if x0 is None:
            x0 = np.zeros((self.nstates,))
        else:
            x0 = np.r_[x0]
            if len(x0) == 6:
                # assume all derivative are zero
                x0 = np.r_[x0, np.zeros((6,))]
            elif len(x0) == 4:
                # assume x,y,z,yaw
                x0 = np.r_[x0[:3], 0, 0, x0[3], np.zeros((6,))]
            elif len(x0) == 3:
                # assume x,y,z
                x0 = np.r_[x0[:3], np.zeros((9,))]
            elif len(x0) != self.nstates:
                raise ValueError("x0 is the wrong length")
        self._x0 = x0

        self.nrotors = nrotors
        self.model = model
        self.groundcheck = groundcheck
        self.speedcheck = speedcheck

        self.D = np.zeros((3,self.nrotors))
        self.theta = np.zeros((self.nrotors,))
        for i in range(0, self.nrotors):
            theta = i / self.nrotors * 2 * pi
            #  Di      Rotor hub displacements (1x3)
            # first rotor is on the x-axis, clockwise order looking down from above
            self.D[:,i] = np.r_[ model['d'] * cos(theta), model['d'] * sin(theta), model['h']]
            self.theta[i] = theta

        # per-rotor flapping angles, updated by deriv()
        self.a1s = np.zeros((self.nrotors,))
        self.b1s = np.zeros((self.nrotors,))

    def output(self, t=None):
        """Map the state vector to the block's dict output signal."""
        model = self.model

        # compute output vector as a function of state vector
        #   z      Position                         3x1   (x,y,z)
        #   v      Velocity                         3x1   (xd,yd,zd)
        #   n      Attitude                         3x1   (Y,P,R)
        #   o      Angular velocity                 3x1   (Yd,Pd,Rd)

        n = self._x[3:6]   # RPY angles
        phi = n[0]         # yaw
        the = n[1]         # pitch
        psi = n[2]         # roll

        # rotz(phi)*roty(the)*rotx(psi)
        # BBF > Inertial rotation matrix
        R = np.array([
                [cos(the) * cos(phi), sin(psi) * sin(the) * cos(phi) - cos(psi) * sin(phi), cos(psi) * sin(the) * cos(phi) + sin(psi) * sin(phi)],
                [cos(the) * sin(phi), sin(psi) * sin(the) * sin(phi) + cos(psi) * cos(phi), cos(psi) * sin(the) * sin(phi) - sin(psi) * cos(phi)],
                [-sin(the), sin(psi) * cos(the), cos(psi) * cos(the)]
            ])

        #inverted Wronskian
        iW = np.array([
                [0, sin(psi), cos(psi)],
                [0, cos(psi) * cos(the), -sin(psi) * cos(the)],
                [cos(the), sin(psi) * sin(the), cos(psi) * sin(the)]
            ]) / cos(the)

        # return velocity in the body frame
        vd = np.linalg.inv(R) @ self._x[6:9]   # translational velocity mapped to body frame
        rpyd = iW @ self._x[9:12]              # RPY rates mapped to body frame

        out = {}
        out['x'] = self._x[0:6]
        out['trans'] = np.r_[self._x[:3], vd]
        out['rot'] = np.r_[self._x[3:6], rpyd]
        out['a1s'] = self.a1s
        out['b1s'] = self.b1s
        out['X'] = np.r_[self._x[:6], vd, rpyd]

        # sys = [ x(1:6);
        #     inv(R)*x(7:9);   % translational velocity mapped to body frame
        #     iW*x(10:12)];

        return [out]

    def deriv(self):
        """Compute the state derivative from rotor speeds (rigid-body + flapping model)."""
        model = self.model

        # Body-fixed frame references
        #   ei      Body fixed frame references 3x1
        e3 = np.r_[0, 0, 1]

        # process inputs
        w = self.inputs[0]
        if len(w) != self.nrotors:
            raise RuntimeError('input vector wrong size')

        if self.speedcheck and np.any(w == 0):
            # might need to fix this, preculudes aerobatics :(
            # mu becomes NaN due to 0/0
            raise RuntimeError('quadrotor_dynamics: not defined for zero rotor speed');

        # EXTRACT STATES FROM X
        z = self._x[0:3]    # position in {W}
        n = self._x[3:6]    # RPY angles {W}
        v = self._x[6:9]    # velocity in {W}
        o = self._x[9:12]   # angular velocity in {W}

        # PREPROCESS ROTATION AND WRONSKIAN MATRICIES
        phi = n[0]   # yaw
        the = n[1]   # pitch
        psi = n[2]   # roll

        # phi = n(1);    % yaw
        # the = n(2);    % pitch
        # psi = n(3);    % roll

        # rotz(phi)*roty(the)*rotx(psi)
        # BBF > Inertial rotation matrix
        R = np.array([
                [cos(the)*cos(phi), sin(psi)*sin(the)*cos(phi)-cos(psi)*sin(phi), cos(psi)*sin(the)*cos(phi)+sin(psi)*sin(phi)],
                [cos(the)*sin(phi), sin(psi)*sin(the)*sin(phi)+cos(psi)*cos(phi), cos(psi)*sin(the)*sin(phi)-sin(psi)*cos(phi)],
                [-sin(the), sin(psi)*cos(the), cos(psi)*cos(the)]
            ])

        # Manual Construction
        #     Q3 = [cos(phi) -sin(phi) 0;sin(phi) cos(phi) 0;0 0 1];   % RZ %Rotation mappings
        #     Q2 = [cos(the) 0 sin(the);0 1 0;-sin(the) 0 cos(the)];   % RY
        #     Q1 = [1 0 0;0 cos(psi) -sin(psi);0 sin(psi) cos(psi)];   % RX
        #     R = Q3*Q2*Q1    %Rotation matrix
        #
        # RZ * RY * RX

        # inverted Wronskian
        iW = np.array([
                [0, sin(psi), cos(psi)],
                [0, cos(psi)*cos(the), -sin(psi)*cos(the)],
                [cos(the), sin(psi)*sin(the), cos(psi)*sin(the)]
            ]) / cos(the)

        # % rotz(phi)*roty(the)*rotx(psi)
        # R = [cos(the)*cos(phi) sin(psi)*sin(the)*cos(phi)-cos(psi)*sin(phi) cos(psi)*sin(the)*cos(phi)+sin(psi)*sin(phi);   %BBF > Inertial rotation matrix
        #      cos(the)*sin(phi) sin(psi)*sin(the)*sin(phi)+cos(psi)*cos(phi) cos(psi)*sin(the)*sin(phi)-sin(psi)*cos(phi);
        #      -sin(the)         sin(psi)*cos(the)                            cos(psi)*cos(the)];
        #
        # iW = [0        sin(psi)          cos(psi);             %inverted Wronskian
        #       0        cos(psi)*cos(the) -sin(psi)*cos(the);
        #       cos(the) sin(psi)*sin(the) cos(psi)*sin(the)] / cos(the);

        # ROTOR MODEL
        T = np.zeros((3,4))
        Q = np.zeros((3,4))
        tau = np.zeros((3,4))

        a1s = self.a1s
        b1s = self.b1s

        for i in range(0, self.nrotors):  # for each rotor
            # Relative motion
            Vr = np.cross(o, self.D[:,i]) + v
            # Magnitude of mu, planar components
            mu = sqrt(np.sum(Vr[0:2]**2)) / (abs(w[i]) * model['r'])
            # Non-dimensionalised normal inflow
            lc = Vr[2] / (abs(w[i]) * model['r'])
            # Non-dimensionalised induced velocity approximation
            li = mu
            alphas = atan2(lc, mu)
            # Sideslip azimuth relative to e1 (zero over nose)
            j = atan2(Vr[1], Vr[0])
            J = np.array([
                    [cos(j), -sin(j)],
                    [sin(j), cos(j)]
                ])  # BBF > mu sideslip rotation matrix

            # Flapping
            beta = np.array([
                    [((8/3*model['theta0'] + 2 * model['theta1']) * mu - 2 * lc * mu) / (1 - mu**2 / 2)],  # Longitudinal flapping
                    [0]  # Lattitudinal flapping (note sign)
                ])
            # sign(w) * (4/3)*((Ct/sigma)*(2*mu*gamma/3/a)/(1+3*e/2/r) + li)/(1+mu^2/2)];

            # Rotate the beta flapping angles to longitudinal and lateral coordinates.
            beta = J.T @ beta;
            a1s[i] = beta[0] - 16 / model['gamma'] / abs(w[i]) * o[1]
            b1s[i] = beta[1] - 16 / model['gamma'] / abs(w[i]) * o[0]

            # Forces and torques

            # Rotor thrust, linearised angle approximations
            T[:,i] = model['Ct'] * model['rho'] * model['A'] * model['r']**2 * w[i]**2 * \
                np.r_[-cos(b1s[i]) * sin(a1s[i]), sin(b1s[i]), -cos(a1s[i])*cos(b1s[i])]

            # Rotor drag torque - note that this preserves w[i] direction sign
            Q[:,i] = -model['Cq'] * model['rho'] * model['A'] * model['r']**3 * w[i] * abs(w[i]) * e3

            tau[:,i] = np.cross(T[:,i], self.D[:,i])  # Torque due to rotor thrust

        # print(f"{tau=}")
        # print(f"{T=}")

        # RIGID BODY DYNAMIC MODEL
        dz = v
        dn = iW @ o
        dv = model['g'] * e3 + R @ np.sum(T, axis=1) / model['M']
        do = -np.linalg.inv(model['J']) @ (np.cross(o, model['J'] @ o) + np.sum(tau, axis=1) + np.sum(Q, axis=1))  # row sum of torques

        # dv = quad.g*e3 + R*(1/quad.M)*sum(T,2);
        # do = inv(quad.J)*(cross(-o,quad.J*o) + sum(tau,2) + sum(Q,2)); %row sum of torques

        # vehicle can't fall below ground, remember z is down
        # NOTE(review): z[2] is the down-axis per the comment above, but the
        # clamp writes index [0] — looks like it should be z[2]/dz[2]; confirm.
        if self.groundcheck and z[2] > 0:
            z[0] = 0
            dz[0] = 0

        # # stash the flapping information for plotting
        # self.a1s = a1s
        # self.b1s = b1s

        return np.r_[dz, dn, dv, do]  # This is the state derivative vector
# ------------------------------------------------------------------------ #
class MultiRotorMixer(FunctionBlock):
    """
    :blockname:`MULTIROTORMIXER`

    .. table::
       :align: left

    +--------+------------+---------+
    | inputs | outputs    |  states |
    +--------+------------+---------+
    | 4      | 1          | 0       |
    +--------+------------+---------+
    | float  | ndarray(4) |         |
    +--------+------------+---------+
    """

    nin = 4
    nout = 1
    inlabels = ('𝛕r', '𝛕p', '𝛕y', 'T')
    outlabels = ('ω',)

    def __init__(self, model=None, wmax=1000, wmin=5, **blockargs):
        """
        Create a speed mixer block for a multi-rotor flying vehicle.

        :param model: A dictionary of vehicle geometric and inertial properties
        :type model: dict
        :param wmax: maximum rotor speed in rad/s, defaults to 1000
        :type wmax: float
        :param wmin: minimum rotor speed in rad/s, defaults to 5
        :type wmin: float
        :param blockargs: |BlockOptions|
        :type blockargs: dict
        :return: a MULTIROTORMIXER block
        :rtype: MultiRotorMixer instance

        This block converts airframe moments and total thrust into a 1D
        array of rotor speeds which can be input to the MULTIROTOR block.

        **Block ports**

            :input 𝛕r: roll torque
            :input 𝛕p: pitch torque
            :input 𝛕y: yaw torque
            :input T: total thrust

            :output ω: 1D array of rotor speeds

        **Model parameters**

        The model is a dict with the following key/value pairs.

        ===========   ==========================================
        key           description
        ===========   ==========================================
        ``nrotors``   Number of rotors (even integer)
        ``h``         Height of rotors above CoG
        ``d``         Length of flyer arms
        ``r``         Rotor radius
        ===========   ==========================================

        .. note::
            - Based on MATLAB code developed by <NAME> 2004.

        :seealso: :class:`MultiRotor` :class:`MultiRotorPlot`
        """
        if model is None:
            raise ValueError('no model provided')

        super().__init__(**blockargs)
        self.type = 'multirotormixer'

        self.model = model
        self.nrotors = model['nrotors']
        # limits are stored squared because mixing happens in thrust (w^2) space
        self.minw = wmin**2
        self.maxw = wmax**2

        self.theta = np.arange(self.nrotors) / self.nrotors * 2 * np.pi

        # build the Nx4 mixer matrix
        # NOTE(review): np.linalg.inv(self.M) below requires M to be square,
        # i.e. nrotors == 4 — confirm other rotor counts are unsupported here.
        M = []
        s = []
        for i in range(self.nrotors):
            # roll and pitch coupling
            column = np.r_[
                -sin(self.theta[i]) * model['d'] * model['b'],
                cos(self.theta[i]) * model['d'] * model['b'],
                model['k'] if (i % 2) == 0 else -model['k'] ,
                -model['b']
            ]
            s.append(1 if (i % 2) == 0 else -1)
            M.append(column)
        self.M = np.array(M).T
        self.Minv = np.linalg.inv(self.M)
        self.signs = np.array(s)

    def output(self, t):
        """Map (𝛕r, 𝛕p, 𝛕y, T) inputs to a vector of signed rotor speeds."""
        tau = self.inputs

        # mix airframe force/torque to rotor thrusts
        w = self.Minv @ tau

        # clip the rotor speeds to the range [minw, maxw]
        w = np.clip(w, self.minw, self.maxw)

        # convert required thrust to rotor speed
        w = np.sqrt(w)

        # flip the signs of alternating rotors
        w = self.signs * w

        return [w]
# ------------------------------------------------------------------------ #
class MultiRotorPlot(GraphicsBlock):
    """
    :blockname:`MULTIROTORPLOT`
    .. table::
       :align: left
    +--------+---------+---------+
    | inputs | outputs | states |
    +--------+---------+---------+
    | 1 | 0 | 0 |
    +--------+---------+---------+
    | dict | | |
    +--------+---------+---------+
    """
    # sink block: one dict-valued input port, no outputs, no states
    nin = 1
    nout = 0
    inlabels = ('x',)
    # Based on code lovingly coded by <NAME>, first coded 17/4/02
    # version 2 2004 added scaling and ground display
    # version 3 2010 improved rotor rendering and fixed mirroring bug
    # Displays X-4 flyer position and attitude in a 3D plot.
    # GREEN ROTOR POINTS NORTH
    # BLUE ROTOR POINTS EAST
    # PARAMETERS
    # s defines the plot size in meters
    # swi controls flyer attitude plot; 1 = on, otherwise off.
    # INPUTS
    # 1 Center X position
    # 2 Center Y position
    # 3 Center Z position
    # 4 Yaw angle in rad
    # 5 Pitch angle in rad
    # 6 Roll angle in rad

    def __init__(self, model, scale=None, flapscale=1, projection='ortho', **blockargs):
        r"""
        Create a block that displays/animates a multi-rotor flying vehicle.

        :param model: A dictionary of vehicle geometric and inertial properties
        :type model: dict
        :param scale: dimensions of workspace: xmin, xmax, ymin, ymax, zmax,
            defaults to [-2, 2, -2, 2, 10]
        :type scale: array_like, optional
        :param flapscale: exagerate flapping angle by this factor, defaults to 1
        :type flapscale: float
        :param projection: 3D projection, one of: 'ortho' [default], 'perspective'
        :type projection: str
        :param blockargs: |BlockOptions|
        :type blockargs: dict
        :return: a MULTIROTORPLOT block
        :rtype: MultiRotorPlot instance
        :raises ValueError: if no model is provided

        Animate a multi-rotor flying vehicle using Matplotlib graphics. The
        rotors are shown as circles and their orientation includes rotor
        flapping which can be exagerated by ``flapscale``.

        .. figure:: ../../figs/multirotorplot.png
           :width: 500px
           :alt: example of generated graphic

           Example of quad-rotor display.

        **Block ports**

        :input x: a dictionary signal that includes the item:

            - ``x`` pose in the world frame as :math:`[x, y, z, \theta_Y, \theta_P, \theta_R]`
            - ``a1s`` rotor flap angle
            - ``b1s`` rotor flap angle

        **Model parameters**

        The model is a dict with the following key/value pairs.

        =========== ==========================================
        key description
        =========== ==========================================
        ``nrotors`` Number of rotors (even integer)
        ``h`` Height of rotors above CoG
        ``d`` Length of flyer arms
        ``r`` Rotor radius
        =========== ==========================================

        .. note::
            - Based on MATLAB code developed by <NAME> 2004.

        :seealso: :class:`MultiRotor` :class:`MultiRotorMixer`
        """
        if model is None:
            raise ValueError('no model provided')
        # avoid the shared mutable-default pitfall: build the default
        # workspace list per instance
        if scale is None:
            scale = [-2, 2, -2, 2, 10]
        super().__init__(nin=1, **blockargs)
        self.type = 'quadrotorplot'
        self.model = model
        self.scale = scale
        self.nrotors = model['nrotors']
        self.projection = projection
        self.flapscale = flapscale

    def start(self, state):
        """
        Initialize the 3D scene: axes, ground markings, and one arm line
        plus one rotor-disk line per rotor, all updated in place by step().
        """
        quad = self.model
        # D[:, i] is rotor hub i's displacement from the CoG (body frame);
        # the first rotor lies on the x-axis and subsequent rotors follow in
        # clockwise order looking down from above
        self.D = np.zeros((3, self.nrotors))
        for i in range(0, self.nrotors):
            theta = i / self.nrotors * 2 * pi
            self.D[:, i] = np.r_[quad['d'] * cos(theta), quad['d'] * sin(theta), quad['h']]

        # no axes in the figure yet, create a 3D axes
        self.fig = self.create_figure(state)
        self.ax = self.fig.add_subplot(111, projection='3d', proj_type=self.projection)
        # ax.set_aspect('equal')
        self.ax.set_xlabel('X')
        self.ax.set_ylabel('Y')
        self.ax.set_zlabel('-Z (height above ground)')
        # text panel in the top-left corner, refreshed every step
        self.panel = self.ax.text2D(0.05, 0.95, '', transform=self.ax.transAxes,
            fontsize=10, family='monospace', verticalalignment='top',
            bbox=dict(boxstyle='round', facecolor='white', edgecolor='black'))
        # TODO allow user to set maximum height of plot volume
        self.ax.set_xlim(self.scale[0], self.scale[1])
        self.ax.set_ylim(self.scale[2], self.scale[3])
        self.ax.set_zlim(0, self.scale[4])

        # plot the ground boundaries and the big cross
        self.ax.plot([self.scale[0], self.scale[1]], [self.scale[2], self.scale[3]], [0, 0], 'b-')
        self.ax.plot([self.scale[0], self.scale[1]], [self.scale[3], self.scale[2]], [0, 0], 'b-')
        self.ax.grid(True)

        # dashed drop line from vehicle to the ground, and its ground mark
        self.shadow, = self.ax.plot([0, 0], [0, 0], 'k--')
        self.groundmark, = self.ax.plot([0], [0], [0], 'kx')

        self.arm = []
        self.disk = []
        for i in range(0, self.nrotors):
            h, = self.ax.plot([0], [0], [0])
            self.arm.append(h)
            # first rotor is blue, the rest green
            color = 'b-' if i == 0 else 'g-'
            h, = self.ax.plot([0], [0], [0], color)
            self.disk.append(h)

        # rotor flap angles; overwritten from the input signal in step()
        self.a1s = np.zeros((self.nrotors,))
        self.b1s = np.zeros((self.nrotors,))

        plt.draw()
        plt.show(block=False)
        # NOTE(review): parent start() called without arguments -- confirm
        # against the GraphicsBlock API
        super().start()

    def step(self, state):
        """
        Redraw the vehicle from the current input signal: rotor disks with
        flapping, arms, drop shadow, ground mark, and the status panel.
        """
        def plot3(h, x, y, z):
            # update an existing Line3D artist in place
            h.set_data_3d(x, y, z)

        # READ STATE
        z = self.inputs[0]['x'][0:3]
        n = self.inputs[0]['x'][3:6]
        # TODO, check input dimensions, 12 or 12+2N, deal with flapping
        a1s = self.inputs[0]['a1s']
        b1s = self.inputs[0]['b1s']

        quad = self.model
        r = quad['r']  # Rotor radius

        # PREPROCESS ROTATION MATRIX
        phi, the, psi = n  # Euler angles
        # BBF > Inertial rotation matrix
        R = np.array([
                [cos(the) * cos(phi), sin(psi) * sin(the) * cos(phi) - cos(psi) * sin(phi), cos(psi) * sin(the) * cos(phi) + sin(psi) * sin(phi)],
                [cos(the) * sin(phi), sin(psi) * sin(the) * sin(phi) + cos(psi) * cos(phi), cos(psi) * sin(the) * sin(phi) - sin(psi)* cos(phi)],
                [-sin(the), sin(psi)*cos(the), cos(psi) * cos(the)]
            ])

        # CALCULATE FLYER TIP POSITONS USING COORDINATE FRAME ROTATION
        # F maps world coordinates into the plot frame (x east, y mirrored,
        # z up so altitude plots as positive)
        F = np.array([
                [1, 0, 0],
                [0, -1, 0],
                [0, 0, -1]
            ])

        # Draw flyer rotors: a 20-point circle of radius r in the rotor plane
        theta = np.linspace(0, 2 * pi, 20)
        circle = np.zeros((3, 20))
        for j, t in enumerate(theta):
            circle[:, j] = np.r_[r * sin(t), r * cos(t), 0]

        hub = np.zeros((3, self.nrotors))
        tippath = np.zeros((3, 20, self.nrotors))
        for i in range(0, self.nrotors):
            hub[:, i] = F @ (z + R @ self.D[:, i])  # points in the inertial frame

            q = self.flapscale   # Flapping angle scaling for output display - makes it easier to see what flapping is occurring
            # Rotor -> Plot frame
            Rr = np.array([
                    [cos(q * a1s[i]), sin(q * b1s[i]) * sin(q * a1s[i]), cos(q * b1s[i]) * sin(q * a1s[i])],
                    [0, cos(q * b1s[i]), -sin(q*b1s[i])],
                    [-sin(q * a1s[i]), sin(q * b1s[i]) * cos(q * a1s[i]), cos(q * b1s[i]) * cos(q * a1s[i])]
                ])

            tippath[:, :, i] = F @ R @ Rr @ circle
            plot3(self.disk[i], hub[0,i] + tippath[0,:,i], hub[1,i] + tippath[1,:,i], hub[2,i] + tippath[2,:,i])

        # Draw flyer arms from each hub to the vehicle centre
        hub0 = F @ z  # centre of vehicle
        for i in range(0, self.nrotors):
            plot3(self.arm[i], [hub[0,i], hub0[0]], [hub[1,i], hub0[1]], [hub[2,i], hub0[2]])

        # plot the vehicle's centroid on the ground plane
        plot3(self.shadow, [z[0], 0], [-z[1], 0], [0, 0])
        plot3(self.groundmark, z[0], -z[1], 0)

        # time, height and yaw readout in the status panel
        textstr = f"t={state.t: .2f}\nh={z[2]: .2f}\nγ={n[0]: .2f}"
        self.panel.set_text(textstr)

        super().step(state=state)

    def done(self, block=False, **kwargs):
        """
        Finalize the display: if graphics are enabled, show the figure
        (optionally blocking) before handing off to the superclass.
        """
        if self.bd.options.graphics:
            plt.show(block=block)
        super().done()
if __name__ == "__main__":
    from bdsim.blocks.quad_model import quadrotor

    # Ad-hoc exercise of the MultiRotor dynamics block: for a given set of
    # rotor speeds, print the yaw-torque balance, the state derivative, and
    # the resulting output.
    m = MultiRotor(model=quadrotor)

    def show(w):
        print()
        # net yaw torque indicator: CW pair minus CCW pair, squared speeds
        print(w[0]**2 + w[2]**2 - w[1]**2 - w[3]**2)
        print(w)
        # hover pose at 4 m altitude, zero attitude and velocity
        m._x = np.r_[0.0, 0, -4, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        m.inputs = [np.r_[w]]
        dx = m.deriv()
        print('zdd', dx[8])
        print('wd', dx[9:12])
        # feed the derivative back in as a state and inspect the output
        m._x = dx
        x = m.output()[0]['X']
        print('zd', x[8])
        print('ypr_dot', x[9:12])

    # balanced: all rotors at the same speed magnitude
    show([800.0, -800, 800, -800])

    # tau_y pitch: unequal speeds across the x-axis rotor pair, with the
    # other pair chosen to keep total thrust constant
    z = np.sqrt((900**2 + 700**2) /2)
    show([900.0, -z, 700, -z])
    show([700.0, -z, 900, -z])

    # tau_x roll: unequal speeds across the y-axis rotor pair
    show([z, -900, z, -700])
    show([z, -700, z, -900])

    # tau_z yaw: imbalance between the CW and CCW rotor pairs
    show([900, -800, 900, -800])
| [
"numpy.clip",
"numpy.sqrt",
"numpy.cross",
"numpy.any",
"math.cos",
"numpy.array",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.linspace",
"math.atan2",
"numpy.sum",
"matplotlib.pyplot.draw",
"math.sin",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((29901, 29935), 'numpy.sqrt', 'np.sqrt', (['((900 ** 2 + 700 ** 2) / 2)'], {}), '((900 ** 2 + 700 ** 2) / 2)\n', (29908, 29935), True, 'import numpy as np\n'), ((7082, 7109), 'numpy.zeros', 'np.zeros', (['(3, self.nrotors)'], {}), '((3, self.nrotors))\n', (7090, 7109), True, 'import numpy as np\n'), ((7130, 7155), 'numpy.zeros', 'np.zeros', (['(self.nrotors,)'], {}), '((self.nrotors,))\n', (7138, 7155), True, 'import numpy as np\n'), ((7541, 7566), 'numpy.zeros', 'np.zeros', (['(self.nrotors,)'], {}), '((self.nrotors,))\n', (7549, 7566), True, 'import numpy as np\n'), ((7586, 7611), 'numpy.zeros', 'np.zeros', (['(self.nrotors,)'], {}), '((self.nrotors,))\n', (7594, 7611), True, 'import numpy as np\n'), ((12524, 12540), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (12532, 12540), True, 'import numpy as np\n'), ((12552, 12568), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (12560, 12568), True, 'import numpy as np\n'), ((12582, 12598), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (12590, 12598), True, 'import numpy as np\n'), ((18709, 18730), 'numpy.linalg.inv', 'np.linalg.inv', (['self.M'], {}), '(self.M)\n', (18722, 18730), True, 'import numpy as np\n'), ((18752, 18763), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (18760, 18763), True, 'import numpy as np\n'), ((18969, 19001), 'numpy.clip', 'np.clip', (['w', 'self.minw', 'self.maxw'], {}), '(w, self.minw, self.maxw)\n', (18976, 19001), True, 'import numpy as np\n'), ((19064, 19074), 'numpy.sqrt', 'np.sqrt', (['w'], {}), '(w)\n', (19071, 19074), True, 'import numpy as np\n'), ((23087, 23114), 'numpy.zeros', 'np.zeros', (['(3, self.nrotors)'], {}), '((3, self.nrotors))\n', (23095, 23114), True, 'import numpy as np\n'), ((25042, 25067), 'numpy.zeros', 'np.zeros', (['(self.nrotors,)'], {}), '((self.nrotors,))\n', (25050, 25067), True, 'import numpy as np\n'), ((25087, 25112), 'numpy.zeros', 'np.zeros', (['(self.nrotors,)'], {}), '((self.nrotors,))\n', (25095, 25112), 
True, 'import numpy as np\n'), ((25134, 25144), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (25142, 25144), True, 'import matplotlib.pyplot as plt\n'), ((25153, 25174), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (25161, 25174), True, 'import matplotlib.pyplot as plt\n'), ((26768, 26813), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, -1, 0], [0, 0, -1]]'], {}), '([[1, 0, 0], [0, -1, 0], [0, 0, -1]])\n', (26776, 26813), True, 'import numpy as np\n'), ((26933, 26959), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * pi)', '(20)'], {}), '(0, 2 * pi, 20)\n', (26944, 26959), True, 'import numpy as np\n'), ((26977, 26994), 'numpy.zeros', 'np.zeros', (['(3, 20)'], {}), '((3, 20))\n', (26985, 26994), True, 'import numpy as np\n'), ((27115, 27142), 'numpy.zeros', 'np.zeros', (['(3, self.nrotors)'], {}), '((3, self.nrotors))\n', (27123, 27142), True, 'import numpy as np\n'), ((27161, 27192), 'numpy.zeros', 'np.zeros', (['(3, 20, self.nrotors)'], {}), '((3, 20, self.nrotors))\n', (27169, 27192), True, 'import numpy as np\n'), ((6341, 6366), 'numpy.zeros', 'np.zeros', (['(self.nstates,)'], {}), '((self.nstates,))\n', (6349, 6366), True, 'import numpy as np\n'), ((9023, 9031), 'math.cos', 'cos', (['the'], {}), '(the)\n', (9026, 9031), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((9099, 9115), 'numpy.linalg.inv', 'np.linalg.inv', (['R'], {}), '(R)\n', (9112, 9115), True, 'import numpy as np\n'), ((10047, 10061), 'numpy.any', 'np.any', (['(w == 0)'], {}), '(w == 0)\n', (10053, 10061), True, 'import numpy as np\n'), ((11849, 11857), 'math.cos', 'cos', (['the'], {}), '(the)\n', (11852, 11857), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((13150, 13163), 'math.atan2', 'atan2', (['lc', 'mu'], {}), '(lc, mu)\n', (13155, 13163), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((13180, 13199), 'math.atan2', 'atan2', (['Vr[1]', 'Vr[0]'], {}), '(Vr[1], Vr[0])\n', (13185, 13199), False, 'from 
math import sin, cos, atan2, tan, sqrt, pi\n'), ((13548, 13659), 'numpy.array', 'np.array', (["[[((8 / 3 * model['theta0'] + 2 * model['theta1']) * mu - 2 * lc * mu) / (1 -\n mu ** 2 / 2)], [0]]"], {}), "([[((8 / 3 * model['theta0'] + 2 * model['theta1']) * mu - 2 * lc *\n mu) / (1 - mu ** 2 / 2)], [0]])\n", (13556, 13659), True, 'import numpy as np\n'), ((14733, 14764), 'numpy.cross', 'np.cross', (['T[:, i]', 'self.D[:, i]'], {}), '(T[:, i], self.D[:, i])\n', (14741, 14764), True, 'import numpy as np\n'), ((18675, 18686), 'numpy.array', 'np.array', (['M'], {}), '(M)\n', (18683, 18686), True, 'import numpy as np\n'), ((28848, 28869), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': 'block'}), '(block=block)\n', (28856, 28869), True, 'import matplotlib.pyplot as plt\n'), ((12765, 12790), 'numpy.cross', 'np.cross', (['o', 'self.D[:, i]'], {}), '(o, self.D[:, i])\n', (12773, 12790), True, 'import numpy as np\n'), ((15011, 15036), 'numpy.linalg.inv', 'np.linalg.inv', (["model['J']"], {}), "(model['J'])\n", (15024, 15036), True, 'import numpy as np\n'), ((15092, 15109), 'numpy.sum', 'np.sum', (['Q'], {'axis': '(1)'}), '(Q, axis=1)\n', (15098, 15109), True, 'import numpy as np\n'), ((12816, 12836), 'numpy.sum', 'np.sum', (['(Vr[0:2] ** 2)'], {}), '(Vr[0:2] ** 2)\n', (12822, 12836), True, 'import numpy as np\n'), ((14966, 14983), 'numpy.sum', 'np.sum', (['T'], {'axis': '(1)'}), '(T, axis=1)\n', (14972, 14983), True, 'import numpy as np\n'), ((15040, 15067), 'numpy.cross', 'np.cross', (['o', "(model['J'] @ o)"], {}), "(o, model['J'] @ o)\n", (15048, 15067), True, 'import numpy as np\n'), ((15070, 15089), 'numpy.sum', 'np.sum', (['tau'], {'axis': '(1)'}), '(tau, axis=1)\n', (15076, 15089), True, 'import numpy as np\n'), ((18120, 18143), 'numpy.arange', 'np.arange', (['self.nrotors'], {}), '(self.nrotors)\n', (18129, 18143), True, 'import numpy as np\n'), ((6517, 6531), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (6525, 6531), True, 'import numpy as np\n'), 
((7426, 7436), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (7429, 7436), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((7451, 7461), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (7454, 7461), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8310, 8318), 'math.cos', 'cos', (['the'], {}), '(the)\n', (8313, 8318), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8321, 8329), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (8324, 8329), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8457, 8465), 'math.cos', 'cos', (['the'], {}), '(the)\n', (8460, 8465), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8468, 8476), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (8471, 8476), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8605, 8613), 'math.sin', 'sin', (['the'], {}), '(the)\n', (8608, 8613), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8625, 8633), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (8628, 8633), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8636, 8644), 'math.cos', 'cos', (['the'], {}), '(the)\n', (8639, 8644), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8679, 8687), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (8682, 8687), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8690, 8698), 'math.cos', 'cos', (['the'], {}), '(the)\n', (8693, 8698), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8807, 8815), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (8810, 8815), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8829, 8837), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (8832, 8837), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8949, 8957), 'math.cos', 'cos', (['the'], {}), '(the)\n', (8952, 8957), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10844, 10852), 'math.cos', 'cos', (['the'], {}), '(the)\n', (10847, 10852), False, 'from math 
import sin, cos, atan2, tan, sqrt, pi\n'), ((10853, 10861), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (10856, 10861), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10969, 10977), 'math.cos', 'cos', (['the'], {}), '(the)\n', (10972, 10977), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10978, 10986), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (10981, 10986), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11095, 11103), 'math.sin', 'sin', (['the'], {}), '(the)\n', (11098, 11103), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11113, 11121), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (11116, 11121), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11122, 11130), 'math.cos', 'cos', (['the'], {}), '(the)\n', (11125, 11130), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11159, 11167), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (11162, 11167), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11168, 11176), 'math.cos', 'cos', (['the'], {}), '(the)\n', (11171, 11176), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11646, 11654), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (11649, 11654), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11665, 11673), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (11668, 11673), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11780, 11788), 'math.cos', 'cos', (['the'], {}), '(the)\n', (11783, 11788), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((13332, 13338), 'math.cos', 'cos', (['j'], {}), '(j)\n', (13335, 13338), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((13371, 13377), 'math.sin', 'sin', (['j'], {}), '(j)\n', (13374, 13377), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((13380, 13386), 'math.cos', 'cos', (['j'], {}), '(j)\n', (13383, 13386), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((14472, 14483), 
'math.sin', 'sin', (['b1s[i]'], {}), '(b1s[i])\n', (14475, 14483), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((23384, 23394), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (23387, 23394), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((23408, 23418), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (23411, 23418), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((25973, 25981), 'math.cos', 'cos', (['the'], {}), '(the)\n', (25976, 25981), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((25984, 25992), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (25987, 25992), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26123, 26131), 'math.cos', 'cos', (['the'], {}), '(the)\n', (26126, 26131), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26134, 26142), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (26137, 26142), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26271, 26279), 'math.sin', 'sin', (['the'], {}), '(the)\n', (26274, 26279), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26291, 26299), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (26294, 26299), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26300, 26308), 'math.cos', 'cos', (['the'], {}), '(the)\n', (26303, 26308), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26345, 26353), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (26348, 26353), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26356, 26364), 'math.cos', 'cos', (['the'], {}), '(the)\n', (26359, 26364), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27069, 27075), 'math.sin', 'sin', (['t'], {}), '(t)\n', (27072, 27075), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27081, 27087), 'math.cos', 'cos', (['t'], {}), '(t)\n', (27084, 27087), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27540, 27555), 'math.cos', 'cos', (['(q * a1s[i])'], {}), '(q 
* a1s[i])\n', (27543, 27555), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27669, 27684), 'math.cos', 'cos', (['(q * b1s[i])'], {}), '(q * b1s[i])\n', (27672, 27684), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((6647, 6661), 'numpy.zeros', 'np.zeros', (['(6,)'], {}), '((6,))\n', (6655, 6661), True, 'import numpy as np\n'), ((8353, 8361), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (8356, 8361), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8364, 8372), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (8367, 8372), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8375, 8383), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (8378, 8383), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8407, 8415), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (8410, 8415), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8418, 8426), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (8421, 8426), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8429, 8437), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (8432, 8437), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8500, 8508), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (8503, 8508), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8511, 8519), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (8514, 8519), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8522, 8530), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (8525, 8530), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8554, 8562), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (8557, 8562), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8565, 8573), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (8568, 8573), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8576, 8584), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (8579, 8584), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8884, 8892), 
'math.cos', 'cos', (['psi'], {}), '(psi)\n', (8887, 8892), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8895, 8903), 'math.cos', 'cos', (['the'], {}), '(the)\n', (8898, 8903), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8917, 8925), 'math.cos', 'cos', (['the'], {}), '(the)\n', (8920, 8925), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8959, 8967), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (8962, 8967), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8970, 8978), 'math.sin', 'sin', (['the'], {}), '(the)\n', (8973, 8978), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8981, 8989), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (8984, 8989), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8992, 9000), 'math.sin', 'sin', (['the'], {}), '(the)\n', (8995, 9000), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10881, 10889), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (10884, 10889), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10890, 10898), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (10893, 10898), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10899, 10907), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (10902, 10907), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10927, 10935), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (10930, 10935), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10936, 10944), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (10939, 10944), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10945, 10953), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (10948, 10953), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11006, 11014), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (11009, 11014), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11015, 11023), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (11018, 11023), False, 'from math 
import sin, cos, atan2, tan, sqrt, pi\n'), ((11024, 11032), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (11027, 11032), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11052, 11060), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (11055, 11060), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11061, 11069), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (11064, 11069), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11070, 11078), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (11073, 11078), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11719, 11727), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (11722, 11727), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11728, 11736), 'math.cos', 'cos', (['the'], {}), '(the)\n', (11731, 11736), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11748, 11756), 'math.cos', 'cos', (['the'], {}), '(the)\n', (11751, 11756), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11790, 11798), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (11793, 11798), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11799, 11807), 'math.sin', 'sin', (['the'], {}), '(the)\n', (11802, 11807), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11809, 11817), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (11812, 11817), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11818, 11826), 'math.sin', 'sin', (['the'], {}), '(the)\n', (11821, 11826), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((13341, 13347), 'math.sin', 'sin', (['j'], {}), '(j)\n', (13344, 13347), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((14459, 14470), 'math.sin', 'sin', (['a1s[i]'], {}), '(a1s[i])\n', (14462, 14470), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((14498, 14509), 'math.cos', 'cos', (['b1s[i]'], {}), '(b1s[i])\n', (14501, 14509), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), 
((18431, 18449), 'math.cos', 'cos', (['self.theta[i]'], {}), '(self.theta[i])\n', (18434, 18449), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26016, 26024), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (26019, 26024), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26027, 26035), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (26030, 26035), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26038, 26046), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (26041, 26046), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26070, 26078), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (26073, 26078), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26081, 26089), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (26084, 26089), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26092, 26100), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (26095, 26100), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26166, 26174), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (26169, 26174), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26177, 26185), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (26180, 26185), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26188, 26196), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (26191, 26196), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26220, 26228), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (26223, 26228), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26231, 26239), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (26234, 26239), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26242, 26250), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (26245, 26250), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27558, 27573), 'math.sin', 'sin', (['(q * b1s[i])'], {}), '(q * b1s[i])\n', (27561, 27573), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27576, 27591), 
'math.sin', 'sin', (['(q * a1s[i])'], {}), '(q * a1s[i])\n', (27579, 27591), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27594, 27609), 'math.cos', 'cos', (['(q * b1s[i])'], {}), '(q * b1s[i])\n', (27597, 27609), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27612, 27627), 'math.sin', 'sin', (['(q * a1s[i])'], {}), '(q * a1s[i])\n', (27615, 27627), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27705, 27720), 'math.sin', 'sin', (['(q * b1s[i])'], {}), '(q * b1s[i])\n', (27708, 27720), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27743, 27758), 'math.sin', 'sin', (['(q * a1s[i])'], {}), '(q * a1s[i])\n', (27746, 27758), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27760, 27775), 'math.sin', 'sin', (['(q * b1s[i])'], {}), '(q * b1s[i])\n', (27763, 27775), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27778, 27793), 'math.cos', 'cos', (['(q * a1s[i])'], {}), '(q * a1s[i])\n', (27781, 27793), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27796, 27811), 'math.cos', 'cos', (['(q * b1s[i])'], {}), '(q * b1s[i])\n', (27799, 27811), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((27814, 27829), 'math.cos', 'cos', (['(q * a1s[i])'], {}), '(q * a1s[i])\n', (27817, 27829), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((6760, 6774), 'numpy.zeros', 'np.zeros', (['(9,)'], {}), '((9,))\n', (6768, 6774), True, 'import numpy as np\n'), ((8331, 8339), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (8334, 8339), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8342, 8350), 'math.sin', 'sin', (['the'], {}), '(the)\n', (8345, 8350), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8385, 8393), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (8388, 8393), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8396, 8404), 'math.sin', 'sin', (['the'], {}), '(the)\n', (8399, 8404), False, 'from math import 
sin, cos, atan2, tan, sqrt, pi\n'), ((8478, 8486), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (8481, 8486), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8489, 8497), 'math.sin', 'sin', (['the'], {}), '(the)\n', (8492, 8497), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8532, 8540), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (8535, 8540), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8543, 8551), 'math.sin', 'sin', (['the'], {}), '(the)\n', (8546, 8551), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((8906, 8914), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (8909, 8914), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10863, 10871), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (10866, 10871), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10872, 10880), 'math.sin', 'sin', (['the'], {}), '(the)\n', (10875, 10880), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10909, 10917), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (10912, 10917), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10918, 10926), 'math.sin', 'sin', (['the'], {}), '(the)\n', (10921, 10926), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10988, 10996), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (10991, 10996), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((10997, 11005), 'math.sin', 'sin', (['the'], {}), '(the)\n', (11000, 11005), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11034, 11042), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (11037, 11042), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11043, 11051), 'math.sin', 'sin', (['the'], {}), '(the)\n', (11046, 11051), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((11739, 11747), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (11742, 11747), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((14445, 14456), 'math.cos', 'cos', 
(['b1s[i]'], {}), '(b1s[i])\n', (14448, 14456), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((14486, 14497), 'math.cos', 'cos', (['a1s[i]'], {}), '(a1s[i])\n', (14489, 14497), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((18368, 18386), 'math.sin', 'sin', (['self.theta[i]'], {}), '(self.theta[i])\n', (18371, 18386), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((25994, 26002), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (25997, 26002), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26005, 26013), 'math.sin', 'sin', (['the'], {}), '(the)\n', (26008, 26013), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26048, 26056), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (26051, 26056), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26059, 26067), 'math.sin', 'sin', (['the'], {}), '(the)\n', (26062, 26067), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26144, 26152), 'math.sin', 'sin', (['psi'], {}), '(psi)\n', (26147, 26152), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26155, 26163), 'math.sin', 'sin', (['the'], {}), '(the)\n', (26158, 26163), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26198, 26206), 'math.cos', 'cos', (['psi'], {}), '(psi)\n', (26201, 26206), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n'), ((26209, 26217), 'math.sin', 'sin', (['the'], {}), '(the)\n', (26212, 26217), False, 'from math import sin, cos, atan2, tan, sqrt, pi\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.