code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# coding:utf-8
import pandas as pd
import numpy as np
import math
from sklearn.tree import DecisionTreeClassifier, _tree
from sklearn.cluster import KMeans
from .utils import fillna, bin_by_splits, to_ndarray, clip
from .utils.decorator import support_dataframe
from .utils.forwardSplit import *
DEFAULT_BINS = 10
DEFAULT_DECIMAL = 4 # 默认小数精度
def MonoMerge(feature, target, n_bins=None, min_samples=10):
    '''Monotonic (WOE-driven) binning via the project's forwardSplit helper.

    :param feature: feature values to be binned (array-like)
    :param target: target labels (array-like)
    :param n_bins: maximum number of bins (defaults to DEFAULT_BINS)
    :param min_samples: minimum samples per bin; a value < 1 is treated
        as a fraction of the total sample count
    :return: numpy array -- sorted array of split points
    '''
    df = pd.DataFrame()
    df['feature'] = feature
    df['target'] = target
    df['feature'] = df['feature'].fillna(-99) # binning fails on missing values, so impute with -99
    if n_bins is None:
        n_bins = DEFAULT_BINS
    if min_samples < 1:
        # interpret a fractional min_samples as a share of the sample size
        min_samples = math.ceil(len(target) * min_samples)
    t = forwardSplit(df['feature'], df['target'])
    t.fit(sby='woe', minv=0.01, num_split=n_bins-1,min_sample=min_samples,init_split=20)
    # monotonic binning failed -> fall back to a 2-bin decision-tree split
    if t.bins is None:
        bins = DTMerge(feature, target, n_bins=2, min_samples=min_samples)
        return bins
    else:
        bins = list(t.bins)
        bins.pop(0) # drop the lower outer boundary value
        bins.pop(-1) # drop the upper outer boundary value
    # round results to DEFAULT_DECIMAL places
    thresholds = np.array(bins)
    for i in range(len(thresholds)):
        if type(thresholds[i]) == np.float64:
            thresholds[i] = round(thresholds[i], DEFAULT_DECIMAL)
    return np.sort(thresholds)
def DTMerge(feature, target, nan = -1, n_bins = None, min_samples = 1):
    """Merge by Decision Tree.

    Fits a decision tree on the (filled) feature and uses its internal
    split thresholds as bin boundaries.

    Args:
        feature (array-like)
        target (array-like): target will be used to fit decision tree
        nan (number): value will be used to fill nan
        n_bins (int): n groups that will be merged into
        min_samples (int): min number of samples in each leaf nodes

    Returns:
        array: sorted array of split points, rounded to DEFAULT_DECIMAL places
    """
    if n_bins is None and min_samples == 1:
        n_bins = DEFAULT_BINS
    feature = fillna(feature, by = nan)
    tree = DecisionTreeClassifier(
        min_samples_leaf = min_samples,
        max_leaf_nodes = n_bins,
    )
    tree.fit(feature.reshape((-1, 1)), target)
    thresholds = tree.tree_.threshold
    # internal (non-leaf) nodes only; leaves carry TREE_UNDEFINED sentinels
    thresholds = thresholds[thresholds != _tree.TREE_UNDEFINED]
    # thresholds is a float array, so round it in one vectorized call
    # (replaces the former per-element `type(x) == np.float64` loop)
    return np.sort(np.round(thresholds, DEFAULT_DECIMAL))
def StepMerge(feature, nan = None, n_bins = None, clip_v = None, clip_std = None, clip_q = None,min_samples = 1):
    """Merge by equal-width steps.

    Args:
        feature (array-like)
        nan (number): value will be used to fill nan (skipped when None)
        n_bins (int): n groups that will be merged into
        clip_v (number | tuple): min/max value of clipping
        clip_std (number | tuple): min/max std of clipping
        clip_q (number | tuple): min/max quantile of clipping

    Returns:
        array: split points of feature, rounded to 4 decimals
    """
    if n_bins is None:
        n_bins = DEFAULT_BINS
    if nan is not None:
        feature = fillna(feature, by = nan)
    feature = clip(feature, value = clip_v, std = clip_std, quantile = clip_q)
    # use vmin/vmax rather than shadowing the builtins `min` and `max`
    vmax = np.nanmax(feature)
    vmin = np.nanmin(feature)
    step = (vmax - vmin) / n_bins
    # drop the left edge (vmin itself); keep only the interior cut points
    return np.arange(vmin, vmax, step)[1:].round(4)
def QuantileMerge(feature, nan = -1, n_bins = None, q = None ,min_samples = 1):
    """Merge by quantile.

    Args:
        feature (array-like)
        nan (number): value used to fill nan
        n_bins (int): number of groups to merge into (ignored when q given)
        q (array-like): explicit list of percentage split points

    Returns:
        array: unique split points of feature (left edge dropped)
    """
    if q is None:
        # derive evenly spaced quantile levels from the bin count
        if n_bins is None:
            n_bins = DEFAULT_BINS
        q = np.arange(0, 1, 1 / n_bins)
    filled = fillna(feature, by = nan)
    points = np.quantile(filled, q).round(4)
    # the first entry is the 0th percentile (the minimum) -- drop it
    return np.unique(points)[1:]
def KMeansMerge(feature, target = None, nan = -1, n_bins = None, random_state = 1, min_samples = 1):
    """Merge by KMeans.

    Args:
        feature (array-like)
        target (array-like): target will be used to fit kmeans model
        nan (number): value will be used to fill nan
        n_bins (int): n groups that will be merged into
        random_state (int): random state will be used for kmeans model

    Returns:
        array: split points of feature, rounded to 4 decimals
    """
    if n_bins is None:
        n_bins = DEFAULT_BINS
    feature = fillna(feature, by = nan)
    model = KMeans(
        n_clusters = n_bins,
        random_state = random_state
    )
    model.fit(feature.reshape((-1 ,1)), target)
    centers = np.sort(model.cluster_centers_.reshape(-1))
    # split points are the midpoints between adjacent sorted cluster centers;
    # vectorized slice arithmetic replaces the former per-index loop
    splits = (centers[:-1] + centers[1:]) / 2
    return splits.round(4)
@support_dataframe(require_target = False)
def merge(feature, target = None, method = 'dt', return_splits = False, **kwargs):
    """Merge feature values into discrete groups.

    Args:
        feature (array-like)
        target (array-like)
        method (str): 'dt', 'mono', 'quantile', 'step', 'kmeans' - the merging strategy
        return_splits (bool): whether to also return the split points
        n_bins (int): n groups that will be merged into (forwarded via kwargs)

    Returns:
        array: merged group labels, same length as feature
        array: split points (only when return_splits is True)
    """
    feature = to_ndarray(feature)
    strategy = method.lower()
    if strategy == 'dt':
        splits = DTMerge(feature, target, **kwargs)
    elif strategy == 'mono':
        splits = MonoMerge(feature, target, **kwargs)
    elif strategy == 'quantile':
        splits = QuantileMerge(feature, **kwargs)
    elif strategy == 'step':
        splits = StepMerge(feature, **kwargs)
    elif strategy == 'kmeans':
        splits = KMeansMerge(feature, target=target, **kwargs)
    else:
        # unknown strategy: no split points at all
        splits = np.empty(shape=(0,))
    # with no splits every sample falls into a single group 0
    bins = bin_by_splits(feature, splits) if len(splits) else np.zeros(len(feature))
    if return_splits:
        return bins, splits
    return bins
| [
"pandas.DataFrame",
"numpy.quantile",
"sklearn.cluster.KMeans",
"numpy.empty",
"numpy.unique",
"numpy.zeros",
"numpy.nanmin",
"sklearn.tree.DecisionTreeClassifier",
"numpy.sort",
"numpy.array",
"numpy.arange",
"numpy.nanmax"
] | [((575, 589), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (587, 589), True, 'import pandas as pd\n'), ((2055, 2130), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'min_samples_leaf': 'min_samples', 'max_leaf_nodes': 'n_bins'}), '(min_samples_leaf=min_samples, max_leaf_nodes=n_bins)\n', (2077, 2130), False, 'from sklearn.tree import DecisionTreeClassifier, _tree\n'), ((2483, 2502), 'numpy.sort', 'np.sort', (['thresholds'], {}), '(thresholds)\n', (2490, 2502), True, 'import numpy as np\n'), ((3242, 3260), 'numpy.nanmax', 'np.nanmax', (['feature'], {}), '(feature)\n', (3251, 3260), True, 'import numpy as np\n'), ((3271, 3289), 'numpy.nanmin', 'np.nanmin', (['feature'], {}), '(feature)\n', (3280, 3289), True, 'import numpy as np\n'), ((4597, 4649), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'n_bins', 'random_state': 'random_state'}), '(n_clusters=n_bins, random_state=random_state)\n', (4603, 4649), False, 'from sklearn.cluster import KMeans\n'), ((4822, 4833), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (4830, 4833), True, 'import numpy as np\n'), ((1266, 1280), 'numpy.array', 'np.array', (['bins'], {}), '(bins)\n', (1274, 1280), True, 'import numpy as np\n'), ((1458, 1477), 'numpy.sort', 'np.sort', (['thresholds'], {}), '(thresholds)\n', (1465, 1477), True, 'import numpy as np\n'), ((3870, 3891), 'numpy.arange', 'np.arange', (['(0)', '(1)', 'step'], {}), '(0, 1, step)\n', (3879, 3891), True, 'import numpy as np\n'), ((3992, 4009), 'numpy.unique', 'np.unique', (['splits'], {}), '(splits)\n', (4001, 4009), True, 'import numpy as np\n'), ((3947, 3970), 'numpy.quantile', 'np.quantile', (['feature', 'q'], {}), '(feature, q)\n', (3958, 3970), True, 'import numpy as np\n'), ((3334, 3359), 'numpy.arange', 'np.arange', (['min', 'max', 'step'], {}), '(min, max, step)\n', (3343, 3359), True, 'import numpy as np\n'), ((6006, 6026), 'numpy.empty', 'np.empty', ([], {'shape': '(0,)'}), '(shape=(0,))\n', (6014, 6026), True, 
'import numpy as np\n')] |
import cv2
import click
import numpy as np
def main():
    """Build a binary colour mask from ../data/rgb.jpg and write it to ../data/mask.png."""
    rgb = cv2.imread("../data/rgb.jpg")  # NOTE: cv2.imread returns channels in BGR order
    bgrLower = np.array([10, 10, 80])  # lower BGR bound of the colour range
    bgrUpper = np.array([100, 100, 255])  # upper BGR bound of the colour range
    img_mask = cv2.inRange(rgb, bgrLower, bgrUpper)
    # NOTE(review): morphologyEx and dilate expect a kernel ndarray;
    # passing the tuple (15, 15) looks like a bug (np.ones((15, 15))
    # presumably intended) -- confirm against OpenCV docs.
    img_mask = cv2.morphologyEx(img_mask, cv2.MORPH_OPEN, (15, 15))
    img_mask[:100, :] = 0  # blank out the top 100 rows of the mask
    img_mask = cv2.dilate(img_mask, (15, 15))
    img_mask = cv2.bitwise_not(img_mask)  # invert so the matched region becomes black
    cv2.imwrite("../data/mask.png", img_mask)
    cv2.waitKey(10)
if __name__ == "__main__":
main() | [
"cv2.bitwise_not",
"cv2.dilate",
"cv2.waitKey",
"cv2.morphologyEx",
"cv2.imwrite",
"cv2.imread",
"numpy.array",
"cv2.inRange"
] | [((67, 96), 'cv2.imread', 'cv2.imread', (['"""../data/rgb.jpg"""'], {}), "('../data/rgb.jpg')\n", (77, 96), False, 'import cv2\n'), ((113, 135), 'numpy.array', 'np.array', (['[10, 10, 80]'], {}), '([10, 10, 80])\n', (121, 135), True, 'import numpy as np\n'), ((151, 176), 'numpy.array', 'np.array', (['[100, 100, 255]'], {}), '([100, 100, 255])\n', (159, 176), True, 'import numpy as np\n'), ((192, 228), 'cv2.inRange', 'cv2.inRange', (['rgb', 'bgrLower', 'bgrUpper'], {}), '(rgb, bgrLower, bgrUpper)\n', (203, 228), False, 'import cv2\n'), ((244, 296), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img_mask', 'cv2.MORPH_OPEN', '(15, 15)'], {}), '(img_mask, cv2.MORPH_OPEN, (15, 15))\n', (260, 296), False, 'import cv2\n'), ((338, 368), 'cv2.dilate', 'cv2.dilate', (['img_mask', '(15, 15)'], {}), '(img_mask, (15, 15))\n', (348, 368), False, 'import cv2\n'), ((384, 409), 'cv2.bitwise_not', 'cv2.bitwise_not', (['img_mask'], {}), '(img_mask)\n', (399, 409), False, 'import cv2\n'), ((414, 455), 'cv2.imwrite', 'cv2.imwrite', (['"""../data/mask.png"""', 'img_mask'], {}), "('../data/mask.png', img_mask)\n", (425, 455), False, 'import cv2\n'), ((460, 475), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (471, 475), False, 'import cv2\n')] |
"""
Distributed evaluating script for 3D shape classification with PipeWork dataset
"""
import argparse
import os
import sys
import time
import json
import random
import pickle
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
import torch
import torch.nn as nn
from torchvision import transforms
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import seaborn as sns
from models import build_classification
from datasets import PipeWorkCls
import datasets.data_utils as d_utils
from utils.util import AverageMeter, accuracy, str2bool, classification_metrics, plot_CM_wrapper, plot_wrongly_predicted_point_clouds, save_fig, fashion_scatter
from utils.logger import setup_logger
from utils.config import config, update_config
def parse_option():
    """Parse CLI arguments and merge them into the shared global `config`.

    Side effects: mutates the module-level `config` object (data paths, SE
    options, log_dir with a timestamp suffix) and prints args/config.

    Returns:
        (args, config): the parsed argparse namespace and the updated config.
    """
    parser = argparse.ArgumentParser('PipeWork classification evaluating')
    parser.add_argument('--cfg', type=str, required=True, help='config file')
    parser.add_argument('--load_path', required=True, type=str, metavar='PATH',
                        help='path to latest checkpoint')
    parser.add_argument('--loss', type=str, default='smooth', help='loss types, e.g., smooth or ce or wce or sqrt_ce')
    parser.add_argument("--use_avg_max_pool", type=str2bool, default=False, help='whether to apply avg and max pooling globally for the classification, need concat them.')
    parser.add_argument('--log_dir', type=str, default='log_eval', help='log dir [default: log_eval]')
    parser.add_argument('--data_root', type=str, default='data', help='root director of dataset')
    parser.add_argument("--data_aug", type=str2bool, default=True, help='whether to apply data augmentation')
    parser.add_argument('--num_workers', type=int, default=4, help='num of workers to use')
    parser.add_argument('--batch_size', type=int, help='batch_size')
    parser.add_argument('--num_points', type=int, help='num_points')
    parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel')
    parser.add_argument("--rng_seed", type=int, default=0, help='manual seed')
    # SE module
    parser.add_argument('--SE_squeeze_type', type=str, default='avg', help='squeeze types for SE, e.g., avg or max')
    parser.add_argument('--SE_excitation_type', type=str, default='sigmoid', help='excitation types for SE, e.g., sigmoid, relu or tanh')
    # plot t-SNE figure; Note: the t-SNE figure is not very good (less separated) compared w. the effect of t-SNE on MINIST
    parser.add_argument("--tsne", type=str2bool, default=False, help='whether to plot t-SNE figure on learned global features')
    args, unparsed = parser.parse_known_args()
    update_config(args.cfg)
    # copy CLI overrides onto the shared config object
    config.data_root = args.data_root
    config.use_avg_max_pool = args.use_avg_max_pool
    config.loss = args.loss
    config.data_aug = args.data_aug
    config.num_workers = args.num_workers
    config.load_path = args.load_path
    config.rng_seed = args.rng_seed
    config.local_rank = args.local_rank
    # SE module
    config.SE_squeeze_type = args.SE_squeeze_type
    config.SE_excitation_type = args.SE_excitation_type
    # t-SNE figure
    config.tsne = args.tsne
    # e.g. "configs/pseudo_grid.yaml" -> "pseudo_grid"
    ddir_name = args.cfg.split('.')[-2].split('/')[-1]
    # Note: different folder name from the training log (i.e., ckpt folder for training)
    if config.data_aug:
        config.log_dir = os.path.join(args.log_dir, 'pipework', f'{ddir_name}_{int(time.time())}_DA')
    else:
        config.log_dir = os.path.join(args.log_dir, 'pipework', f'{ddir_name}_{int(time.time())}_no_DA')
    if args.batch_size:
        config.batch_size = args.batch_size
    if args.num_points:
        config.num_points = args.num_points
    print(args)
    print(config)
    # torch.manual_seed(args.rng_seed)
    # torch.cuda.manual_seed_all(args.rng_seed)
    # random.seed(args.rng_seed)
    # np.random.seed(args.rng_seed)
    return args, config
def get_loader(args):
    """Build the distributed DataLoader over the PipeWork test split."""
    transform = transforms.Compose([
        d_utils.PointcloudToTensor()
    ])
    dataset = PipeWorkCls(input_features_dim=config.input_features_dim,
                          num_points=args.num_points,
                          data_root=args.data_root,
                          transforms=transform,
                          subsampling_parameter=config.sampleDl,
                          split='test')
    # keep evaluation order deterministic across ranks
    sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=False)
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        pin_memory=True,
        sampler=sampler,
        drop_last=False,
    )
    return loader
def load_checkpoint(config, model):
    """Load weights from config.load_path into `model` (in-place).

    Also sets config.start_epoch to the checkpoint's epoch + 1.
    """
    logger.info("=> loading checkpoint '{}'".format(config.load_path))
    # map to CPU first so loading works regardless of the saving device
    checkpoint = torch.load(config.load_path, map_location='cpu')
    config.start_epoch = checkpoint['epoch'] + 1
    model.load_state_dict(checkpoint['model'])
    logger.info("=> loaded successfully '{}' (epoch {})".format(config.load_path, checkpoint['epoch']))
    del checkpoint
    # release cached GPU memory now that the checkpoint dict is dropped
    torch.cuda.empty_cache()
def main(config,path=None):
    """Build the test loader and model, load the checkpoint, and run evaluation.

    Args:
        config: global config (carries load_path, local_rank, etc.)
        path: optional path of the dumped config.json; validate() reuses its
            directory for saving evaluation artifacts.
    """
    test_loader = get_loader(config)
    n_data = len(test_loader.dataset) # 934 instances in the test set
    logger.info(f"length of testing dataset: {n_data}")
    model, criterion = build_classification(config) # use a label smoothing CE loss
    model.cuda()
    criterion.cuda()
    model = DistributedDataParallel(model, device_ids=[config.local_rank], broadcast_buffers=False)
    # resume from a checkpoint to validate
    if config.load_path:
        assert os.path.isfile(config.load_path)
        load_checkpoint(config, model)
        logger.info("==> checking loaded ckpt")
    validate(test_loader, model, criterion, config, path, num_votes=10)
def get_avg_global_features(points, mask, features, model):
    """Capture the avg-pool global feature of a forward pass via a hook.

    Args:
        points: points batch
        mask: points mask
        features: point features
        model: the loaded (DDP-wrapped) model

    Returns:
        the output tensor of the classifier's `pool_avg` layer
    """
    captured = [None]

    def _capture(module_, input_, output_):
        captured[0] = output_

    with torch.no_grad():
        # the attribute name `pool_avg` comes from the classifier definition
        handle = model.module.classifier.pool_avg.register_forward_hook(_capture)
        model(points, mask, features)  # (B,num_classes)
        handle.remove()
    return captured[0]
def get_max_global_features(points, mask, features, model):
    """Obtain the max pooling global features.

    Args:
        points ([type]): points_batch
        mask ([type]): points mask
        features ([type]): point features
        model ([type]): the loaded model

    Returns:
        [type]: the output of the classifier's `pool_max` layer
        (the max-pooled global features; the former docstring said
        "avg", a copy-paste slip from get_avg_global_features)
    """
    with torch.no_grad():
        output = None
        def hook_func(module_, input_, output_):
            nonlocal output
            output = output_
        # the attribute name (pool_max) is determined by the classifier definition
        hook = model.module.classifier.pool_max.register_forward_hook(hook_func)
        model(points, mask, features) # (B,num_classes)
        hook.remove()
        return output
def validate(test_loader, model, criterion, config, path=None, num_votes=10):
    """Evaluate on the test set with test-time-augmentation voting.

    Logits from `num_votes` passes (vote 0 is un-augmented; later votes use
    scale-and-jitter augmentation when config.data_aug) are summed for the
    final prediction. On the last vote extra artifacts are produced:
    classification report, optional t-SNE plots, confusion matrix and a
    gallery of wrongly predicted point clouds.

    Args:
        test_loader: DataLoader over the test set
        model: trained, DDP-wrapped classification model
        criterion: loss function
        config: global config
        path: path of the dumped config.json; its directory is reused for figures
        num_votes (int): number of voting rounds

    Returns:
        (vote_acc1, vote_avg_acc): overall and class-averaged voting accuracy
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    model.eval()
    with torch.no_grad():
        end = time.time()
        vote_preds = None
        TS = d_utils.BatchPointcloudScaleAndJitter(scale_low=config.scale_low,
                                                   scale_high=config.scale_high,
                                                   std=config.noise_std,
                                                   clip=config.noise_clip)
        for v in range(num_votes):
            preds = []
            targets = []
            global_features=[]
            for idx, (points, mask, features, target) in enumerate(test_loader):
                # augment for voting (vote 0 always uses the raw points)
                if v > 0 and config.data_aug:
                    points = TS(points)
                    if config.input_features_dim == 3:
                        features = points
                        features = features.transpose(1, 2).contiguous()
                    elif config.input_features_dim == 4:
                        features = torch.ones(size=(points.shape[0], points.shape[1], 1), dtype=torch.float32)
                        features = torch.cat([features, points], -1)
                        features = features.transpose(1, 2).contiguous()
                    else:
                        raise NotImplementedError(
                            f"input_features_dim {config.input_features_dim} in voting not supported")
                # forward
                points = points.cuda(non_blocking=True)  # (B,N,3)
                mask = mask.cuda(non_blocking=True)  # (B,N)
                features = features.cuda(non_blocking=True)  # (B,3,N)
                target = target.cuda(non_blocking=True)  # (B,)
                # when t-sne, then collect global features (either avg or both)
                if config.tsne:
                    global_feature_avg = get_avg_global_features(points, mask, features, model)
                    if config.use_avg_max_pool:
                        # BUGFIX: this previously called get_avg_global_features a
                        # second time; the max-pooled branch must come from
                        # get_max_global_features (which was otherwise never used)
                        global_feature_max = get_max_global_features(points, mask, features, model)
                        global_feature = torch.cat((global_feature_max, global_feature_avg),dim=1)  # Bx2Cx1
                    else:
                        global_feature = global_feature_avg
                    global_features.append(global_feature)
                pred = model(points, mask, features)  # (B,num_classes)
                target = target.view(-1)
                loss = criterion(pred, target)
                acc1 = accuracy(pred, target, topk=(1,))  # array
                # no need to compute average accuracy for each batch since many category acc are 0.
                losses.update(loss.item(), points.size(0))
                top1.update(acc1[0].item(), points.size(0))
                preds.append(pred)
                targets.append(target)
                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()
                if idx % config.print_freq == 0:
                    logger.info(
                        f'Test: [{idx}/{len(test_loader)}]\t'
                        f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        f'Loss {losses.val:.4f} ({losses.avg:.4f})\t'
                        f'Acc@1 {top1.val:.3%} ({top1.avg:.3%})')
            logger.info(f' * Acc@1 {top1.avg:.3%}')
            top1.reset()
            preds = torch.cat(preds, 0)
            targets = torch.cat(targets, 0)
            if vote_preds is None:
                vote_preds = preds
            else:
                # sum all logits for voting predictions
                vote_preds += preds
            vote_acc1 = accuracy(vote_preds, targets, topk=(1,))[0].item()
            logger.info(f' * Vote{v} Acc@1 {vote_acc1:.3%}')
            _, vote_avg_acc = classification_metrics(vote_preds.cpu().numpy(), targets.cpu().numpy(), num_classes=config.num_classes)
            logger.info(f' * Vote{v} avg acc {vote_avg_acc:.3%}')
            # output more eval metrics (precision, recall, etc) and confusion matrix in the last voting
            if v==num_votes-1:
                # precision, recall, f1-score, etc.
                logger.info(f' * More evaluation metrics of Vote{v}:')
                label_to_names = {0: 'BlindFlange', 1: 'Cross', 2: 'Elbow90', 3: 'Elbownon90', 4: 'Flange', 5: 'FlangeWN', 6: 'Olet', 7: 'OrificeFlange', 8: 'Pipe', 9: 'ReducerCONC', 10: 'ReducerECC', 11: 'ReducerInsert', 12: 'SafetyValve', 13: 'Strainer', 14: 'Tee', 15: 'TeeRED', 16: 'Valve'}
                target_names = list(label_to_names.values())
                y_true = targets.cpu().numpy()
                y_pred = np.argmax(vote_preds.cpu().numpy(), -1)
                cls_report = classification_report(
                    y_true,
                    y_pred,
                    target_names=target_names,
                    digits=4)
                logger.info(f'\n{cls_report}')
                # draw t-SNE figure for the learned global features
                # (the clusters are less separated than the classic MNIST t-SNE)
                if config.tsne:
                    if path:
                        # path log_eval/pipework/pseudo_grid_1629165562/config.json
                        save_path = os.path.join(*path.split('/')[:-1])
                    else:
                        save_path = os.path.join('images')
                    global_features = torch.cat(global_features, 0)  # (N, 4608 =2304*2)
                    # dump the global features
                    filename = os.path.join(save_path, f'global_features_test.pkl')
                    with open(filename, 'wb') as f:
                        pickle.dump((global_features.cpu().numpy(), targets.cpu().numpy()), f)
                    # plot t-SNE for all categories
                    time_start = time.time()
                    pipework_tsne = TSNE(random_state=123).fit_transform(global_features.cpu().numpy())
                    logger.info('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start))
                    fashion_scatter(pipework_tsne, targets.cpu().numpy())
                    save_fig(save_path,f'tSNE_pipework', tight_layout=True)
                    # plot t-SNE for selected categories
                    # Frequent classes: [0,2,3,4,5,9,14,15,16]
                    # Misclassified classes: [2,3,4,5,9,10,14,15]
                    frequent_classes = [0,2,3,4,5,9,14,15,16]
                    mask_all = np.zeros(targets.shape[0]).astype(bool)
                    for item in frequent_classes:
                        mask_all = mask_all | (targets.cpu().numpy()==item)
                    global_features_filtered = global_features.cpu().numpy()[mask_all]  # [N'=873, 4608]
                    targets_filtered = targets.cpu().numpy()[mask_all]  # [N'=873]
                    time_start = time.time()
                    pipework_tsne = TSNE(random_state=123).fit_transform(global_features_filtered)
                    logger.info('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start))
                    fashion_scatter(pipework_tsne, targets_filtered, True)
                    save_fig(save_path,f'tSNE_pipework_filtered', tight_layout=True)
                # plot confusion matrix
                if path:
                    # path log_eval/pipework/pseudo_grid_1629165562/config.json
                    save_path = os.path.join(*path.split('/')[:-1])
                    # NOTE(review): `labels` is keyword-only in recent
                    # scikit-learn; this positional call relies on an older
                    # version -- confirm the pinned sklearn release.
                    C = confusion_matrix(
                        y_true,
                        y_pred,
                        np.arange(config.num_classes))
                    # plot 3 figures, 1 default style and seaborn style w or w/o percents
                    plot_CM_wrapper(
                        C,y_true,y_pred,
                        label_to_names,save_path,
                        filename='CM_seaborn',
                        figsize=(12,12),
                        fmt='0.1f')
                    logger.info("Confusion matrix saved to {}".format(save_path))
                    # plot wrongly predicted examples for error analysis
                    indices_wrong = plot_wrongly_predicted_point_clouds(
                        y_true,
                        y_pred,
                        test_loader,
                        save_path,
                        label_to_names,
                        filename='wrongly_predicted_point_clouds',
                        sampling_ratio=0.1)
                    logger.info("{} wrong predictions!".format(len(indices_wrong)))
                    logger.info("The indices of test set which are predicted wrongly are: {}".format(indices_wrong))
        return vote_acc1, vote_avg_acc
if __name__ == "__main__":
opt, config = parse_option()
torch.cuda.set_device(config.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
os.makedirs(opt.log_dir, exist_ok=True)
os.environ["JOB_LOAD_DIR"] = os.path.dirname(config.load_path)
logger = setup_logger(output=config.log_dir, distributed_rank=dist.get_rank(), name="pipework_eval")
if dist.get_rank() == 0:
path = os.path.join(config.log_dir, "config.json")
with open(path, 'w') as f:
json.dump(vars(opt), f, indent=2)
json.dump(vars(config), f, indent=2)
os.system('cp %s %s' % (opt.cfg, config.log_dir))
logger.info("Full config saved to {}".format(path))
main(config, path) | [
"argparse.ArgumentParser",
"torch.cat",
"sklearn.metrics.classification_report",
"os.path.isfile",
"datasets.data_utils.BatchPointcloudScaleAndJitter",
"numpy.arange",
"torch.no_grad",
"utils.util.AverageMeter",
"os.path.join",
"sys.path.append",
"torch.ones",
"os.path.abspath",
"utils.util.... | [((262, 287), 'os.path.dirname', 'os.path.dirname', (['BASE_DIR'], {}), '(BASE_DIR)\n', (277, 287), False, 'import os\n'), ((288, 313), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (303, 313), False, 'import sys\n'), ((224, 249), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (239, 249), False, 'import os\n'), ((1154, 1215), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""PipeWork classification evaluating"""'], {}), "('PipeWork classification evaluating')\n", (1177, 1215), False, 'import argparse\n'), ((3018, 3041), 'utils.config.update_config', 'update_config', (['args.cfg'], {}), '(args.cfg)\n', (3031, 3041), False, 'from utils.config import config, update_config\n'), ((4392, 4593), 'datasets.PipeWorkCls', 'PipeWorkCls', ([], {'input_features_dim': 'config.input_features_dim', 'num_points': 'args.num_points', 'data_root': 'args.data_root', 'transforms': 'test_transforms', 'subsampling_parameter': 'config.sampleDl', 'split': '"""test"""'}), "(input_features_dim=config.input_features_dim, num_points=args.\n num_points, data_root=args.data_root, transforms=test_transforms,\n subsampling_parameter=config.sampleDl, split='test')\n", (4403, 4593), False, 'from datasets import PipeWorkCls\n'), ((4704, 4780), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['test_dataset'], {'shuffle': '(False)'}), '(test_dataset, shuffle=False)\n', (4751, 4780), False, 'import torch\n'), ((4799, 4978), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)', 'sampler': 'test_sampler', 'drop_last': '(False)'}), '(test_dataset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.num_workers, pin_memory=True, sampler=\n test_sampler, drop_last=False)\n', (4826, 4978), False, 'import torch\n'), ((5397, 
5445), 'torch.load', 'torch.load', (['config.load_path'], {'map_location': '"""cpu"""'}), "(config.load_path, map_location='cpu')\n", (5407, 5445), False, 'import torch\n'), ((5671, 5695), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5693, 5695), False, 'import torch\n'), ((5913, 5941), 'models.build_classification', 'build_classification', (['config'], {}), '(config)\n', (5933, 5941), False, 'from models import build_classification\n'), ((6025, 6116), 'torch.nn.parallel.DistributedDataParallel', 'DistributedDataParallel', (['model'], {'device_ids': '[config.local_rank]', 'broadcast_buffers': '(False)'}), '(model, device_ids=[config.local_rank],\n broadcast_buffers=False)\n', (6048, 6116), False, 'from torch.nn.parallel import DistributedDataParallel\n'), ((8037, 8051), 'utils.util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (8049, 8051), False, 'from utils.util import AverageMeter, accuracy, str2bool, classification_metrics, plot_CM_wrapper, plot_wrongly_predicted_point_clouds, save_fig, fashion_scatter\n'), ((8065, 8079), 'utils.util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (8077, 8079), False, 'from utils.util import AverageMeter, accuracy, str2bool, classification_metrics, plot_CM_wrapper, plot_wrongly_predicted_point_clouds, save_fig, fashion_scatter\n'), ((8091, 8105), 'utils.util.AverageMeter', 'AverageMeter', ([], {}), '()\n', (8103, 8105), False, 'from utils.util import AverageMeter, accuracy, str2bool, classification_metrics, plot_CM_wrapper, plot_wrongly_predicted_point_clouds, save_fig, fashion_scatter\n'), ((17248, 17288), 'torch.cuda.set_device', 'torch.cuda.set_device', (['config.local_rank'], {}), '(config.local_rank)\n', (17269, 17288), False, 'import torch\n'), ((17293, 17367), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""', 'init_method': '"""env://"""'}), "(backend='nccl', init_method='env://')\n", (17329, 17367), False, 'import torch\n'), 
((17500, 17539), 'os.makedirs', 'os.makedirs', (['opt.log_dir'], {'exist_ok': '(True)'}), '(opt.log_dir, exist_ok=True)\n', (17511, 17539), False, 'import os\n'), ((17574, 17607), 'os.path.dirname', 'os.path.dirname', (['config.load_path'], {}), '(config.load_path)\n', (17589, 17607), False, 'import os\n'), ((6197, 6229), 'os.path.isfile', 'os.path.isfile', (['config.load_path'], {}), '(config.load_path)\n', (6211, 6229), False, 'import os\n'), ((6743, 6758), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6756, 6758), False, 'import torch\n'), ((7517, 7532), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7530, 7532), False, 'import torch\n'), ((8133, 8148), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8146, 8148), False, 'import torch\n'), ((8164, 8175), 'time.time', 'time.time', ([], {}), '()\n', (8173, 8175), False, 'import time\n'), ((8215, 8360), 'datasets.data_utils.BatchPointcloudScaleAndJitter', 'd_utils.BatchPointcloudScaleAndJitter', ([], {'scale_low': 'config.scale_low', 'scale_high': 'config.scale_high', 'std': 'config.noise_std', 'clip': 'config.noise_clip'}), '(scale_low=config.scale_low,\n scale_high=config.scale_high, std=config.noise_std, clip=config.noise_clip)\n', (8252, 8360), True, 'import datasets.data_utils as d_utils\n'), ((17721, 17736), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (17734, 17736), True, 'import torch.distributed as dist\n'), ((17758, 17801), 'os.path.join', 'os.path.join', (['config.log_dir', '"""config.json"""'], {}), "(config.log_dir, 'config.json')\n", (17770, 17801), False, 'import os\n'), ((4336, 4364), 'datasets.data_utils.PointcloudToTensor', 'd_utils.PointcloudToTensor', ([], {}), '()\n', (4362, 4364), True, 'import datasets.data_utils as d_utils\n'), ((11670, 11689), 'torch.cat', 'torch.cat', (['preds', '(0)'], {}), '(preds, 0)\n', (11679, 11689), False, 'import torch\n'), ((11712, 11733), 'torch.cat', 'torch.cat', (['targets', '(0)'], {}), '(targets, 0)\n', (11721, 11733), 
False, 'import torch\n'), ((17675, 17690), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (17688, 17690), True, 'import torch.distributed as dist\n'), ((17944, 17993), 'os.system', 'os.system', (["('cp %s %s' % (opt.cfg, config.log_dir))"], {}), "('cp %s %s' % (opt.cfg, config.log_dir))\n", (17953, 17993), False, 'import os\n'), ((10614, 10647), 'utils.util.accuracy', 'accuracy', (['pred', 'target'], {'topk': '(1,)'}), '(pred, target, topk=(1,))\n', (10622, 10647), False, 'from utils.util import AverageMeter, accuracy, str2bool, classification_metrics, plot_CM_wrapper, plot_wrongly_predicted_point_clouds, save_fig, fashion_scatter\n'), ((11202, 11213), 'time.time', 'time.time', ([], {}), '()\n', (11211, 11213), False, 'import time\n'), ((13006, 13080), 'sklearn.metrics.classification_report', 'classification_report', (['y_true', 'y_pred'], {'target_names': 'target_names', 'digits': '(4)'}), '(y_true, y_pred, target_names=target_names, digits=4)\n', (13027, 13080), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((13872, 13901), 'torch.cat', 'torch.cat', (['global_features', '(0)'], {}), '(global_features, 0)\n', (13881, 13901), False, 'import torch\n'), ((14000, 14052), 'os.path.join', 'os.path.join', (['save_path', 'f"""global_features_test.pkl"""'], {}), "(save_path, f'global_features_test.pkl')\n", (14012, 14052), False, 'import os\n'), ((14286, 14297), 'time.time', 'time.time', ([], {}), '()\n', (14295, 14297), False, 'import time\n'), ((14599, 14655), 'utils.util.save_fig', 'save_fig', (['save_path', 'f"""tSNE_pipework"""'], {'tight_layout': '(True)'}), "(save_path, f'tSNE_pipework', tight_layout=True)\n", (14607, 14655), False, 'from utils.util import AverageMeter, accuracy, str2bool, classification_metrics, plot_CM_wrapper, plot_wrongly_predicted_point_clouds, save_fig, fashion_scatter\n'), ((15320, 15331), 'time.time', 'time.time', ([], {}), '()\n', (15329, 15331), False, 'import time\n'), ((15554, 15608), 
'utils.util.fashion_scatter', 'fashion_scatter', (['pipework_tsne', 'targets_filtered', '(True)'], {}), '(pipework_tsne, targets_filtered, True)\n', (15569, 15608), False, 'from utils.util import AverageMeter, accuracy, str2bool, classification_metrics, plot_CM_wrapper, plot_wrongly_predicted_point_clouds, save_fig, fashion_scatter\n'), ((15629, 15694), 'utils.util.save_fig', 'save_fig', (['save_path', 'f"""tSNE_pipework_filtered"""'], {'tight_layout': '(True)'}), "(save_path, f'tSNE_pipework_filtered', tight_layout=True)\n", (15637, 15694), False, 'from utils.util import AverageMeter, accuracy, str2bool, classification_metrics, plot_CM_wrapper, plot_wrongly_predicted_point_clouds, save_fig, fashion_scatter\n'), ((16194, 16313), 'utils.util.plot_CM_wrapper', 'plot_CM_wrapper', (['C', 'y_true', 'y_pred', 'label_to_names', 'save_path'], {'filename': '"""CM_seaborn"""', 'figsize': '(12, 12)', 'fmt': '"""0.1f"""'}), "(C, y_true, y_pred, label_to_names, save_path, filename=\n 'CM_seaborn', figsize=(12, 12), fmt='0.1f')\n", (16209, 16313), False, 'from utils.util import AverageMeter, accuracy, str2bool, classification_metrics, plot_CM_wrapper, plot_wrongly_predicted_point_clouds, save_fig, fashion_scatter\n'), ((16618, 16780), 'utils.util.plot_wrongly_predicted_point_clouds', 'plot_wrongly_predicted_point_clouds', (['y_true', 'y_pred', 'test_loader', 'save_path', 'label_to_names'], {'filename': '"""wrongly_predicted_point_clouds"""', 'sampling_ratio': '(0.1)'}), "(y_true, y_pred, test_loader, save_path,\n label_to_names, filename='wrongly_predicted_point_clouds',\n sampling_ratio=0.1)\n", (16653, 16780), False, 'from utils.util import AverageMeter, accuracy, str2bool, classification_metrics, plot_CM_wrapper, plot_wrongly_predicted_point_clouds, save_fig, fashion_scatter\n'), ((3774, 3785), 'time.time', 'time.time', ([], {}), '()\n', (3783, 3785), False, 'import time\n'), ((3886, 3897), 'time.time', 'time.time', ([], {}), '()\n', (3895, 3897), False, 'import time\n'), 
((10219, 10277), 'torch.cat', 'torch.cat', (['(global_feature_max, global_feature_avg)'], {'dim': '(1)'}), '((global_feature_max, global_feature_avg), dim=1)\n', (10228, 10277), False, 'import torch\n'), ((11161, 11172), 'time.time', 'time.time', ([], {}), '()\n', (11170, 11172), False, 'import time\n'), ((11940, 11980), 'utils.util.accuracy', 'accuracy', (['vote_preds', 'targets'], {'topk': '(1,)'}), '(vote_preds, targets, topk=(1,))\n', (11948, 11980), False, 'from utils.util import AverageMeter, accuracy, str2bool, classification_metrics, plot_CM_wrapper, plot_wrongly_predicted_point_clouds, save_fig, fashion_scatter\n'), ((13811, 13833), 'os.path.join', 'os.path.join', (['"""images"""'], {}), "('images')\n", (13823, 13833), False, 'import os\n'), ((16052, 16081), 'numpy.arange', 'np.arange', (['config.num_classes'], {}), '(config.num_classes)\n', (16061, 16081), True, 'import numpy as np\n'), ((9090, 9165), 'torch.ones', 'torch.ones', ([], {'size': '(points.shape[0], points.shape[1], 1)', 'dtype': 'torch.float32'}), '(size=(points.shape[0], points.shape[1], 1), dtype=torch.float32)\n', (9100, 9165), False, 'import torch\n'), ((9201, 9234), 'torch.cat', 'torch.cat', (['[features, points]', '(-1)'], {}), '([features, points], -1)\n', (9210, 9234), False, 'import torch\n'), ((14334, 14356), 'sklearn.manifold.TSNE', 'TSNE', ([], {'random_state': '(123)'}), '(random_state=123)\n', (14338, 14356), False, 'from sklearn.manifold import TSNE\n'), ((14935, 14961), 'numpy.zeros', 'np.zeros', (['targets.shape[0]'], {}), '(targets.shape[0])\n', (14943, 14961), True, 'import numpy as np\n'), ((15368, 15390), 'sklearn.manifold.TSNE', 'TSNE', ([], {'random_state': '(123)'}), '(random_state=123)\n', (15372, 15390), False, 'from sklearn.manifold import TSNE\n'), ((14480, 14491), 'time.time', 'time.time', ([], {}), '()\n', (14489, 14491), False, 'import time\n'), ((15509, 15520), 'time.time', 'time.time', ([], {}), '()\n', (15518, 15520), False, 'import time\n')] |
from mpi4py import MPI
from solver import Solver
import torch
import os
import time
import warnings
import datetime
import numpy as np
from tqdm import tqdm
from misc.utils import color, get_fake, get_labels, get_loss_value
from misc.utils import split, TimeNow, to_var
from misc.losses import _compute_loss_smooth, _GAN_LOSS
import torch.utils.data.distributed
from misc.utils import horovod
# Horovod handle used for distributed training (wrapper defined in
# misc.utils; size() reports the number of workers).
hvd = horovod()
# MPI communicator used to synchronise workers between epochs.
comm = MPI.COMM_WORLD
# Silence third-party deprecation/user warnings during training.
warnings.filterwarnings('ignore')
class Train(Solver):
    """SMIT training loop.

    Alternates discriminator and generator updates over the data loader
    (RaGAN-style: each batch is split in half, one half per update),
    periodically saves weights and translation samples, and linearly
    decays both learning rates after ``num_epochs_decay`` epochs.
    Supports resuming from a pretrained checkpoint and distributed
    execution via horovod/MPI.
    """

    def __init__(self, config, data_loader):
        super(Train, self).__init__(config, data_loader)
        self.count_seed = 0
        self.step_seed = 4  # 1 disc - 3 gen
        self.run()

    # ============================================================#
    # ============================================================#
    def update_lr(self, g_lr, d_lr):
        """Set new learning rates on both optimizers."""
        for param_group in self.g_optimizer.param_groups:
            param_group['lr'] = g_lr
        for param_group in self.d_optimizer.param_groups:
            param_group['lr'] = d_lr

    # ============================================================#
    # ============================================================#
    def reset_grad(self):
        """Zero the gradients of both optimizers."""
        self.g_optimizer.zero_grad()
        self.d_optimizer.zero_grad()

    # ============================================================#
    # ============================================================#
    def update_loss(self, loss, value):
        """Append *value* to the running history for *loss*.

        ``setdefault`` creates the history list on first use; it replaces
        the original ``try/except BaseException`` pattern, which was far
        too broad (only a missing key could actually fail there).
        """
        self.LOSS.setdefault(loss, []).append(value)

    # ============================================================#
    # ============================================================#
    def get_labels(self):
        """Return the target-domain labels for the configured dataset."""
        return get_labels(
            self.config.image_size,
            self.config.dataset_fake,
            attr=self.data_loader.dataset)

    # ============================================================#
    # ============================================================#
    def debug_vars(self, start):
        """Collect a small fixed batch (images, labels, style) used for
        qualitative debug samples.

        When training starts from scratch (``start == 0``) an initial
        sample grid is written immediately.
        """
        fixed_x = []
        fixed_label = []
        for i, (images, labels, _) in enumerate(self.data_loader):
            fixed_x.append(images)
            fixed_label.append(labels)
            # Grab roughly 16 images regardless of batch size.
            if i == max(1, int(16 / self.config.batch_size)):
                break
        fixed_x = torch.cat(fixed_x, dim=0)
        fixed_label = torch.cat(fixed_label, dim=0)
        fixed_style = self.random_style(fixed_x, seed=self.count_seed)
        if start == 0:
            self.generate_SMIT(
                fixed_x,
                self.output_sample(0, 0),
                Multimodal=1,
                label=fixed_label,
                training=True,
                fixed_style=fixed_style)
            if self.config.image_size == 256:
                self.generate_SMIT(
                    fixed_x,
                    self.output_sample(0, 0),
                    label=fixed_label,
                    training=True)
        return fixed_x, fixed_label, fixed_style

    # ============================================================#
    # ============================================================#
    def _GAN_LOSS(self, real_x, fake_x, label):
        """Compute adversarial and classification losses via misc.losses.

        Datasets with mutually exclusive classes use cross-entropy, in
        which case the one-hot label is converted to a class index.
        """
        cross_entropy = self.config.dataset_fake in [
            'painters_14', 'Animals', 'Image2Weather', 'Image2Season',
            'Image2Edges', 'RafD', 'BP4D_idt'
            # 'Image2Edges', 'Yosemite', 'RafD', 'BP4D_idt'
        ]
        if cross_entropy:
            label = torch.max(label, dim=1)[1]
        return _GAN_LOSS(
            self.D, real_x, fake_x, label, cross_entropy=cross_entropy)

    # ============================================================#
    # ============================================================#
    def INFO(self, epoch, iter):
        """Refresh the tqdm postfix with the current losses (rank 0 only)."""
        # PRINT log info
        if self.verbose:
            if (iter + 1) % self.config.log_step == 0 or iter + epoch == 0:
                self.loss = {
                    key: get_loss_value(value)
                    for key, value in self.loss.items()
                }
                color(self.loss, 'Gatm', 'blue')
                self.progress_bar.set_postfix(**self.loss)
            if (iter + 1) == len(self.data_loader):
                # '' iterates to an empty mapping, clearing the postfix.
                self.progress_bar.set_postfix('')

    # ============================================================#
    # ============================================================#
    def Decay_lr(self, current_epoch=0):
        """Linearly decay both learning rates by one decay step."""
        self.d_lr -= (
            self.config.d_lr /
            float(self.config.num_epochs - self.config.num_epochs_decay))
        self.g_lr -= (
            self.config.g_lr /
            float(self.config.num_epochs - self.config.num_epochs_decay))
        self.update_lr(self.g_lr, self.d_lr)
        if self.verbose and current_epoch % self.config.save_epoch == 0:
            self.PRINT('Decay learning rate to g_lr: {}, d_lr: {}.'.format(
                self.g_lr, self.d_lr))

    # ============================================================#
    # ============================================================#
    def RESUME_INFO(self):
        """Return (start_epoch, total_iter) when resuming from a
        pretrained model, replaying the learning-rate decay; (0, 0)
        otherwise."""
        if not self.config.pretrained_model:
            return 0, 0
        # pretrained_model is named '<epoch>_<iters_per_epoch>'.
        start = int(self.config.pretrained_model.split('_')[0]) + 1
        total_iter = start * int(self.config.pretrained_model.split('_')[1])
        self.count_seed = start * total_iter * self.step_seed
        for e in range(start):
            if e > self.config.num_epochs_decay:
                self.Decay_lr(e)
        return start, total_iter

    # ============================================================#
    # ============================================================#
    def MISC(self, epoch, iter):
        """End-of-epoch housekeeping: checkpointing, sample generation,
        loss logging, worker synchronisation and lr decay."""
        if epoch % self.config.save_epoch == 0 and self.verbose:
            # Save Weights
            self.save(epoch, iter + 1)
            # Save Translation
            self.generate_SMIT(
                self.fixed_x,
                self.output_sample(epoch, iter + 1),
                Multimodal=1,
                label=self.fixed_label,
                training=True,
                fixed_style=self.fixed_style)
            if self.config.image_size == 256:
                self.generate_SMIT(
                    self.fixed_x,
                    self.output_sample(epoch, iter + 1),
                    Multimodal=1,
                    label=self.fixed_label,
                    training=True)
            if self.config.image_size == 256:
                self.generate_SMIT(
                    self.fixed_x,
                    self.output_sample(epoch, iter + 1),
                    label=self.fixed_label,
                    training=True)
            # Debug INFO
            elapsed = time.time() - self.start_time
            elapsed = str(datetime.timedelta(seconds=elapsed))
            log = '-> %s | Elapsed [Iter: %d] (%d/%d) : %s | %s\nTrain' % (
                TimeNow(), self.total_iter, epoch, self.config.num_epochs,
                elapsed, self.Log)
            for tag, value in sorted(self.LOSS.items()):
                log += ", {}: {:.4f}".format(tag, np.array(value).mean())
            self.PRINT(log)
            # self.PLOT(epoch)
        comm.Barrier()
        # Decay learning rate
        if epoch > self.config.num_epochs_decay:
            self.Decay_lr(epoch)

    # ============================================================#
    # ============================================================#
    def reset_losses(self):
        """Return a fresh per-iteration loss dict."""
        return {}

    # ============================================================#
    # ============================================================#
    def current_losses(self, mode, **kwargs):
        """Sum and record every loss whose name contains *mode* ('D'/'G')."""
        loss = 0
        for key in kwargs:  # values unused; iterate keys directly
            if mode in key:
                loss += self.loss[key]
                self.update_loss(key, get_loss_value(self.loss[key]))
        return loss

    # ============================================================#
    # ============================================================#
    def to_var(self, *args):
        """Wrap every argument with misc.utils.to_var (device placement)."""
        vars = []
        for arg in args:
            vars.append(to_var(arg))
        return vars

    # ============================================================#
    # ============================================================#
    def train_model(self, generator=False, discriminator=False):
        """Toggle ``requires_grad`` on generator and discriminator
        parameters so only the selected network is updated."""
        if torch.cuda.device_count() > 1 and hvd.size() == 1:
            # DataParallel wraps the real network in ``.module``.
            G = self.G.module
        else:
            G = self.G
        for p in G.generator.parameters():
            try:
                p.requires_grad_(generator)
            except AttributeError:
                # Older torch versions lack the in-place setter.
                p.requires_grad = generator
        for p in self.D.parameters():
            try:
                p.requires_grad_(discriminator)
            except AttributeError:
                p.requires_grad = discriminator

    # ============================================================#
    # ============================================================#
    def Dis_update(self, real_x, real_c, fake_c):
        """One discriminator step: adversarial + classification losses."""
        self.train_model(discriminator=True)
        real_x, real_c, fake_c = self.to_var(real_x, real_c, fake_c)
        style_fake = to_var(self.random_style(real_x, seed=self.count_seed))
        self.count_seed += 1
        fake_x = self.G(real_x, fake_c, style_fake)[0]
        d_loss_src, d_loss_cls = self._GAN_LOSS(real_x, fake_x, real_c)
        self.loss['Dsrc'] = d_loss_src
        self.loss['Dcls'] = d_loss_cls * self.config.lambda_cls
        d_loss = self.current_losses('D', **self.loss)
        self.reset_grad()
        d_loss.backward()
        self.d_optimizer.step()

    # ============================================================#
    # ============================================================#
    def Gen_update(self, real_x, real_c, fake_c):
        """One generator step: adversarial + cls + reconstruction +
        attention regularizers (+ optional identity loss)."""
        self.train_model(generator=True)
        real_x, real_c, fake_c = self.to_var(real_x, real_c, fake_c)
        criterion_l1 = torch.nn.L1Loss()
        # Three independent style draws: translation, cycle, identity.
        style_fake = to_var(self.random_style(real_x, seed=self.count_seed))
        style_rec = to_var(self.random_style(real_x, seed=self.count_seed + 1))
        style_identity = to_var(
            self.random_style(real_x, seed=self.count_seed + 2))
        self.count_seed += 3
        fake_x = self.G(real_x, fake_c, style_fake)
        g_loss_src, g_loss_cls = self._GAN_LOSS(fake_x[0], real_x, fake_c)
        self.loss['Gsrc'] = g_loss_src
        self.loss['Gcls'] = g_loss_cls * self.config.lambda_cls
        # REC LOSS
        rec_x = self.G(fake_x[0], real_c, style_rec)
        g_loss_rec = criterion_l1(rec_x[0], real_x)
        self.loss['Grec'] = self.config.lambda_rec * g_loss_rec
        # ========== Attention Part ==========#
        self.loss['Gatm'] = self.config.lambda_mask * (
            torch.mean(rec_x[1]) + torch.mean(fake_x[1]))
        self.loss['Gats'] = self.config.lambda_mask_smooth * (
            _compute_loss_smooth(rec_x[1]) + _compute_loss_smooth(fake_x[1]))
        # ========== Identity Part ==========#
        if self.config.Identity:
            idt_x = self.G(real_x, real_c, style_identity)[0]
            g_loss_idt = criterion_l1(idt_x, real_x)
            self.loss['Gidt'] = self.config.lambda_idt * \
                g_loss_idt
        g_loss = self.current_losses('G', **self.loss)
        self.reset_grad()
        g_loss.backward()
        self.g_optimizer.step()

    # ============================================================#
    # ============================================================#
    def run(self):
        """Main training entry point (invoked from ``__init__``)."""
        # lr cache for decaying
        self.g_lr = self.config.g_lr
        self.d_lr = self.config.d_lr
        self.PRINT('Training with learning rate g_lr: {}, d_lr: {}.'.format(
            self.g_optimizer.param_groups[0]['lr'],
            self.d_optimizer.param_groups[0]['lr']))
        # Start with trained info if exists
        start, self.total_iter = self.RESUME_INFO()
        # Fixed inputs, target domain labels, and style for debugging
        self.fixed_x, self.fixed_label, self.fixed_style = self.debug_vars(
            start)
        self.PRINT("Current time: " + TimeNow())
        self.PRINT("Debug Log txt: " + os.path.realpath(self.config.log.name))
        # Log info
        # RaGAN uses different data for Dis and Gen
        self.Log = self.PRINT_LOG(self.config.batch_size // 2)
        self.start_time = time.time()
        # Start training
        for epoch in range(start, self.config.num_epochs):
            self.D.train()
            self.G.train()
            self.LOSS = {}
            desc_bar = '[Iter: %d] Epoch: %d/%d' % (self.total_iter, epoch,
                                                    self.config.num_epochs)
            epoch_verbose = (epoch % self.config.save_epoch) and epoch != 0
            self.progress_bar = tqdm(
                enumerate(self.data_loader),
                unit_scale=True,
                total=len(self.data_loader),
                desc=desc_bar,
                disable=not self.verbose or epoch_verbose,
                ncols=5)
            for _iter, (real_x, real_c, _) in self.progress_bar:
                self.loss = self.reset_losses()
                self.total_iter += 1 * hvd.size()
                # RaGAN uses different data for Dis and Gen
                real_x0, real_x1 = split(real_x)
                real_c0, real_c1 = split(real_c)
                fake_c = get_fake(real_c, seed=_iter)
                fake_c0, fake_c1 = split(fake_c)
                # ============================================================#
                # ======================== Train D ===========================#
                # ============================================================#
                self.Dis_update(real_x0, real_c0, fake_c0)
                # ============================================================#
                # ======================== Train G ===========================#
                # ============================================================#
                self.Gen_update(real_x1, real_c1, fake_c1)
                # ====================== DEBUG =====================#
                self.INFO(epoch, _iter)
            # ============================================================#
            # ======================= MISCELANEOUS =======================#
            # ============================================================#
            # Shuffling dataset each epoch
            self.data_loader.dataset.shuffle(epoch)
            self.MISC(epoch, _iter)
| [
"torch.cat",
"torch.cuda.device_count",
"misc.utils.split",
"misc.utils.color",
"misc.losses._compute_loss_smooth",
"datetime.timedelta",
"torch.mean",
"misc.utils.TimeNow",
"os.path.realpath",
"torch.max",
"misc.utils.to_var",
"misc.utils.get_fake",
"misc.utils.get_loss_value",
"misc.util... | [((413, 422), 'misc.utils.horovod', 'horovod', ([], {}), '()\n', (420, 422), False, 'from misc.utils import horovod\n'), ((447, 480), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (470, 480), False, 'import warnings\n'), ((1850, 1946), 'misc.utils.get_labels', 'get_labels', (['self.config.image_size', 'self.config.dataset_fake'], {'attr': 'self.data_loader.dataset'}), '(self.config.image_size, self.config.dataset_fake, attr=self.\n data_loader.dataset)\n', (1860, 1946), False, 'from misc.utils import color, get_fake, get_labels, get_loss_value\n'), ((2453, 2478), 'torch.cat', 'torch.cat', (['fixed_x'], {'dim': '(0)'}), '(fixed_x, dim=0)\n', (2462, 2478), False, 'import torch\n'), ((2502, 2531), 'torch.cat', 'torch.cat', (['fixed_label'], {'dim': '(0)'}), '(fixed_label, dim=0)\n', (2511, 2531), False, 'import torch\n'), ((3688, 3757), 'misc.losses._GAN_LOSS', '_GAN_LOSS', (['self.D', 'real_x', 'fake_x', 'label'], {'cross_entropy': 'cross_entropy'}), '(self.D, real_x, fake_x, label, cross_entropy=cross_entropy)\n', (3697, 3757), False, 'from misc.losses import _compute_loss_smooth, _GAN_LOSS\n'), ((10263, 10280), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (10278, 10280), False, 'import torch\n'), ((12770, 12781), 'time.time', 'time.time', ([], {}), '()\n', (12779, 12781), False, 'import time\n'), ((3645, 3668), 'torch.max', 'torch.max', (['label'], {'dim': '(1)'}), '(label, dim=1)\n', (3654, 3668), False, 'import torch\n'), ((4247, 4279), 'misc.utils.color', 'color', (['self.loss', '"""Gatm"""', '"""blue"""'], {}), "(self.loss, 'Gatm', 'blue')\n", (4252, 4279), False, 'from misc.utils import color, get_fake, get_labels, get_loss_value\n'), ((6937, 6948), 'time.time', 'time.time', ([], {}), '()\n', (6946, 6948), False, 'import time\n'), ((6994, 7029), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'elapsed'}), '(seconds=elapsed)\n', (7012, 7029), False, 'import datetime\n'), 
((8388, 8399), 'misc.utils.to_var', 'to_var', (['arg'], {}), '(arg)\n', (8394, 8399), False, 'from misc.utils import split, TimeNow, to_var\n'), ((8640, 8665), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8663, 8665), False, 'import torch\n'), ((11123, 11143), 'torch.mean', 'torch.mean', (['rec_x[1]'], {}), '(rec_x[1])\n', (11133, 11143), False, 'import torch\n'), ((11146, 11167), 'torch.mean', 'torch.mean', (['fake_x[1]'], {}), '(fake_x[1])\n', (11156, 11167), False, 'import torch\n'), ((11246, 11276), 'misc.losses._compute_loss_smooth', '_compute_loss_smooth', (['rec_x[1]'], {}), '(rec_x[1])\n', (11266, 11276), False, 'from misc.losses import _compute_loss_smooth, _GAN_LOSS\n'), ((11279, 11310), 'misc.losses._compute_loss_smooth', '_compute_loss_smooth', (['fake_x[1]'], {}), '(fake_x[1])\n', (11299, 11310), False, 'from misc.losses import _compute_loss_smooth, _GAN_LOSS\n'), ((12511, 12520), 'misc.utils.TimeNow', 'TimeNow', ([], {}), '()\n', (12518, 12520), False, 'from misc.utils import split, TimeNow, to_var\n'), ((12562, 12600), 'os.path.realpath', 'os.path.realpath', (['self.config.log.name'], {}), '(self.config.log.name)\n', (12578, 12600), False, 'import os\n'), ((13731, 13744), 'misc.utils.split', 'split', (['real_x'], {}), '(real_x)\n', (13736, 13744), False, 'from misc.utils import split, TimeNow, to_var\n'), ((13781, 13794), 'misc.utils.split', 'split', (['real_c'], {}), '(real_c)\n', (13786, 13794), False, 'from misc.utils import split, TimeNow, to_var\n'), ((13821, 13849), 'misc.utils.get_fake', 'get_fake', (['real_c'], {'seed': '_iter'}), '(real_c, seed=_iter)\n', (13829, 13849), False, 'from misc.utils import color, get_fake, get_labels, get_loss_value\n'), ((13886, 13899), 'misc.utils.split', 'split', (['fake_c'], {}), '(fake_c)\n', (13891, 13899), False, 'from misc.utils import split, TimeNow, to_var\n'), ((4132, 4153), 'misc.utils.get_loss_value', 'get_loss_value', (['value'], {}), '(value)\n', (4146, 4153), False, 
'from misc.utils import color, get_fake, get_labels, get_loss_value\n'), ((7125, 7134), 'misc.utils.TimeNow', 'TimeNow', ([], {}), '()\n', (7132, 7134), False, 'from misc.utils import split, TimeNow, to_var\n'), ((8095, 8125), 'misc.utils.get_loss_value', 'get_loss_value', (['self.loss[key]'], {}), '(self.loss[key])\n', (8109, 8125), False, 'from misc.utils import color, get_fake, get_labels, get_loss_value\n'), ((7329, 7344), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (7337, 7344), True, 'import numpy as np\n')] |
import pandas as pd
from bs4 import BeautifulSoup
import numpy as np
import nltk
import random
import os
from collections import Counter, defaultdict
import re
import json
import math
import matplotlib.pyplot as plt
import time
import csv
import pickle
from tqdm import tqdm
import numpy as np
import datetime
import pickle
import os.path
from scipy.stats.stats import pearsonr, spearmanr
import scipy
import sklearn
import os,sys,inspect
# Make the parent directory importable so that ``utils`` (which lives one
# level up) can be imported when this file is run as a script.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import utils
def load_nlp_review_data(DATA_ROOT='./nlp/'):
    """Load labelled review comments, attach BERT sentiment scores and
    join with the eLife paper-history decisions.

    Args:
        DATA_ROOT: directory containing ``tolabel.csv`` and
            ``scored_reviews.csv`` (paper history is read from one
            level above).

    Returns:
        pandas.DataFrame indexed by manuscript number with review text,
        BERT score, decision history and a binary ``outcome`` column.
    """
    ##################################
    # This two file jointly provide the full information.
    ##################################
    df = pd.read_csv(DATA_ROOT + "tolabel.csv", sep="|")
    df = df[["Manuscript no.", "Reviewer ID", "CleanedComments", "Rec", "Suitable", "ShouldBe", "HumanLabel"]]
    df = df.set_index(["Manuscript no."])
    # NOTE(review): assumes scored_reviews.csv rows are aligned 1:1 with
    # tolabel.csv rows -- the columns are copied over positionally.
    scored_bert = pd.read_csv(DATA_ROOT + "scored_reviews.csv",
                sep="\t", names=["id", "score", "dummy", "text"])
    df["score"] = list(scored_bert.score)
    df["Text"] = list(scored_bert.text)
    print('BERT score mean {:.3f}, std {:.3f}'.format(np.mean(scored_bert.score), np.std(scored_bert.score)) )
    df.drop(columns=["Rec", "Suitable", "ShouldBe",], inplace=True)
    print('df')
    print(df)
    # read in paper history stuff
    e = pd.read_csv(DATA_ROOT + "../eLife_Paper_history_2019_03_15.csv")
    e["Manuscript no."] = e["ms"]
    e = e.set_index(["Manuscript no."])
    e = e.dropna(subset=["full_decision"])
    # to get finaldecision, take last non-NA decision of the ones listed here
    # note that this excludes rejected by initial decision
    e["FinalDecision"] = e.apply(lambda x: list(x[["full_decision", "rev1_decision", "rev2_decision", "rev3_decision", "rev4_decision"]].dropna())[-1], axis=1)
    # outcome: 1 for an accepted full submission, 0 otherwise.
    e["outcome"] = np.where(e["FinalDecision"] == "Accept Full Submission", 1, 0)
    df_e = df.join(e)
    print('df_e', df_e)
    analyze_review_outcome_score_alignment(df_e)
    return df_e
def analyze_review_outcome_score_alignment(df_e):
    """Correlate per-manuscript review sentiment (BERT scores) with the
    final accept/reject decision.

    Reviews are aggregated per manuscript three ways (average, min and
    max score) and each aggregate is compared to the binary outcome via
    point-biserial correlation and AUC.  Results are printed; nothing is
    returned.

    Args:
        df_e: joined review/decision frame with at least the columns
            'ms', 'FinalDecision', 'Reviewer ID' and 'score'.
    """
    ##################################
    # TODO: add a function here
    # RE’s decision compares with the reviewers’ sentiments. Can we do a quick correlation analysis of this?
    # Avg reviewer score vs. acceptance decision? Or min, max?
    ##################################
    # build a dict: Manuscript no. --> final decision outcome (0.0/1.0), BERT scores
    manuscript_ID_to_reviews = defaultdict(list)
    manuscript_ID_to_outcome = dict()
    for i, line in tqdm(df_e.iterrows(), total=df_e.shape[0]):
        if math.isnan(line['ms']):
            continue # skip this row, problematic
        manuscript_ID = int(line['ms']) # Manuscript no.
        if line['FinalDecision'] == 'Accept Full Submission':
            manuscript_ID_to_outcome[manuscript_ID] = 1 # Accept Full Submission 1.0 otherwise 0.0
        elif line['FinalDecision'] == 'Reject Full Submission':
            manuscript_ID_to_outcome[manuscript_ID] = 0
        else:
            # review not finished, skipped.
            continue
        # NOTE: indexing the defaultdict creates an empty entry, but we
        # only get here after the outcome was recorded above, so every
        # key in manuscript_ID_to_reviews also has an outcome.
        existing_reviewer_IDs = [ tup[1] for tup in manuscript_ID_to_reviews[manuscript_ID]]
        # keep at most one review per (manuscript, reviewer) pair
        if line['Reviewer ID'] not in existing_reviewer_IDs:
            review_BERT_score = line['score']
            manuscript_ID_to_reviews[manuscript_ID].append( [ review_BERT_score, int(line['Reviewer ID']), ] )
    ##################################
    # Explore avg, min, max
    ##################################
    manuscript_ID_to_avg_score = dict()
    manuscript_ID_to_min_score = dict()
    manuscript_ID_to_max_score = dict()
    for manuscript_ID, review_list in manuscript_ID_to_reviews.items():
        review_list.sort(key=lambda x: x[0]) # will sort only based on scores
        score_list = [tup[0] for tup in review_list]
        avg_score = np.mean(score_list)
        # list is sorted ascending, so first/last are min/max.
        min_score = score_list[0]
        max_score = score_list[-1]
        manuscript_ID_to_avg_score[manuscript_ID] = avg_score
        manuscript_ID_to_min_score[manuscript_ID] = min_score
        manuscript_ID_to_max_score[manuscript_ID] = max_score
    def calculate_correlation(manuscript_ID_to_score, name):
        # final output:
        # A. numerical output: two bins, acc, rej: avg and std of scores. for avg score, max score, min score; or correlation numbers. auc score?
        # B. graphical output: histogram
        # Align outcomes and aggregated scores by sorted manuscript ID,
        # then print point-biserial correlation and AUC.
        outcome_array = []
        score_accray = []
        for manuscript_ID in sorted(manuscript_ID_to_reviews.keys()):
            outcome_array.append(manuscript_ID_to_outcome[manuscript_ID])
            score_accray.append(manuscript_ID_to_score[manuscript_ID])
        correlation, p_value = scipy.stats.pointbiserialr(outcome_array, score_accray)
        auc = sklearn.metrics.roc_auc_score(outcome_array, score_accray)
        print('Aggregation method: {}'.format(name))
        print('Point biserial correlation coefficient: {:.3f} and its p-value:'.format(correlation), p_value)
        print('AUC: {:.3f}'.format(auc))
        return
    calculate_correlation(manuscript_ID_to_score = manuscript_ID_to_avg_score, name='average')
    calculate_correlation(manuscript_ID_to_score = manuscript_ID_to_min_score, name='min')
    calculate_correlation(manuscript_ID_to_score = manuscript_ID_to_max_score, name='max')
    return
def load_reviewer_data(DATA_ROOT='./nlp/'):
    """Load reviewer metadata and return a dict keyed by reviewer ID.

    Each value holds the reviewer's ID, name, institution, e-mail, a
    placeholder race, a fuzzy-matched two-letter country code and the
    gender column from the CSV.

    Args:
        DATA_ROOT: directory containing ``gender_reviewers.csv``.

    Returns:
        dict mapping reviewer ID -> info dict.
    """
    # NOTE: error_bad_lines is deprecated in pandas >= 1.3 (the modern
    # spelling is on_bad_lines='skip'); kept for compatibility with the
    # pandas version this project runs on.
    reviewers = pd.read_csv(DATA_ROOT + "gender_reviewers.csv", error_bad_lines=False)
    # Removed the per-reviewer count computations the original flagged
    # with "# this is wrong": their results were never used anywhere.
    print('reviewers')
    print(reviewers)
    fuzzy_matcher = utils.Insitution_Fuzzy_Mather()
    reviewer_info_dict = dict()
    for i, line in tqdm(reviewers.iterrows(), total=reviewers.shape[0]):
        this_race = 'N/A'  # race is not available in the source data
        match_name, alpha_two_code = fuzzy_matcher.match(email=line['Reviewer email'], institution_name=line['Reviewer institution'])
        reviewer_info_dict[line['Reviewer ID']] = {
            'people_ID': int(line['Reviewer ID']),
            'name': line['Reviewer name'],
            'institution': line['Reviewer institution'],
            'email': line['Reviewer email'],
            'race': this_race,
            'country_code': alpha_two_code,  # from fuzzy institution match
            'gender': line['gender'],
        }
    return reviewer_info_dict
if __name__ == '__main__':
    # Smoke test: load the joined review data and print a few rows, then
    # load the reviewer metadata.
    df = load_nlp_review_data(DATA_ROOT='./')
    for i, line in tqdm(df.iterrows(), total=df.shape[0]):
        # pass
        print('line', line)
        # NOTE(review): ``i`` is the index label (manuscript number), not
        # a row counter, so this may break after the very first row --
        # confirm whether that is intended.
        if i > 5:
            break
        pass
    load_reviewer_data(DATA_ROOT='./')
| [
"math.isnan",
"pandas.read_csv",
"numpy.std",
"os.path.dirname",
"sys.path.insert",
"utils.Insitution_Fuzzy_Mather",
"sklearn.metrics.roc_auc_score",
"collections.defaultdict",
"numpy.where",
"numpy.mean",
"scipy.stats.pointbiserialr",
"inspect.currentframe"
] | [((539, 566), 'os.path.dirname', 'os.path.dirname', (['currentdir'], {}), '(currentdir)\n', (554, 566), False, 'import os, sys, inspect\n'), ((567, 596), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (582, 596), False, 'import os, sys, inspect\n'), ((805, 852), 'pandas.read_csv', 'pd.read_csv', (["(DATA_ROOT + 'tolabel.csv')"], {'sep': '"""|"""'}), "(DATA_ROOT + 'tolabel.csv', sep='|')\n", (816, 852), True, 'import pandas as pd\n'), ((1025, 1124), 'pandas.read_csv', 'pd.read_csv', (["(DATA_ROOT + 'scored_reviews.csv')"], {'sep': '"""\t"""', 'names': "['id', 'score', 'dummy', 'text']"}), "(DATA_ROOT + 'scored_reviews.csv', sep='\\t', names=['id',\n 'score', 'dummy', 'text'])\n", (1036, 1124), True, 'import pandas as pd\n'), ((1484, 1548), 'pandas.read_csv', 'pd.read_csv', (["(DATA_ROOT + '../eLife_Paper_history_2019_03_15.csv')"], {}), "(DATA_ROOT + '../eLife_Paper_history_2019_03_15.csv')\n", (1495, 1548), True, 'import pandas as pd\n'), ((1983, 2045), 'numpy.where', 'np.where', (["(e['FinalDecision'] == 'Accept Full Submission')", '(1)', '(0)'], {}), "(e['FinalDecision'] == 'Accept Full Submission', 1, 0)\n", (1991, 2045), True, 'import numpy as np\n'), ((2610, 2627), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2621, 2627), False, 'from collections import Counter, defaultdict\n'), ((5569, 5639), 'pandas.read_csv', 'pd.read_csv', (["(DATA_ROOT + 'gender_reviewers.csv')"], {'error_bad_lines': '(False)'}), "(DATA_ROOT + 'gender_reviewers.csv', error_bad_lines=False)\n", (5580, 5639), True, 'import pandas as pd\n'), ((5962, 5993), 'utils.Insitution_Fuzzy_Mather', 'utils.Insitution_Fuzzy_Mather', ([], {}), '()\n', (5991, 5993), False, 'import utils\n'), ((2740, 2762), 'math.isnan', 'math.isnan', (["line['ms']"], {}), "(line['ms'])\n", (2750, 2762), False, 'import math\n'), ((3998, 4017), 'numpy.mean', 'np.mean', (['score_list'], {}), '(score_list)\n', (4005, 4017), True, 'import numpy as np\n'), 
((4852, 4907), 'scipy.stats.pointbiserialr', 'scipy.stats.pointbiserialr', (['outcome_array', 'score_accray'], {}), '(outcome_array, score_accray)\n', (4878, 4907), False, 'import scipy\n'), ((4922, 4980), 'sklearn.metrics.roc_auc_score', 'sklearn.metrics.roc_auc_score', (['outcome_array', 'score_accray'], {}), '(outcome_array, score_accray)\n', (4951, 4980), False, 'import sklearn\n'), ((501, 523), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (521, 523), False, 'import os, sys, inspect\n'), ((1286, 1312), 'numpy.mean', 'np.mean', (['scored_bert.score'], {}), '(scored_bert.score)\n', (1293, 1312), True, 'import numpy as np\n'), ((1314, 1339), 'numpy.std', 'np.std', (['scored_bert.score'], {}), '(scored_bert.score)\n', (1320, 1339), True, 'import numpy as np\n')] |
import nengo
import nengo.spa as spa
import numpy as np
digits = ['ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT', 'NINE']
D = 16
vocab = spa.Vocabulary(D)
model = nengo.Network()
with model:
    # The two digit inputs are represented exactly (Direct mode).
    model.config[nengo.Ensemble].neuron_type=nengo.Direct()
    num1 = spa.State(D, vocab=vocab)
    num2 = spa.State(D, vocab=vocab)
    # The comparison population itself uses spiking LIF neurons.
    model.config[nengo.Ensemble].neuron_type=nengo.LIF()
    ens = nengo.Ensemble(n_neurons=100, dimensions=D*2)
    # Output is read out exactly again (Direct mode).
    model.config[nengo.Ensemble].neuron_type=nengo.Direct()
    answer = nengo.Ensemble(n_neurons=100, dimensions=1)
    nengo.Connection(num1.output, ens[:D])  # first D dimensions
    nengo.Connection(num2.output, ens[D:])  # second D dimensions
    # Build training pairs for the decoded comparison function:
    # -1 when the first digit is smaller, +1 when it is larger.
    inputs = []
    outputs = []
    for i in range(len(digits)):
        for j in range(len(digits)):
            if i == j:
                continue
            pair = np.hstack([vocab.parse(digits[i]).v, vocab.parse(digits[j]).v])
            inputs.append(pair)
            outputs.append([-1] if i < j else [1])
    nengo.Connection(ens, answer, function=outputs, eval_points=inputs)
| [
"nengo.Direct",
"nengo.spa.State",
"nengo.LIF",
"numpy.hstack",
"nengo.spa.Vocabulary",
"nengo.Network",
"nengo.Connection",
"nengo.Ensemble"
] | [((156, 173), 'nengo.spa.Vocabulary', 'spa.Vocabulary', (['D'], {}), '(D)\n', (170, 173), True, 'import nengo.spa as spa\n'), ((183, 198), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (196, 198), False, 'import nengo\n'), ((256, 270), 'nengo.Direct', 'nengo.Direct', ([], {}), '()\n', (268, 270), False, 'import nengo\n'), ((282, 307), 'nengo.spa.State', 'spa.State', (['D'], {'vocab': 'vocab'}), '(D, vocab=vocab)\n', (291, 307), True, 'import nengo.spa as spa\n'), ((319, 344), 'nengo.spa.State', 'spa.State', (['D'], {'vocab': 'vocab'}), '(D, vocab=vocab)\n', (328, 344), True, 'import nengo.spa as spa\n'), ((395, 406), 'nengo.LIF', 'nengo.LIF', ([], {}), '()\n', (404, 406), False, 'import nengo\n'), ((417, 464), 'nengo.Ensemble', 'nengo.Ensemble', ([], {'n_neurons': '(100)', 'dimensions': '(D * 2)'}), '(n_neurons=100, dimensions=D * 2)\n', (431, 464), False, 'import nengo\n'), ((513, 527), 'nengo.Direct', 'nengo.Direct', ([], {}), '()\n', (525, 527), False, 'import nengo\n'), ((541, 584), 'nengo.Ensemble', 'nengo.Ensemble', ([], {'n_neurons': '(100)', 'dimensions': '(1)'}), '(n_neurons=100, dimensions=1)\n', (555, 584), False, 'import nengo\n'), ((594, 632), 'nengo.Connection', 'nengo.Connection', (['num1.output', 'ens[:D]'], {}), '(num1.output, ens[:D])\n', (610, 632), False, 'import nengo\n'), ((673, 711), 'nengo.Connection', 'nengo.Connection', (['num2.output', 'ens[D:]'], {}), '(num2.output, ens[D:])\n', (689, 711), False, 'import nengo\n'), ((1200, 1267), 'nengo.Connection', 'nengo.Connection', (['ens', 'answer'], {'function': 'outputs', 'eval_points': 'inputs'}), '(ens, answer, function=outputs, eval_points=inputs)\n', (1216, 1267), False, 'import nengo\n'), ((997, 1016), 'numpy.hstack', 'np.hstack', (['[n1, n2]'], {}), '([n1, n2])\n', (1006, 1016), True, 'import numpy as np\n')] |
"""
==================
scatter(X, Y, ...)
==================
"""
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('mpl_plot_gallery')
# make the data
np.random.seed(3)
X = 4 + np.random.normal(0, 2, 24)
Y = 4 + np.random.normal(0, 2, len(X))
# size and color:
S = np.random.uniform(15, 80, len(X))
# plot
fig, ax = plt.subplots()
ax.scatter(X, Y, s=S, c=-S, cmap=plt.get_cmap('Blues'), vmin=-100, vmax=0)
ax.set_xlim(0, 8)
ax.set_xticks(np.arange(1, 8))
ax.set_ylim(0, 8)
ax.set_yticks(np.arange(1, 8))
plt.show()
| [
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.style.use",
"numpy.arange",
"numpy.random.normal",
"matplotlib.pyplot.subplots"
] | [((117, 150), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""mpl_plot_gallery"""'], {}), "('mpl_plot_gallery')\n", (130, 150), True, 'import matplotlib.pyplot as plt\n'), ((168, 185), 'numpy.random.seed', 'np.random.seed', (['(3)'], {}), '(3)\n', (182, 185), True, 'import numpy as np\n'), ((334, 348), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (346, 348), True, 'import matplotlib.pyplot as plt\n'), ((524, 534), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (532, 534), True, 'import matplotlib.pyplot as plt\n'), ((194, 220), 'numpy.random.normal', 'np.random.normal', (['(0)', '(2)', '(24)'], {}), '(0, 2, 24)\n', (210, 220), True, 'import numpy as np\n'), ((458, 473), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (467, 473), True, 'import numpy as np\n'), ((507, 522), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (516, 522), True, 'import numpy as np\n'), ((383, 404), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Blues"""'], {}), "('Blues')\n", (395, 404), True, 'import matplotlib.pyplot as plt\n')] |
#Copyright (c) 2016, <NAME>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#* Neither the name of kuri_mbzirc_challenge_3 nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cv2
import numpy as np
import time
from kuri_msgs.msg import *
from geometry_msgs.msg import *
class Obstacle:
    """A tracked coloured obstacle detected in the camera image.

    Stores the latest contour, bounding box, centroid, pixel and
    timestamp, plus world-frame coordinates (initialised to zero and
    filled in elsewhere by the tracker).
    """

    def __init__(self, object_id, contour, color, pixel):
        self.object_id = object_id
        self.color = color
        self._set_geometry(contour, pixel)
        # World-frame coordinates; computed later outside this class.
        self.wx = 0
        self.wy = 0
        self.wz = 0

    def _set_geometry(self, contour, pixel):
        """Recompute bounding box, centroid, size and timestamp from
        *contour* (shared by __init__ and update)."""
        self.contour = contour
        x, y, w, h = cv2.boundingRect(contour)
        # boundingRect already yields the extreme corners: top-left is
        # (x, y) and bottom-right is (x + w, y + h), so the original
        # argmin/argmax corner search was redundant.
        self.roiBox = (x, y, x + w, y + h)
        self.pixel = pixel
        self.cx = x + w / 2
        self.cy = y + h / 2
        self.width = w
        self.height = h
        self.timestamp = time.time()

    def update(self, contour, pixel):
        """Refresh the obstacle with a newly detected contour."""
        self._set_geometry(contour, pixel)

    def islost(self, timeout):
        """Return True when no update happened within *timeout* seconds."""
        return time.time() - self.timestamp > timeout

    def getAsObject(self):
        """Convert this obstacle to a kuri_msgs ``Object`` message."""
        o = Object()
        o.pose.pose.position.x = self.wx
        o.pose.pose.position.y = self.wy
        o.pose.pose.position.z = self.wz
        o.velocity = Twist()  ##TODO
        o.width = self.width
        o.height = self.height
        o.color = self.color
        return o
| [
"numpy.argmax",
"numpy.argmin",
"time.time",
"numpy.array",
"cv2.boundingRect"
] | [((1793, 1818), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (1809, 1818), False, 'import cv2\n'), ((1972, 1988), 'numpy.array', 'np.array', (['roiPts'], {}), '(roiPts)\n', (1980, 1988), True, 'import numpy as np\n'), ((2268, 2279), 'time.time', 'time.time', ([], {}), '()\n', (2277, 2279), False, 'import time\n'), ((2422, 2447), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (2438, 2447), False, 'import cv2\n'), ((2601, 2617), 'numpy.array', 'np.array', (['roiPts'], {}), '(roiPts)\n', (2609, 2617), True, 'import numpy as np\n'), ((2897, 2908), 'time.time', 'time.time', ([], {}), '()\n', (2906, 2908), False, 'import time\n'), ((2038, 2050), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (2047, 2050), True, 'import numpy as np\n'), ((2070, 2082), 'numpy.argmax', 'np.argmax', (['s'], {}), '(s)\n', (2079, 2082), True, 'import numpy as np\n'), ((2667, 2679), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (2676, 2679), True, 'import numpy as np\n'), ((2699, 2711), 'numpy.argmax', 'np.argmax', (['s'], {}), '(s)\n', (2708, 2711), True, 'import numpy as np\n'), ((2962, 2973), 'time.time', 'time.time', ([], {}), '()\n', (2971, 2973), False, 'import time\n')] |
import tensorflow as tf
import numpy as np
def lstm(rnn_size, keep_prob, reuse=False):
    """Return an LSTM cell of size *rnn_size* wrapped with output dropout."""
    base_cell = tf.nn.rnn_cell.LSTMCell(rnn_size, reuse=reuse)
    return tf.nn.rnn_cell.DropoutWrapper(base_cell, output_keep_prob=keep_prob)
def model_input():
    """Create every placeholder the seq2seq graph consumes.

    Returns a 6-tuple: (source ids, target ids, source lengths,
    target lengths, learning rate, dropout keep probability).
    """
    seqs = tf.placeholder(tf.int32, [None, None], name='input')
    labels = tf.placeholder(tf.int32, [None, None], name='target')
    seq_lens = tf.placeholder(tf.int32, [None], name='input_len')
    label_lens = tf.placeholder(tf.int32, [None], name='target_len')
    learning_rate = tf.placeholder(tf.float32, name='lr')
    dropout_keep = tf.placeholder(tf.float32, name='keep_prob')
    return seqs, labels, seq_lens, label_lens, learning_rate, dropout_keep
def encoder_input(source_vocab_size, embed_size, input_data):
    """Embed the source token ids with a trainable uniform-initialised matrix."""
    embedding_matrix = tf.Variable(
        tf.random_uniform([source_vocab_size, embed_size], -1, 1))
    return tf.nn.embedding_lookup(embedding_matrix, input_data)
def encoder_layer(stacked_cells, encoder_embedded, input_data_len):
    """Run a bidirectional RNN over the embedded source sequence.

    Returns (outputs, state): outputs is the forward/backward output
    concatenation along the feature axis; state is an LSTMStateTuple whose
    c and h are the forward/backward final states concatenated.
    """
    bi_outputs, bi_states = tf.nn.bidirectional_dynamic_rnn(
        cell_fw=stacked_cells,
        cell_bw=stacked_cells,
        inputs=encoder_embedded,
        sequence_length=input_data_len,
        dtype=tf.float32)
    fw_out, bw_out = bi_outputs
    fw_state, bw_state = bi_states
    merged_outputs = tf.concat((fw_out, bw_out), 2)
    merged_c = tf.concat((fw_state.c, bw_state.c), 1)
    merged_h = tf.concat((fw_state.h, bw_state.h), 1)
    merged_state = tf.nn.rnn_cell.LSTMStateTuple(c=merged_c, h=merged_h)
    return merged_outputs, merged_state
def attention_layer(rnn_size,encoder_outputs,dec_cell,target_data_len,batch_size,encoder_states):
    """Wrap *dec_cell* with Bahdanau attention over the encoder outputs.

    Returns only the attention-wrapped cell.
    NOTE(review): this helper appears unused -- seq2seq_model builds the
    same wrapper inline and its call to this function is commented out.
    """
    # NOTE(review): memory_sequence_length is fed the *target* lengths while
    # the attention memory is the encoder outputs; presumably the source
    # lengths were intended -- confirm against the caller.
    attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(rnn_size*2,encoder_outputs,
                                                        memory_sequence_length=target_data_len)
    attention_cell = tf.contrib.seq2seq.AttentionWrapper(dec_cell, attention_mechanism,
                                           attention_layer_size=rnn_size/2)
    # NOTE(review): the cloned initial state below is computed but never
    # returned, so callers cannot use it -- dead work as written.
    state = attention_cell.zero_state(dtype=tf.float32, batch_size=batch_size)
    state = state.clone(cell_state=encoder_states)
    return attention_cell
def decoder_embedding(target_vocab_size, embed_size, decoder_input):
    """Create the target-side embedding matrix and embed *decoder_input*.

    Returns (embedding matrix, embedded inputs); the matrix is also needed
    later by the greedy inference helper.
    """
    embedding_matrix = tf.Variable(
        tf.random_uniform([target_vocab_size, embed_size], -1, 1))
    embedded = tf.nn.embedding_lookup(embedding_matrix, decoder_input)
    return embedding_matrix, embedded
def decoder_input(target_data, batch_size, vocabs_to_index):
    """Prepend <GO> to every target sequence and drop its final token."""
    go_column = tf.fill([batch_size, 1], vocabs_to_index['<GO>'])
    trimmed = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    return tf.concat([go_column, trimmed], 1)
def decoder_train_layer(rnn_size, decoder_input,
                        dec_cell_inputs, target_vocab_size, target_data_len,
                        encoder_outputs, encoder_states, batch_size, attention_cell, state, dense_layer):
    """Decode with teacher forcing for training.

    Only dec_cell_inputs, target_data_len, attention_cell, state and
    dense_layer are consumed; the other parameters are kept for signature
    compatibility with existing callers.
    """
    teacher_helper = tf.contrib.seq2seq.TrainingHelper(dec_cell_inputs, target_data_len)
    train_decoder = tf.contrib.seq2seq.BasicDecoder(
        cell=attention_cell, helper=teacher_helper,
        initial_state=state,
        output_layer=dense_layer)
    decoded, _, _ = tf.contrib.seq2seq.dynamic_decode(
        train_decoder,
        impute_finished=True,
        maximum_iterations=tf.reduce_max(target_data_len))
    return decoded
def decoder_infer_layer(decoder_embeddings, batch_size, vocabs_to_index,
                        attention_cell, state, dense_layer, target_data_len):
    """Decode greedily at inference time, from <GO> until <EOS>."""
    start_tokens = tf.fill([batch_size], vocabs_to_index['<GO>'])
    greedy_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
        decoder_embeddings, start_tokens, vocabs_to_index['<EOS>'])
    infer_decoder = tf.contrib.seq2seq.BasicDecoder(
        cell=attention_cell, helper=greedy_helper,
        initial_state=state,
        output_layer=dense_layer)
    decoded, _, _ = tf.contrib.seq2seq.dynamic_decode(
        infer_decoder, impute_finished=True,
        maximum_iterations=tf.reduce_max(target_data_len))
    return decoded
def opt_loss(outputs_train, outputs_infer, target_data_len, target_data, lr_rate):
    """Build the masked sequence loss and a gradient-clipped Adam train op.

    Returns (inference predictions, loss tensor, train op).
    """
    training_logits = tf.identity(outputs_train.rnn_output, name='logits')
    inference_logits = tf.identity(outputs_infer.sample_id, name='predictions')
    # Mask out positions beyond each sequence's true length.
    masks = tf.sequence_mask(target_data_len, tf.reduce_max(target_data_len),
                             dtype=tf.float32, name='masks')
    cost = tf.contrib.seq2seq.sequence_loss(training_logits, target_data, masks)
    optimizer = tf.train.AdamOptimizer(lr_rate)
    # Clip every gradient into [-1, 1] before applying.
    clipped = []
    for grad, var in optimizer.compute_gradients(cost):
        if grad is not None:
            clipped.append((tf.clip_by_value(grad, -1., 1.), var))
    train_op = optimizer.apply_gradients(clipped)
    return inference_logits, cost, train_op
def pad_sentence(sentence_batch, pad_int):
    """Right-pad every sentence in the batch to the batch's maximum length.

    Returns (padded sentences, original lengths).
    """
    longest = max(len(sentence) for sentence in sentence_batch)
    padded = [sentence + [pad_int] * (longest - len(sentence))
              for sentence in sentence_batch]
    lengths = [len(sentence) for sentence in sentence_batch]
    return padded, lengths
def get_accuracy(target, logits):
    """Elementwise accuracy between a target matrix and predicted-id matrix.

    Whichever matrix is narrower is zero-padded on the right to the width
    of the other (width of *target* taken from its second row) before the
    equality comparison.
    """
    widest = max(len(target[1]), logits.shape[1])
    target_gap = widest - len(target[1])
    if target_gap:
        target = np.pad(target, [(0, 0), (0, target_gap)], 'constant')
    logits_gap = widest - logits.shape[1]
    if logits_gap:
        logits = np.pad(logits, [(0, 0), (0, logits_gap)], 'constant')
    return np.mean(np.equal(target, logits))
def sentence_to_seq(sentence, vocabs_to_index):
    """Map a space-separated sentence to vocab ids, using <UNK> for OOV words."""
    return [vocabs_to_index[word] if word in vocabs_to_index
            else vocabs_to_index['<UNK>']
            for word in sentence.split(" ")]
def decoder_layer(rnn_size, encoder_outputs, target_data_len,
                  dec_cell, encoder_states, target_data, vocabs_to_index, target_vocab_size,
                  embed_size, dense_layer, attention_cell, state, batch_size):
    """Build both decoding branches: teacher-forced training and greedy inference."""
    go_prefixed = decoder_input(target_data, batch_size, vocabs_to_index)
    embed_matrix, embedded_inputs = decoder_embedding(
        target_vocab_size, embed_size, go_prefixed)
    train_branch = decoder_train_layer(
        rnn_size, go_prefixed, embedded_inputs, target_vocab_size,
        target_data_len, encoder_outputs, encoder_states, batch_size,
        attention_cell, state, dense_layer)
    infer_branch = decoder_infer_layer(
        embed_matrix, batch_size, vocabs_to_index,
        attention_cell, state, dense_layer, target_data_len)
    return train_branch, infer_branch
def seq2seq_model(source_vocab_size,embed_size,rnn_size,keep_prob,
                 target_vocab_size,batch_size,vocabs_to_index):
    """Assemble the full attention seq2seq graph.

    Returns the placeholders plus (inference predictions, loss, train op)
    so a caller can feed data and run training or inference.
    """
    input_data,target_data,input_data_len,target_data_len,lr_rate,keep_probs = model_input()
    encoder_embedded = encoder_input(source_vocab_size,embed_size,input_data)
    stacked_cells = lstm(rnn_size, keep_prob)
    encoder_outputs,encoder_states = encoder_layer(stacked_cells,
                                                   encoder_embedded,
                                                   input_data_len)
    # Decoder cell is twice the encoder size to match the bidirectional
    # encoder's concatenated state width.
    dec_cell = lstm(rnn_size*2,keep_prob)
    dense_layer = tf.layers.Dense(target_vocab_size)
    # Attention wrapper is built inline here rather than via attention_layer
    # (see the commented-out call below), because the cloned initial state
    # is needed by the decoders.
    attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(rnn_size*2,encoder_outputs,
                                                        memory_sequence_length=target_data_len)
    attention_cell = tf.contrib.seq2seq.AttentionWrapper(dec_cell, attention_mechanism,
                                           attention_layer_size=rnn_size/2)
    state = attention_cell.zero_state(dtype=tf.float32, batch_size=batch_size)
    state = state.clone(cell_state=encoder_states)
    # attention_cell = attention_layer(rnn_size,encoder_outputs,target_data_len,dec_cell,batch_size,encoder_states)
    outputs_train,outputs_infer = decoder_layer(rnn_size,encoder_outputs,target_data_len,
                  dec_cell,encoder_states,target_data,vocabs_to_index,target_vocab_size,
                  embed_size,dense_layer,attention_cell,state,batch_size)
    inference_logits,cost,train_op = opt_loss(outputs_train,outputs_infer,target_data_len,target_data,lr_rate)
    return input_data,target_data,input_data_len,target_data_len,lr_rate,keep_probs,inference_logits,cost,train_op
| [
"tensorflow.contrib.seq2seq.BahdanauAttention",
"tensorflow.nn.rnn_cell.LSTMStateTuple",
"tensorflow.clip_by_value",
"tensorflow.identity",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.nn.rnn_cell.LSTMCell",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.contrib.seq2seq.BasicDecoder",
... | [((102, 148), 'tensorflow.nn.rnn_cell.LSTMCell', 'tf.nn.rnn_cell.LSTMCell', (['rnn_size'], {'reuse': 'reuse'}), '(rnn_size, reuse=reuse)\n', (125, 148), True, 'import tensorflow as tf\n'), ((158, 226), 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['lstm_cell'], {'output_keep_prob': 'keep_prob'}), '(lstm_cell, output_keep_prob=keep_prob)\n', (187, 226), True, 'import tensorflow as tf\n'), ((280, 332), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {'name': '"""input"""'}), "(tf.int32, [None, None], name='input')\n", (294, 332), True, 'import tensorflow as tf\n'), ((350, 403), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {'name': '"""target"""'}), "(tf.int32, [None, None], name='target')\n", (364, 403), True, 'import tensorflow as tf\n'), ((424, 474), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""input_len"""'}), "(tf.int32, [None], name='input_len')\n", (438, 474), True, 'import tensorflow as tf\n'), ((495, 546), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""target_len"""'}), "(tf.int32, [None], name='target_len')\n", (509, 546), True, 'import tensorflow as tf\n'), ((559, 596), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""lr"""'}), "(tf.float32, name='lr')\n", (573, 596), True, 'import tensorflow as tf\n'), ((612, 656), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""keep_prob"""'}), "(tf.float32, name='keep_prob')\n", (626, 656), True, 'import tensorflow as tf\n'), ((919, 973), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['encoder_embeddings', 'input_data'], {}), '(encoder_embeddings, input_data)\n', (941, 973), True, 'import tensorflow as tf\n'), ((1175, 1336), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'cell_fw': 'stacked_cells', 'cell_bw': 'stacked_cells', 'inputs': 'encoder_embedded', 
'sequence_length': 'input_data_len', 'dtype': 'tf.float32'}), '(cell_fw=stacked_cells, cell_bw=\n stacked_cells, inputs=encoder_embedded, sequence_length=input_data_len,\n dtype=tf.float32)\n', (1206, 1336), True, 'import tensorflow as tf\n'), ((1614, 1668), 'tensorflow.concat', 'tf.concat', (['(encoder_fw_outputs, encoder_bw_outputs)', '(2)'], {}), '((encoder_fw_outputs, encoder_bw_outputs), 2)\n', (1623, 1668), True, 'import tensorflow as tf\n'), ((1689, 1755), 'tensorflow.concat', 'tf.concat', (['(encoder_fw_final_state.c, encoder_bw_final_state.c)', '(1)'], {}), '((encoder_fw_final_state.c, encoder_bw_final_state.c), 1)\n', (1698, 1755), True, 'import tensorflow as tf\n'), ((1776, 1842), 'tensorflow.concat', 'tf.concat', (['(encoder_fw_final_state.h, encoder_bw_final_state.h)', '(1)'], {}), '((encoder_fw_final_state.h, encoder_bw_final_state.h), 1)\n', (1785, 1842), True, 'import tensorflow as tf\n'), ((1862, 1929), 'tensorflow.nn.rnn_cell.LSTMStateTuple', 'tf.nn.rnn_cell.LSTMStateTuple', ([], {'c': 'encoder_state_c', 'h': 'encoder_state_h'}), '(c=encoder_state_c, h=encoder_state_h)\n', (1891, 1929), True, 'import tensorflow as tf\n'), ((2097, 2208), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['(rnn_size * 2)', 'encoder_outputs'], {'memory_sequence_length': 'target_data_len'}), '(rnn_size * 2, encoder_outputs,\n memory_sequence_length=target_data_len)\n', (2133, 2208), True, 'import tensorflow as tf\n'), ((2291, 2396), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['dec_cell', 'attention_mechanism'], {'attention_layer_size': '(rnn_size / 2)'}), '(dec_cell, attention_mechanism,\n attention_layer_size=rnn_size / 2)\n', (2326, 2396), True, 'import tensorflow as tf\n'), ((2796, 2853), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['decoder_embeddings', 'decoder_input'], {}), '(decoder_embeddings, decoder_input)\n', (2818, 2853), True, 'import tensorflow as tf\n'), 
((2972, 3035), 'tensorflow.strided_slice', 'tf.strided_slice', (['target_data', '[0, 0]', '[batch_size, -1]', '[1, 1]'], {}), '(target_data, [0, 0], [batch_size, -1], [1, 1])\n', (2988, 3035), True, 'import tensorflow as tf\n'), ((3356, 3423), 'tensorflow.contrib.seq2seq.TrainingHelper', 'tf.contrib.seq2seq.TrainingHelper', (['dec_cell_inputs', 'target_data_len'], {}), '(dec_cell_inputs, target_data_len)\n', (3389, 3423), True, 'import tensorflow as tf\n'), ((3449, 3573), 'tensorflow.contrib.seq2seq.BasicDecoder', 'tf.contrib.seq2seq.BasicDecoder', ([], {'cell': 'attention_cell', 'helper': 'train_helper', 'initial_state': 'state', 'output_layer': 'dense_layer'}), '(cell=attention_cell, helper=train_helper,\n initial_state=state, output_layer=dense_layer)\n', (3480, 3573), True, 'import tensorflow as tf\n'), ((4369, 4493), 'tensorflow.contrib.seq2seq.BasicDecoder', 'tf.contrib.seq2seq.BasicDecoder', ([], {'cell': 'attention_cell', 'helper': 'infer_helper', 'initial_state': 'state', 'output_layer': 'dense_layer'}), '(cell=attention_cell, helper=infer_helper,\n initial_state=state, output_layer=dense_layer)\n', (4400, 4493), True, 'import tensorflow as tf\n'), ((4926, 4978), 'tensorflow.identity', 'tf.identity', (['outputs_train.rnn_output'], {'name': '"""logits"""'}), "(outputs_train.rnn_output, name='logits')\n", (4937, 4978), True, 'import tensorflow as tf\n'), ((5002, 5058), 'tensorflow.identity', 'tf.identity', (['outputs_infer.sample_id'], {'name': '"""predictions"""'}), "(outputs_infer.sample_id, name='predictions')\n", (5013, 5058), True, 'import tensorflow as tf\n'), ((5180, 5249), 'tensorflow.contrib.seq2seq.sequence_loss', 'tf.contrib.seq2seq.sequence_loss', (['training_logits', 'target_data', 'masks'], {}), '(training_logits, target_data, masks)\n', (5212, 5249), True, 'import tensorflow as tf\n'), ((5264, 5295), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr_rate'], {}), '(lr_rate)\n', (5286, 5295), True, 'import tensorflow as tf\n'), 
((7931, 7965), 'tensorflow.layers.Dense', 'tf.layers.Dense', (['target_vocab_size'], {}), '(target_vocab_size)\n', (7946, 7965), True, 'import tensorflow as tf\n'), ((7997, 8108), 'tensorflow.contrib.seq2seq.BahdanauAttention', 'tf.contrib.seq2seq.BahdanauAttention', (['(rnn_size * 2)', 'encoder_outputs'], {'memory_sequence_length': 'target_data_len'}), '(rnn_size * 2, encoder_outputs,\n memory_sequence_length=target_data_len)\n', (8033, 8108), True, 'import tensorflow as tf\n'), ((8190, 8295), 'tensorflow.contrib.seq2seq.AttentionWrapper', 'tf.contrib.seq2seq.AttentionWrapper', (['dec_cell', 'attention_mechanism'], {'attention_layer_size': '(rnn_size / 2)'}), '(dec_cell, attention_mechanism,\n attention_layer_size=rnn_size / 2)\n', (8225, 8295), True, 'import tensorflow as tf\n'), ((837, 894), 'tensorflow.random_uniform', 'tf.random_uniform', (['[source_vocab_size, embed_size]', '(-1)', '(1)'], {}), '([source_vocab_size, embed_size], -1, 1)\n', (854, 894), True, 'import tensorflow as tf\n'), ((2715, 2772), 'tensorflow.random_uniform', 'tf.random_uniform', (['[target_vocab_size, embed_size]', '(-1)', '(1)'], {}), '([target_vocab_size, embed_size], -1, 1)\n', (2732, 2772), True, 'import tensorflow as tf\n'), ((4215, 4261), 'tensorflow.fill', 'tf.fill', (['[batch_size]', "vocabs_to_index['<GO>']"], {}), "([batch_size], vocabs_to_index['<GO>'])\n", (4222, 4261), True, 'import tensorflow as tf\n'), ((5105, 5135), 'tensorflow.reduce_max', 'tf.reduce_max', (['target_data_len'], {}), '(target_data_len)\n', (5118, 5135), True, 'import tensorflow as tf\n'), ((6200, 6268), 'numpy.pad', 'np.pad', (['logits', '[(0, 0), (0, max_seq - logits.shape[1])]', '"""constant"""'], {}), "(logits, [(0, 0), (0, max_seq - logits.shape[1])], 'constant')\n", (6206, 6268), True, 'import numpy as np\n'), ((6323, 6347), 'numpy.equal', 'np.equal', (['target', 'logits'], {}), '(target, logits)\n', (6331, 6347), True, 'import numpy as np\n'), ((3067, 3116), 'tensorflow.fill', 'tf.fill', 
(['[batch_size, 1]', "vocabs_to_index['<GO>']"], {}), "([batch_size, 1], vocabs_to_index['<GO>'])\n", (3074, 3116), True, 'import tensorflow as tf\n'), ((3890, 3920), 'tensorflow.reduce_max', 'tf.reduce_max', (['target_data_len'], {}), '(target_data_len)\n', (3903, 3920), True, 'import tensorflow as tf\n'), ((4766, 4796), 'tensorflow.reduce_max', 'tf.reduce_max', (['target_data_len'], {}), '(target_data_len)\n', (4779, 4796), True, 'import tensorflow as tf\n'), ((5371, 5404), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['grad', '(-1.0)', '(1.0)'], {}), '(grad, -1.0, 1.0)\n', (5387, 5404), True, 'import tensorflow as tf\n')] |
import cv2
import os
import numpy as np
import json
import glob
import datetime
from pathlib import Path
class CocoDatasetMaker:
    """Convert rendered images plus per-class mask images into a COCO dataset.

    Expects ``dataset_dir/<sample>/full_image.png`` together with one
    ``mask*_<ClassName>.png`` per object instance; writes the renamed
    images, debug images with contours drawn, and ``dataset.json`` into
    *output_dir*.
    """

    # Mask blobs smaller than this area (px^2) are ignored as noise.
    MIN_CONTOUR_AREA = 300.0

    def __init__(self, dataset_dir, img_index_offset=0, label_index_offset=0, output_dir="dataset_output"):
        """The two offsets allow several generated datasets to be merged
        later without image/annotation id collisions."""
        self.coco = {
            "info": {
                "year": 2020,
                "version": 1.0,
                "description": "sim-dataset",
                "url": "www.YEET.nope",
                "date_created": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "Z"
            },
            "images": [],
            "annotations": [],
            "categories": [
                {"id": 4, "name": "BlackCover", "supercategory": "object"},
                {"id": 3, "name": "WhiteCover", "supercategory": "object"},
                {"id": 2, "name": "BlueCover", "supercategory": "object"},
                {"id": 1, "name": "BottomCover", "supercategory": "object"},
                {"id": 0, "name": "PCB", "supercategory": "object"}
            ],
            "licenses": []
        }
        self.dataset_dir = dataset_dir
        self.id_to_class_name_map = {
            4: "BlackCover",
            3: "WhiteCover",
            2: "BlueCover",
            1: "BottomCover",
            0: "PCB"
        }
        # Inverse lookup, derived so the two maps can never disagree.
        self.class_name_to_id_map = {
            name: cid for cid, name in self.id_to_class_name_map.items()
        }
        self.output_dir = output_dir
        self.img_index_offset = img_index_offset
        self.label_index_offset = label_index_offset
        if not os.path.isdir(self.output_dir):
            os.mkdir(self.output_dir)

    def create_dataset(self):
        """Walk every sample folder, copy the full image, convert every mask
        into annotations and finally write ``dataset.json``."""
        image_folders = glob.glob(f'{self.dataset_dir}/*/')
        annotation_index = self.label_index_offset
        contour_folder_path = os.path.join(self.output_dir, 'img_with_contours')
        if not os.path.isdir(contour_folder_path):
            os.mkdir(contour_folder_path)
        for img_index, image_folder in enumerate(image_folders):
            image_id = img_index + self.img_index_offset
            full_image = cv2.imread(os.path.join(image_folder, 'full_image.png'))
            image_name = f'img{image_id}.png'
            self.coco["images"].append({
                "id": image_id,
                "width": full_image.shape[1],
                "height": full_image.shape[0],
                "file_name": image_name,
                "license": None,
                "flickr_url": '',
                "coco_url": None,
                "date_captured": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "Z"
            })
            cv2.imwrite(os.path.join(self.output_dir, image_name), full_image)
            contours = []
            for mask_path in glob.glob(f'{image_folder}mask*.png'):
                annotations_made, mask_contours = self.__create_annotations_for_mask(
                    mask_path, annotation_index, image_id)
                annotation_index += annotations_made
                contours.extend(mask_contours)
            # drawContours modifies full_image in place, but the clean copy
            # was already written to disk above.
            contour_img = cv2.drawContours(full_image, contours, -1, (0, 0, 255), 2)
            original_img_name = Path(image_folder).stem
            cv2.imwrite(os.path.join(
                contour_folder_path,
                f'cont{image_id}_{original_img_name}.png'), contour_img)
        print('writing json file')
        with open(os.path.join(self.output_dir, 'dataset.json'), 'w') as outfile:
            json.dump(self.coco, outfile)
        print('done')

    def __create_annotations_for_mask(self, mask_path, annotation_index, image_id):
        """Create one COCO annotation per sufficiently large blob in *mask_path*.

        The class name is parsed from the file name (``mask*_<ClassName>.png``).
        Returns (number of annotations created, list of simplified contours).

        Bug fix: the original returned ``len(contours)`` -- the count of ALL
        blobs, including those dropped by the area filter -- so the caller
        advanced its annotation id counter too far and left gaps in the ids.
        """
        only_file_name = os.path.basename(mask_path)
        category_name = str(only_file_name.split('_')[1]).split('.')[0]
        category_id = self.class_name_to_id_map[category_name]
        img_mask = cv2.imread(mask_path)
        img_mask = cv2.cvtColor(img_mask, cv2.COLOR_BGR2GRAY)
        contours, _ = cv2.findContours(img_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        valid_contours = []
        for contour in contours:
            if cv2.contourArea(contour) >= self.MIN_CONTOUR_AREA:
                # Simplify the polygon; tolerance is 0.5% of its perimeter.
                epsilon = 0.005 * cv2.arcLength(contour, True)
                valid_contours.append(cv2.approxPolyDP(contour, epsilon, True))
        label_index = annotation_index
        for contour in valid_contours:
            x, y, w, h = cv2.boundingRect(contour)
            # Flatten the Nx1x2 contour into COCO's [x0, y0, x1, y1, ...] form.
            coco_contour = []
            for xy_pair in contour:
                coco_contour.append(int(xy_pair[0][0]))
                coco_contour.append(int(xy_pair[0][1]))
            self.coco["annotations"].append({
                "id": label_index,
                "image_id": image_id,
                "category_id": category_id,
                "segmentation": [coco_contour],
                "bbox": [int(x), int(y), int(w), int(h)],
                "area": 0,  # is 0 in our real dataset
                "iscrowd": 0
            })
            label_index += 1
        return len(valid_contours), valid_contours
def merge_json_files(main_json, other_json, output_json_path="merged.json"):
    """Concatenate the images and annotations of two COCO json files.

    Everything else (info, categories, licenses) is taken from *main_json*;
    the merged result is written to *output_json_path*.
    """
    with open(main_json, "r") as fh:
        merged = json.load(fh)
    with open(other_json, "r") as fh:
        extra = json.load(fh)
    merged["annotations"] += extra["annotations"]
    merged["images"] += extra["images"]
    with open(output_json_path, "w") as fh:
        json.dump(merged, fh)
def remove_small_masks(json_file_path, area_threshold=400, output_json_path="filtered.json"):
    """Drop every annotation whose polygon area is <= *area_threshold* and
    write the filtered COCO json to *output_json_path*."""
    with open(json_file_path, "r") as fh:
        coco = json.load(fh)
    kept = []
    for annotation in coco["annotations"]:
        flat = annotation["segmentation"][0]
        # Re-pair the flat [x0, y0, x1, y1, ...] list into (x, y) points.
        # NOTE(review): cv2.contourArea expects int32/float32 points; plain
        # ints from json become int64 here -- confirm cv2 accepts this input.
        points = np.array([(flat[i], flat[i + 1]) for i in range(0, len(flat), 2)])
        if cv2.contourArea(points) > area_threshold:
            kept.append(annotation)
    print(f"Had {len(coco['annotations'])} annotations, removed {len(coco['annotations']) - len(kept)} annotations")
    coco["annotations"] = kept
    with open(output_json_path, "w") as fh:
        json.dump(coco, fh)
if __name__ == '__main__':
    # Example invocations -- uncomment the stage of the pipeline you need:
    #dataset_maker = CocoDatasetMaker('output_dataset', img_index_offset=200, label_index_offset=2475, output_dir="dataset_output")
    #dataset_maker.create_dataset()
    #merge_json_files("dataset_1.json", "dataset_2.json")
    # Post-process an existing dataset: drop tiny annotation polygons.
    remove_small_masks("dataset_2/dataset.json", area_threshold=9000)
| [
"json.dump",
"cv2.contourArea",
"json.load",
"os.mkdir",
"os.path.basename",
"cv2.cvtColor",
"os.path.isdir",
"cv2.approxPolyDP",
"cv2.arcLength",
"datetime.datetime.now",
"cv2.imread",
"pathlib.Path",
"numpy.array",
"glob.glob",
"cv2.drawContours",
"cv2.boundingRect",
"os.path.join"... | [((2126, 2161), 'glob.glob', 'glob.glob', (['f"""{self.dataset_dir}/*/"""'], {}), "(f'{self.dataset_dir}/*/')\n", (2135, 2161), False, 'import glob\n'), ((2243, 2293), 'os.path.join', 'os.path.join', (['self.output_dir', '"""img_with_contours"""'], {}), "(self.output_dir, 'img_with_contours')\n", (2255, 2293), False, 'import os\n'), ((4126, 4153), 'os.path.basename', 'os.path.basename', (['mask_path'], {}), '(mask_path)\n', (4142, 4153), False, 'import os\n'), ((4309, 4330), 'cv2.imread', 'cv2.imread', (['mask_path'], {}), '(mask_path)\n', (4319, 4330), False, 'import cv2\n'), ((4350, 4392), 'cv2.cvtColor', 'cv2.cvtColor', (['img_mask', 'cv2.COLOR_BGR2GRAY'], {}), '(img_mask, cv2.COLOR_BGR2GRAY)\n', (4362, 4392), False, 'import cv2\n'), ((4415, 4481), 'cv2.findContours', 'cv2.findContours', (['img_mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(img_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (4431, 4481), False, 'import cv2\n'), ((5883, 5898), 'json.load', 'json.load', (['file'], {}), '(file)\n', (5892, 5898), False, 'import json\n'), ((5964, 5979), 'json.load', 'json.load', (['file'], {}), '(file)\n', (5973, 5979), False, 'import json\n'), ((6169, 6202), 'json.dump', 'json.dump', (['main_json_obj', 'outfile'], {}), '(main_json_obj, outfile)\n', (6178, 6202), False, 'import json\n'), ((6362, 6377), 'json.load', 'json.load', (['file'], {}), '(file)\n', (6371, 6377), False, 'import json\n'), ((6608, 6628), 'numpy.array', 'np.array', (['cv_contour'], {}), '(cv_contour)\n', (6616, 6628), True, 'import numpy as np\n'), ((6644, 6671), 'cv2.contourArea', 'cv2.contourArea', (['cv_contour'], {}), '(cv_contour)\n', (6659, 6671), False, 'import cv2\n'), ((7004, 7032), 'json.dump', 'json.dump', (['json_obj', 'outfile'], {}), '(json_obj, outfile)\n', (7013, 7032), False, 'import json\n'), ((2001, 2031), 'os.path.isdir', 'os.path.isdir', (['self.output_dir'], {}), '(self.output_dir)\n', (2014, 2031), False, 'import os\n'), ((2045, 2070), 
'os.mkdir', 'os.mkdir', (['self.output_dir'], {}), '(self.output_dir)\n', (2053, 2070), False, 'import os\n'), ((2309, 2343), 'os.path.isdir', 'os.path.isdir', (['contour_folder_path'], {}), '(contour_folder_path)\n', (2322, 2343), False, 'import os\n'), ((2357, 2386), 'os.mkdir', 'os.mkdir', (['contour_folder_path'], {}), '(contour_folder_path)\n', (2365, 2386), False, 'import os\n'), ((3218, 3255), 'glob.glob', 'glob.glob', (['f"""{image_folder}mask*.png"""'], {}), "(f'{image_folder}mask*.png')\n", (3227, 3255), False, 'import glob\n'), ((3577, 3635), 'cv2.drawContours', 'cv2.drawContours', (['full_image', 'contours', '(-1)', '(0, 0, 255)', '(2)'], {}), '(full_image, contours, -1, (0, 0, 255), 2)\n', (3593, 3635), False, 'import cv2\n'), ((3963, 3992), 'json.dump', 'json.dump', (['self.coco', 'outfile'], {}), '(self.coco, outfile)\n', (3972, 3992), False, 'import json\n'), ((4601, 4625), 'cv2.contourArea', 'cv2.contourArea', (['contour'], {}), '(contour)\n', (4616, 4625), False, 'import cv2\n'), ((4912, 4937), 'cv2.boundingRect', 'cv2.boundingRect', (['contour'], {}), '(contour)\n', (4928, 4937), False, 'import cv2\n'), ((2489, 2533), 'os.path.join', 'os.path.join', (['image_folder', '"""full_image.png"""'], {}), "(image_folder, 'full_image.png')\n", (2501, 2533), False, 'import os\n'), ((3107, 3148), 'os.path.join', 'os.path.join', (['self.output_dir', 'image_name'], {}), '(self.output_dir, image_name)\n', (3119, 3148), False, 'import os\n'), ((3668, 3686), 'pathlib.Path', 'Path', (['image_folder'], {}), '(image_folder)\n', (3672, 3686), False, 'from pathlib import Path\n'), ((3716, 3821), 'os.path.join', 'os.path.join', (['contour_folder_path', 'f"""cont{img_index + self.img_index_offset}_{original_img_name}.png"""'], {}), "(contour_folder_path,\n f'cont{img_index + self.img_index_offset}_{original_img_name}.png')\n", (3728, 3821), False, 'import os\n'), ((3887, 3932), 'os.path.join', 'os.path.join', (['self.output_dir', '"""dataset.json"""'], {}), 
"(self.output_dir, 'dataset.json')\n", (3899, 3932), False, 'import os\n'), ((4752, 4792), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['contour', 'epsilon', '(True)'], {}), '(contour, epsilon, True)\n', (4768, 4792), False, 'import cv2\n'), ((4690, 4718), 'cv2.arcLength', 'cv2.arcLength', (['contour', '(True)'], {}), '(contour, True)\n', (4703, 4718), False, 'import cv2\n'), ((462, 485), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (483, 485), False, 'import datetime\n'), ((2958, 2981), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2979, 2981), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 09:31:55 2017
@author: matthew.goodwin
"""
import datetime
import sqlite3
import pandas as pd
import numpy as np
import os
import xlwt
sqlite_file="reservations.db"
# Set path for output based on relative path and location of script
FileDir = os.path.dirname(__file__)
print (FileDir)
OUTDIR = os.path.join(FileDir, 'output')
#Variable set up
#==============================================================================
# These variables will not change through the years. They are initialized to None
# so that in years beyond the first year analyzed, these values do not have to be calculated again
#==============================================================================
FACILITYID_filtered = None
campsite_count = None
# Set RecAreaIDs of objects for output. These come from RecAreaFacilities_API.csv
RecAreas = ['1061','1085','1088','1064','1071','1074','1035']
#Adjust YEARS list for each year you want analysis for
#YEAR_TABLE will be automatically updated to have the Table names for the necessary sheets based on YEARS
YEARS = [2015] #All years [2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007, 2006]
#YEARS = [2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007, 2006]
#No need to modify once YEARS is set
YEAR_TABLE = []
#Initialize DB connections
recreation_cnxn = sqlite3.connect(sqlite_file)
recreation_cursor = recreation_cnxn.cursor()
for yr in YEARS:
    YEAR_TABLE.append("Recreation_"+str(yr))
# Query template for selecting the date data needed to generate the yearly reservation
# analysis table; ___YEAR___ and ___FACID___ are replaced before execution.
date_query = '''select FacilityID, StartDate, EndDate from Recreation____YEAR___ where FacilityID in ('___FACID___')'''
#create folder for facilities
new_folder = os.path.join(OUTDIR, "RecAreas")
if not os.path.exists(new_folder):
    os.makedirs(new_folder)
#loop through years. "Enumerate" also provides access to index
for recarea in RecAreas:
#loop through RecAreas if more than one
for index, years in enumerate(YEARS):
print("Running Analysis for " + recarea + " in " + str(years))
# These tasks (done using PANDAS) are setup to run at the recArea level
#get facility IDs in rec area using Data/RecAreaFacilities_API_v1.csv
print (datetime.datetime.now().time())
#Check if recarea facilities have already been loaded
if (index == 0 ) :
RecArea_query='''
select *
from RecAreaFacilities
where RECAREAID = ___RECIDS___
'''
temp_RecArea_query = RecArea_query.replace("___RECIDS___", str(recarea))
FACILITYID_filtered = pd.read_sql_query(temp_RecArea_query,recreation_cnxn)
FACILITYID_list=FACILITYID_filtered['FACILITYID'].tolist()
print (str(len(FACILITYID_filtered)) + " facilities for RecArea " + recarea + " loaded")
#Format FACILITYID_lsit for use in SQL in statement by replacing [] with ()
FACILITYID_list = str(FACILITYID_list).replace('[','(',1)
FACILITYID_list = FACILITYID_list.replace(']',')',1)
else:
print("Faciltiies previously loaded")
#Pull Campsites that are in the list of facilities
if campsite_count is None:
print("Gathering Campsite Info")
#Setup SQL query
campsite_query='''
select *
from Campsites
where FACILITYID IN ___FACIDS___
'''
temp_campsite_query = campsite_query.replace("___FACIDS___", str(FACILITYID_list))
#Run SQL query
Campsites_RecArea=pd.read_sql_query(temp_campsite_query,recreation_cnxn)
#Count sites
campsite_count = len(Campsites_RecArea)
print(str(campsite_count)+" Campsites Loaded")
else:
print("Campsites previously loaded")
#setup SQL query
fac_target_query = '''
select *
from ___RESYEAR___
where FacilityID IN ___FACIDS___
'''
temp_fac_target_query = fac_target_query.replace("___RESYEAR___", YEAR_TABLE[index])
temp_fac_target_query = temp_fac_target_query.replace("___FACIDS___", str(FACILITYID_list))
#Make SQL query
print('Gathering Facility data associated with RecArea in '+str(years))
target_fac = pd.read_sql_query(temp_fac_target_query, recreation_cnxn)
target_fac = target_fac.reset_index()
#Run Analysis on collected facility data for RecArea
#Convert EndDate, StateDate and OrderDate to datetime format
target_fac['EndDate'] = pd.to_datetime(target_fac['EndDate'])
target_fac['StartDate'] = pd.to_datetime(target_fac['StartDate'])
target_fac['OrderDate'] = pd.to_datetime(target_fac['OrderDate'])
#Calculate Time of Stay (if applicable)
target_fac['stay_length']= np.where(target_fac['EndDate'].notnull(),(target_fac['EndDate']-target_fac['StartDate']) / np.timedelta64(1, 'D'),None)
#Get average stay time
Average_Stay = round(target_fac['stay_length'].mean(),2)
#Get Average Lead Time
target_fac['lead_time']= np.where(target_fac['StartDate'].notnull(),(target_fac['StartDate']-target_fac['OrderDate']) / np.timedelta64(1, 'D'),None)
Average_Lead = round(target_fac['lead_time'].mean(),2)
#Set up workbook
new_file = os.path.join(new_folder, "RecArea"+ recarea + "_"+ str(years)+ '.xls')
wb = xlwt.Workbook()
#Create RecArea basic sheet
#RECAREANAME, RECAREAID, RECAREALATITUDE,RECAREALONGITUDE
#Calculate Total number of campsites, average stay, average lead, Reservations 2015
#@TODO look into lat/long for all sites
print('Gathering RecArea Basic Information')
#Setup SQL query
RecArea_basic_query = '''
select *
from RecAreas
where RECAREAID = ___RECIDS___
'''
temp_RecArea_basic_query=RecArea_basic_query.replace("___RECIDS___", str(recarea))
#Run SQL query
RecArea_all = pd.read_sql_query(temp_RecArea_basic_query,recreation_cnxn)
RecArea_target = RecArea_all.loc[RecArea_all['RECAREAID']==int(recarea)]
rec_basic = wb.add_sheet('RecArea_Basic')
rec_basic.write(0,0,'RecAreaID')
rec_basic.write(0,1,str(RecArea_target['RECAREAID'].iloc[0]))
rec_basic.write(1,0,'RecAreaName')
rec_basic.write(1,1,RecArea_target['RECAREANAME'].iloc[0])
rec_basic.write(2,0,'RecAreaLatitude')
rec_basic.write(2,1,RecArea_target['RECAREALATITUDE'].iloc[0])
rec_basic.write(3,0,'RecAreaLongitude')
rec_basic.write(3,1,RecArea_target['RECAREALONGITUDE'].iloc[0])
#Create placeholders for items that will be filled out later
rec_basic.write(4,0,'Number Campsites')
rec_basic.write(4,1,campsite_count)
rec_basic.write(5,0,'Average Stay (days)')
rec_basic.write(5,1, Average_Stay)
rec_basic.write(6,0,'Average Lead (days)')
rec_basic.write(6,1,Average_Lead)
# #Total site reservations calcualtion
total_res=len(target_fac)
rec_basic.write(7,0,'Total Reservations')
rec_basic.write(7,1,total_res)
#Total # of reserved visitors
target_fac.NumberOfPeople = target_fac.NumberOfPeople.astype(float)
total_res_visitors = target_fac['NumberOfPeople'].sum()
rec_basic.write(8,0,'Total Reserved Visitors')
rec_basic.write(8,1,total_res_visitors)
wb.save(new_file)
#Item 1: In-state/out-of-state/intl distinction
print ("Customer Origin Analysis")
#Count Countries where reservations come from and convert to dataframe
country_count = target_fac['CustomerCountry'].value_counts().to_frame().reset_index()
#Setup sheet where this and the other relevant info will go
custloc_sheet = wb.add_sheet("Customer Location Breakdown")
#custloc_sheet.write()
custloc_sheet.write(0,0,"Reservation Breakdown by Country")
custloc_sheet.write(1,0,"Country")
custloc_sheet.write(1,1,"# of Reservations")
for index, row in country_count.iterrows():
custloc_sheet.write(int(index)+2,0,row['index'])
custloc_sheet.write(int(index)+2,1,row['CustomerCountry'])
#In State/Out of State/Out of Country distinction
#Total site reservaations calcualtion (done previously)
#total_res=len(target_fac)
#Collect reservations made by residents of the faciliity's state
instate_res=len(target_fac.loc[target_fac['CustomerState']==target_fac['FacilityState']])
#outcountry_res =target_fac.loc[target_fac['CustomerState']!=target_fac['FacilityState'] & target_fac['CustomerCountry']='USA']
#Collect reservations made by non-USA residents
outcountry_res =len(target_fac.loc[target_fac['CustomerCountry']!='USA'])
#Calculate residents that are out of state
##Total Reservations-(instate_res+outcountrye_res)=out of state residents
outstate_res = total_res-(instate_res+outcountry_res)
# Write this results to Customer Location Breakdown Sheet
custloc_sheet.write(0,4,"Reservation Breakdown by State")
custloc_sheet.write(1,4,"Category")
custloc_sheet.write(1,5,"# of Reservations")
custloc_sheet.write(2,4,"Same State as Site")
custloc_sheet.write(2,5,instate_res)
custloc_sheet.write(3,4,"Out of State")
custloc_sheet.write(3,5,outstate_res)
custloc_sheet.write(4,4,"Outside USA")
custloc_sheet.write(4,5,outcountry_res)
custloc_sheet.write(5,4,"Total Reservations")
custloc_sheet.write(5,5,total_res)
wb.save(new_file)
#############################################################
#Item 3 Zip code local/non-local distinction Note: Some Facilities do not have Zip
#Level 1: Reservations has same zip code as site
local_res_lev1 = len(target_fac.loc[target_fac['CustomerZIP']==target_fac['FacilityZIP']])
#Level 2: Reservations have same 3 digit level zip as facility
#Pull facility ZipCode (just use first row data as this should remanin the same for the filtered sheet)
#set level of zip code to check i.e zip_lvl=3 for 33027 would check against 330*
zip_lvl = 3
fac_zip = target_fac['FacilityZIP'].iloc[0][:zip_lvl]
#create new columns with ZipCodes as strings to use regex with
target_fac['CustomerZIP_Str']=target_fac['CustomerZIP']
target_fac['CustomerZIP_Str']=target_fac['CustomerZIP_Str'].apply(str)
#form 3 digit regex expression. if handles if there is no Zip
print ("Running Zip Codes Local/NonLocal Analysis")
if fac_zip != '':
fac_zip_regex=fac_zip+'*'
local_res_lev2=len(target_fac['CustomerZIP'].filter(regex=fac_zip_regex))
#write out to Breakdown sheet if data exists
custloc_sheet.write(0,7,"Reservation Breakdown by Zip Code")
custloc_sheet.write(1,7,"Category")
custloc_sheet.write(1,8,"# of Reservations")
custloc_sheet.write(2,7,"Same Zip as Site")
custloc_sheet.write(2,8,local_res_lev1)
custloc_sheet.write(3,7,"Within same "+str(zip_lvl)+ " Digit Level as Site")
custloc_sheet.write(3,8,local_res_lev2)
custloc_sheet.write(4,7,"Total Reservations")
custloc_sheet.write(4,8,total_res)
else:
print('No Facility Zip Code Available in Data Set')
#############################################################
#Item 1 - Add entity type to standard report
#get entity counts as a data frame to iterate over
entity_count = target_fac['EntityType'].value_counts().to_frame().reset_index()
#add sum of Number of people per entity type placeholder
entity_count['NumPeople'] = np.NaN
#print (len(entity_count))
#write to new sheet
# Entity Type
print ("Entity Type")
ent_sheet = wb.add_sheet("EntityType")
ent_sheet.write(0,0,'Entity Type')
ent_sheet.write(0,1,'# of Reservations')
ent_sheet.write(0,2,'Reserved Visitors')
for index, row in entity_count.iterrows():
ent_sheet.write(int(index)+1,0,row['index'])
ent_sheet.write(int(index)+1,1,row['EntityType'])
#count Number of people per EntityType
ReservedVisitors = target_fac.loc[target_fac['EntityType']==row['index']].NumberOfPeople.sum()
ent_sheet.write(int(index)+1,2,ReservedVisitors)
wb.save(new_file)
#Create sheet of related facilities
print("Creating Facility List")
fac_sheet = wb.add_sheet("FacilityList")
fac_sheet.write(0,0,'FacilityID')
fac_sheet.write(0,1,'FacilityName')
fac_sheet.write(0,2,'# of Reservations')
fac_sheet.write(0,3,'# of Reserved People')
#count reservations based on facility ID
FacList_count = target_fac['FacilityID'].value_counts().to_frame().reset_index()
# Rename field that actually holds FacilityID to facid. "FacilityID" field actually holds counts.
FacList_count = FacList_count.rename(columns={'index':'facid'})
FacGrouper = target_fac.groupby('FacilityID')
for index,row in FacList_count.iterrows():
fac_sheet.write(int(index)+1,0,row['facid'])
facRow = target_fac.loc[target_fac['FacilityID'] == row['facid']]
fac_sheet.write(int(index)+1,1,facRow.iloc[0]['Park'])
fac_sheet.write(int(index)+1,2,row['FacilityID'])
fac_res=target_fac.loc[target_fac['FacilityID']==row['facid']]['NumberOfPeople'].sum()
fac_sheet.write(int(index)+1,3,fac_res)
wb.save(new_file)
# Dates
# Create list of facilities where reservations are being calculated.
FacArray = []
for index,row in FacList_count.iterrows():
FacArray.append(int(row['facid']))
FacArrayFormatted = ','.join(str(i) for i in FacArray)
#calendar dates
print ("reservations by date")
fac_agg = wb.add_sheet("Date Analysis")
fac_agg.write(0,0,"Date")
fac_agg.write(0,1,"Number Reservations")
temp_date_query = date_query.replace("'___FACID___'", FacArrayFormatted)
fac_date_counter = {}
starting = "2015-01-01"
ending = "2015-12-31"
start_year_as_int = int(starting[:4])
start_month_as_int = int(starting[5:-3])
start_day_as_int = int(starting[-2:])
end_year_as_int = int(ending[:4])
end_month_as_int = int(ending[5:-3])
end_day_as_int = int(ending[-2:])
start_date = datetime.datetime(start_year_as_int, start_month_as_int, start_day_as_int)
end_date = datetime.datetime(end_year_as_int, end_month_as_int, end_day_as_int)
total_days = (end_date - start_date).days + 1
for day_number in range(total_days):
current_date = (start_date + datetime.timedelta(days = day_number)).date()
day_m = str(current_date)[-5:]
if not day_m in fac_date_counter:
fac_date_counter[day_m] = 0
else:
fac_date_counter[day_m] += 1
for index,year in enumerate(YEARS):
temp_year_query = temp_date_query.replace("___YEAR___", str(year))
date = recreation_cursor.execute(temp_year_query)
date_counter = {}
for record in date:
start = record[1]
end = record[2]
if start != None and end != None and end != '' and start != '':
start_year_as_int = int(start[:4])
start_month_as_int = int(start[5:-3])
start_day_as_int = int(start[-2:])
end_year_as_int = int(end[:4])
end_month_as_int = int(end[5:-3])
end_day_as_int = int(end[-2:])
start_date = datetime.datetime(start_year_as_int, start_month_as_int, start_day_as_int)
end_date = datetime.datetime(end_year_as_int, end_month_as_int, end_day_as_int)
total_days = (end_date - start_date).days + 1
for day_number in range(total_days):
current_date = (start_date + datetime.timedelta(days = day_number)).date()
day_m = str(current_date)[-5:]
# if not str(current_date) in date_counter:
# date_counter[str(current_date)] = 1
# else:
# date_counter[str(current_date)] += 1
if not day_m in fac_date_counter:
fac_date_counter[day_m] = 1
else:
fac_date_counter[day_m] += 1
# Handles reservations with only a start-date. Typical for one-day events such as tours, but not typical for campgrounds.
elif start != None and start != '' and (end == None or end == ''):
# Seperate out year, month, and day
start_year_as_int = int(start[:4])
start_month_as_int = int(start[5:-3])
start_day_as_int = int(start[-2:])
# Input into common time format
start_date = datetime.datetime(start_year_as_int, start_month_as_int, start_day_as_int)
# Convert date/time format to just date
start_date = start_date.date()
day_m = str(start_date)[-5:]
if not day_m in fac_date_counter:
fac_date_counter[day_m] = 1
else:
fac_date_counter[day_m] += 1
else:
continue
i = 1
for k,v in fac_date_counter.items():
fac_agg.write(i, 0, k)
fac_agg.write(i, 1, v)
i = i + 1
wb.save(new_file)
#Close db connections
recreation_cursor.close()
recreation_cnxn.close()
print ("finish {}".format(datetime.datetime.now().time()))
| [
"xlwt.Workbook",
"os.makedirs",
"os.path.dirname",
"os.path.exists",
"datetime.datetime.now",
"datetime.datetime",
"numpy.timedelta64",
"sqlite3.connect",
"pandas.to_datetime",
"pandas.read_sql_query",
"datetime.timedelta",
"os.path.join"
] | [((298, 323), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (313, 323), False, 'import os\n'), ((349, 380), 'os.path.join', 'os.path.join', (['FileDir', '"""output"""'], {}), "(FileDir, 'output')\n", (361, 380), False, 'import os\n'), ((1352, 1380), 'sqlite3.connect', 'sqlite3.connect', (['sqlite_file'], {}), '(sqlite_file)\n', (1367, 1380), False, 'import sqlite3\n'), ((1747, 1779), 'os.path.join', 'os.path.join', (['OUTDIR', '"""RecAreas"""'], {}), "(OUTDIR, 'RecAreas')\n", (1759, 1779), False, 'import os\n'), ((1787, 1813), 'os.path.exists', 'os.path.exists', (['new_folder'], {}), '(new_folder)\n', (1801, 1813), False, 'import os\n'), ((1819, 1842), 'os.makedirs', 'os.makedirs', (['new_folder'], {}), '(new_folder)\n', (1830, 1842), False, 'import os\n'), ((4357, 4414), 'pandas.read_sql_query', 'pd.read_sql_query', (['temp_fac_target_query', 'recreation_cnxn'], {}), '(temp_fac_target_query, recreation_cnxn)\n', (4374, 4414), True, 'import pandas as pd\n'), ((4624, 4661), 'pandas.to_datetime', 'pd.to_datetime', (["target_fac['EndDate']"], {}), "(target_fac['EndDate'])\n", (4638, 4661), True, 'import pandas as pd\n'), ((4696, 4735), 'pandas.to_datetime', 'pd.to_datetime', (["target_fac['StartDate']"], {}), "(target_fac['StartDate'])\n", (4710, 4735), True, 'import pandas as pd\n'), ((4770, 4809), 'pandas.to_datetime', 'pd.to_datetime', (["target_fac['OrderDate']"], {}), "(target_fac['OrderDate'])\n", (4784, 4809), True, 'import pandas as pd\n'), ((5490, 5505), 'xlwt.Workbook', 'xlwt.Workbook', ([], {}), '()\n', (5503, 5505), False, 'import xlwt\n'), ((6096, 6156), 'pandas.read_sql_query', 'pd.read_sql_query', (['temp_RecArea_basic_query', 'recreation_cnxn'], {}), '(temp_RecArea_basic_query, recreation_cnxn)\n', (6113, 6156), True, 'import pandas as pd\n'), ((14903, 14977), 'datetime.datetime', 'datetime.datetime', (['start_year_as_int', 'start_month_as_int', 'start_day_as_int'], {}), '(start_year_as_int, start_month_as_int, 
start_day_as_int)\n', (14920, 14977), False, 'import datetime\n'), ((14997, 15065), 'datetime.datetime', 'datetime.datetime', (['end_year_as_int', 'end_month_as_int', 'end_day_as_int'], {}), '(end_year_as_int, end_month_as_int, end_day_as_int)\n', (15014, 15065), False, 'import datetime\n'), ((2654, 2708), 'pandas.read_sql_query', 'pd.read_sql_query', (['temp_RecArea_query', 'recreation_cnxn'], {}), '(temp_RecArea_query, recreation_cnxn)\n', (2671, 2708), True, 'import pandas as pd\n'), ((3625, 3680), 'pandas.read_sql_query', 'pd.read_sql_query', (['temp_campsite_query', 'recreation_cnxn'], {}), '(temp_campsite_query, recreation_cnxn)\n', (3642, 3680), True, 'import pandas as pd\n'), ((4984, 5006), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (4998, 5006), True, 'import numpy as np\n'), ((5269, 5291), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (5283, 5291), True, 'import numpy as np\n'), ((18399, 18422), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (18420, 18422), False, 'import datetime\n'), ((2265, 2288), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2286, 2288), False, 'import datetime\n'), ((16212, 16286), 'datetime.datetime', 'datetime.datetime', (['start_year_as_int', 'start_month_as_int', 'start_day_as_int'], {}), '(start_year_as_int, start_month_as_int, start_day_as_int)\n', (16229, 16286), False, 'import datetime\n'), ((16318, 16386), 'datetime.datetime', 'datetime.datetime', (['end_year_as_int', 'end_month_as_int', 'end_day_as_int'], {}), '(end_year_as_int, end_month_as_int, end_day_as_int)\n', (16335, 16386), False, 'import datetime\n'), ((15209, 15244), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'day_number'}), '(days=day_number)\n', (15227, 15244), False, 'import datetime\n'), ((17637, 17711), 'datetime.datetime', 'datetime.datetime', (['start_year_as_int', 'start_month_as_int', 'start_day_as_int'], {}), 
'(start_year_as_int, start_month_as_int, start_day_as_int)\n', (17654, 17711), False, 'import datetime\n'), ((16566, 16601), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'day_number'}), '(days=day_number)\n', (16584, 16601), False, 'import datetime\n')] |
import tempfile
import os
import shutil
import unittest
import numpy as np
from deeprankcore.tools.pssm_3dcons_to_deeprank import pssm_3dcons_to_deeprank
from deeprankcore.tools.hdf5_to_csv import hdf5_to_csv
from deeprankcore.tools.CustomizeGraph import add_target
from deeprankcore.tools.embedding import manifold_embedding
class TestTools(unittest.TestCase):
def setUp(self):
self.pdb_path = "./tests/data/pdb/1ATN/"
self.pssm_path = "./tests/data/pssm/1ATN/1ATN.A.pdb.pssm"
self.ref = "./tests/data/ref/1ATN/"
self.h5_train_ref = "tests/data/train_ref/train_data.hdf5"
self.h5_graphs = "tests/hdf5/1ATN_ppi.hdf5"
def test_pssm_convert(self):
pssm_3dcons_to_deeprank(self.pssm_path)
def test_h52csv(self):
hdf5_to_csv(self.h5_train_ref)
def test_add_target(self):
f, target_path = tempfile.mkstemp(prefix="target", suffix=".lst")
os.close(f)
f, graph_path = tempfile.mkstemp(prefix="1ATN_ppi", suffix=".hdf5")
os.close(f)
try:
target_list = ""
for i in range(1, 11):
target_list += f"1ATN_{i}w {i}\n"
with open(target_path, "w", encoding="utf-8") as f:
f.write(target_list)
shutil.copy(self.h5_graphs, graph_path)
add_target(graph_path, "test_target", target_path)
finally:
os.remove(target_path)
os.remove(graph_path)
def test_embeding(self):
pos = np.random.rand(110, 3)
for method in ["tsne", "spectral", "mds"]:
_ = manifold_embedding(pos, method=method)
# Allow running this test module directly with `python`; test runners
# (pytest, unittest discovery) pick up the class regardless.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"os.remove",
"tempfile.mkstemp",
"deeprankcore.tools.CustomizeGraph.add_target",
"deeprankcore.tools.hdf5_to_csv.hdf5_to_csv",
"deeprankcore.tools.pssm_3dcons_to_deeprank.pssm_3dcons_to_deeprank",
"os.close",
"deeprankcore.tools.embedding.manifold_embedding",
"numpy.random.rand",
... | [((1677, 1692), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1690, 1692), False, 'import unittest\n'), ((707, 746), 'deeprankcore.tools.pssm_3dcons_to_deeprank.pssm_3dcons_to_deeprank', 'pssm_3dcons_to_deeprank', (['self.pssm_path'], {}), '(self.pssm_path)\n', (730, 746), False, 'from deeprankcore.tools.pssm_3dcons_to_deeprank import pssm_3dcons_to_deeprank\n'), ((783, 813), 'deeprankcore.tools.hdf5_to_csv.hdf5_to_csv', 'hdf5_to_csv', (['self.h5_train_ref'], {}), '(self.h5_train_ref)\n', (794, 813), False, 'from deeprankcore.tools.hdf5_to_csv import hdf5_to_csv\n'), ((872, 920), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""target"""', 'suffix': '""".lst"""'}), "(prefix='target', suffix='.lst')\n", (888, 920), False, 'import tempfile\n'), ((929, 940), 'os.close', 'os.close', (['f'], {}), '(f)\n', (937, 940), False, 'import os\n'), ((966, 1017), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""1ATN_ppi"""', 'suffix': '""".hdf5"""'}), "(prefix='1ATN_ppi', suffix='.hdf5')\n", (982, 1017), False, 'import tempfile\n'), ((1026, 1037), 'os.close', 'os.close', (['f'], {}), '(f)\n', (1034, 1037), False, 'import os\n'), ((1515, 1537), 'numpy.random.rand', 'np.random.rand', (['(110)', '(3)'], {}), '(110, 3)\n', (1529, 1537), True, 'import numpy as np\n'), ((1281, 1320), 'shutil.copy', 'shutil.copy', (['self.h5_graphs', 'graph_path'], {}), '(self.h5_graphs, graph_path)\n', (1292, 1320), False, 'import shutil\n'), ((1334, 1384), 'deeprankcore.tools.CustomizeGraph.add_target', 'add_target', (['graph_path', '"""test_target"""', 'target_path'], {}), "(graph_path, 'test_target', target_path)\n", (1344, 1384), False, 'from deeprankcore.tools.CustomizeGraph import add_target\n'), ((1414, 1436), 'os.remove', 'os.remove', (['target_path'], {}), '(target_path)\n', (1423, 1436), False, 'import os\n'), ((1449, 1470), 'os.remove', 'os.remove', (['graph_path'], {}), '(graph_path)\n', (1458, 1470), False, 'import os\n'), ((1605, 1643), 
'deeprankcore.tools.embedding.manifold_embedding', 'manifold_embedding', (['pos'], {'method': 'method'}), '(pos, method=method)\n', (1623, 1643), False, 'from deeprankcore.tools.embedding import manifold_embedding\n')] |
import tensorflow as tf
import random as rn
import numpy as np
import os
# Reproducibility setup: pin every RNG source used below (Python hash seed,
# NumPy, TensorFlow graph-level seed, Python `random`) so runs repeat.
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(45)
# Setting the graph-level random seed.
tf.set_random_seed(1337)
rn.seed(73)
from keras import backend as K
# Single-threaded session config: multi-threaded op scheduling is itself a
# source of run-to-run non-determinism.
session_conf = tf.ConfigProto(
    intra_op_parallelism_threads=1,
    inter_op_parallelism_threads=1)
# Force Tensorflow to use a single thread
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import math
import pandas as pd
import keras
from keras import backend as K
from keras.models import Model
from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM
from keras.layers.merge import concatenate
from keras.callbacks import TensorBoard, EarlyStopping
from keras.optimizers import Adam, Adamax
from keras.models import load_model
from keras import regularizers
import skopt
from skopt import gp_minimize, forest_minimize
from skopt.space import Real, Categorical, Integer
from skopt.plots import plot_convergence
from skopt.plots import plot_objective, plot_evaluations
from skopt.utils import use_named_args
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.metrics import confusion_matrix, classification_report
import sys
# --- skopt hyper-parameter search space -------------------------------------
# Shared optimizer setting plus per-branch (main / driver / motif) layer
# hyper-parameters; each `dim_*` below is one dimension of the search.
dim_learning_rate = Real(low=1e-4, high=1e-2, prior='log-uniform', name='learning_rate')
# Main-branch dimensions.
dim_weight_decay = Real(low=1e-3, high=0.5, prior='log-uniform', name='weight_decay')
dim_num_dense_layers = Integer(low=0, high=5, name='num_dense_layers')
dim_num_dense_nodes = Integer(low=5, high=1024, name='num_dense_nodes')
dim_activation = Categorical(categories=['relu', 'softplus'], name='activation')
dim_dropout = Real(low=1e-6, high=0.5, prior='log-uniform', name='dropout')
### DRIVER
dim_driver_weight_decay = Real(low=1e-3, high=0.5, prior='log-uniform', name='driver_weight_decay')
dim_driver_num_dense_layers = Integer(low=0, high=5, name='driver_num_dense_layers')
dim_driver_num_dense_nodes = Integer(low=5, high=1024, name='driver_num_dense_nodes')
dim_driver_activation = Categorical(categories=['relu', 'softplus'], name='driver_activation')
dim_driver_dropout = Real(low=1e-6, high=0.5, prior='log-uniform', name='driver_dropout')
### MOTIF
dim_motif_weight_decay = Real(low=1e-3, high=0.5, prior='log-uniform', name='motif_weight_decay')
dim_motif_num_dense_layers = Integer(low=0, high=5, name='motif_num_dense_layers')
dim_motif_num_dense_nodes = Integer(low=5, high=1024, name='motif_num_dense_nodes')
dim_motif_activation = Categorical(categories=['relu', 'softplus'], name='motif_activation')
dim_motif_dropout = Real(low=1e-6, high=0.5, prior='log-uniform', name='motif_dropout')
# Order here must match both the `fitness` signature and `default_paramaters`.
dimensions = [dim_learning_rate, dim_weight_decay, dim_dropout, dim_num_dense_layers, dim_num_dense_nodes,
             dim_activation, dim_driver_weight_decay, dim_driver_dropout, dim_driver_num_dense_layers, dim_driver_num_dense_nodes,
             dim_driver_activation, dim_motif_weight_decay, dim_motif_dropout, dim_motif_num_dense_layers, dim_motif_num_dense_nodes,
             dim_motif_activation]
# Starting point for the optimization, one value per dimension above.
default_paramaters = [1e-4, 1e-3, 1e-6, 0, 100, 'relu', 1e-3, 1e-6, 0, 100, 'relu', 1e-3, 1e-6, 0, 100, 'relu']
def log_dir_name(learning_rate, weight_decay, num_dense_layers, num_dense_nodes, activation):
    """Build (and create on disk) the TensorBoard log directory for one
    hyper-parameter combination.

    Reads the module-level globals ``fold`` and ``output_name`` (set in
    ``__main__``) to namespace the directory per cross-validation fold.

    Returns:
        str: path of the (now existing) log directory.
    """
    log_dir = "./crossvalidation{}_logs/{}__lr_{}_wd_{}_layers_{}_nodes{}_{}/".format(
        fold, output_name, learning_rate, weight_decay,
        num_dense_layers, num_dense_nodes, activation)
    # exist_ok avoids the check-then-create race of `os.path.exists` followed
    # by `os.makedirs` (which raises if another process created it first).
    os.makedirs(log_dir, exist_ok=True)
    return log_dir
def create_model(learning_rate, weight_decay, dropout, num_dense_layers, num_dense_nodes, activation,
                driver_weight_decay, driver_dropout, driver_num_dense_layers, driver_num_dense_nodes, driver_activation,
                motif_weight_decay, motif_dropout, motif_num_dense_layers, motif_num_dense_nodes, motif_activation):
    """Assemble the three-branch (main / driver / motif) Keras classifier.

    Each branch is a stack of Dense(+L2 regularization)+Dropout layers; the
    first Dense layer is always created and the loop adds layers
    2..num_dense_layers on top.  The three branch outputs are concatenated
    and fed to a softmax output layer.

    Input widths (`input_size`, `input_driver_size`, `input_motif_size`) and
    `num_classes` are read from module scope — assumed to be assigned in
    __main__ (assignment not visible in this excerpt; confirm before reuse).

    Returns:
        A compiled `keras.Model` (Adam optimizer, categorical cross-entropy,
        accuracy metric) taking [main, driver, motif] inputs.
    """
    ### Define model here
    # --- main branch ---
    main_input = Input(shape=(input_size,), name='main_input')
    name = 'main_layer_dense_{0}'.format(1)
    main_branch = Dense(num_dense_nodes, activation=activation, name=name,
                kernel_regularizer=regularizers.l2(weight_decay))(main_input)
    main_branch = Dropout(dropout)(main_branch)
    for i in range(1,num_dense_layers):
        name = 'main_layer_dense_{0}'.format(i + 1)
        main_branch = Dense(num_dense_nodes, activation=activation, name=name, kernel_regularizer=regularizers.l2(weight_decay))(main_branch)
        main_branch = Dropout(dropout)(main_branch)
    # --- driver branch (same shape as main, driver_* hyper-parameters) ---
    driver_input = Input(shape=(input_driver_size,), name='driver_input')
    name = 'driver_layer_dense_{0}'.format(1)
    driver_branch = Dense(driver_num_dense_nodes, activation=driver_activation, name=name,
                kernel_regularizer=regularizers.l2(driver_weight_decay))(driver_input)
    driver_branch = Dropout(driver_dropout)(driver_branch)
    for i in range(1,driver_num_dense_layers):
        name = 'driver_layer_dense_{0}'.format(i + 1)
        driver_branch = Dense(driver_num_dense_nodes, activation=driver_activation, name=name, kernel_regularizer=regularizers.l2(driver_weight_decay))(driver_branch)
        driver_branch = Dropout(driver_dropout)(driver_branch)
    # --- motif branch (same shape as main, motif_* hyper-parameters) ---
    motif_input = Input(shape=(input_motif_size,), name='motif_input')
    name = 'motif_layer_dense_{0}'.format(1)
    motif_branch = Dense(motif_num_dense_nodes, activation=motif_activation, name=name,
                kernel_regularizer=regularizers.l2(motif_weight_decay))(motif_input)
    motif_branch = Dropout(motif_dropout)(motif_branch)
    for i in range(1,motif_num_dense_layers):
        name = 'motif_layer_dense_{0}'.format(i + 1)
        motif_branch = Dense(motif_num_dense_nodes, activation=motif_activation, name=name, kernel_regularizer=regularizers.l2(motif_weight_decay))(motif_branch)
        motif_branch = Dropout(motif_dropout)(motif_branch)
    # Fuse the three branches and classify.
    x = concatenate([main_branch, driver_branch, motif_branch])
    predictions = Dense(num_classes, activation='softmax', name='output')(x)
    optimizer = Adam(lr=learning_rate)
    model = Model(inputs=[main_input, driver_input, motif_input], outputs=predictions)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
@use_named_args(dimensions=dimensions)
def fitness(learning_rate, weight_decay, dropout, num_dense_layers, num_dense_nodes, activation, driver_weight_decay, driver_dropout, driver_num_dense_layers, driver_num_dense_nodes, driver_activation,
        motif_weight_decay, motif_dropout, motif_num_dense_layers, motif_num_dense_nodes, motif_activation):
    """skopt objective: train one model with the given hyper-parameters and
    return the NEGATIVE final validation accuracy (skopt minimizes).

    Side effects: writes TensorBoard logs under `log_dir_name(...)`, saves
    the model to the module-level `path_best_model` whenever it beats the
    global `best_accuracy`, and updates that global.

    Reads module-level training data (`x_train`, `x_train_driver`,
    `x_train_motif`, `y_train`, `validation_data`) set in __main__.
    NOTE(review): __main__ assigns a pandas DataFrame to `validation_data`,
    which is not the (x_val, y_val) tuple Keras `fit` expects — confirm the
    intended validation tuple is built before running (see FIXME below).
    """
    global best_accuracy
    # best_accuracy = 0.0
    # Log the sampled point so each skopt evaluation is traceable.
    print('learning rate: ', learning_rate)
    print('weight_decay: ', weight_decay)
    print('dropout', dropout)
    print('num_dense_layers: ', num_dense_layers)
    print('num_dense_nodes: ', num_dense_nodes)
    print('activation: ', activation)
    print('driver_weight_decay: ', driver_weight_decay)
    print('driver_dropout', driver_dropout)
    print('driver_num_dense_layers: ', driver_num_dense_layers)
    print('driver_num_dense_nodes: ', driver_num_dense_nodes)
    print('driver_activation: ', driver_activation)
    print('motif_weight_decay: ', motif_weight_decay)
    print('motif_dropout', motif_dropout)
    print('motif_num_dense_layers: ', motif_num_dense_layers)
    print('motif_num_dense_nodes: ', motif_num_dense_nodes)
    print('motif_activation: ', motif_activation)
    model = create_model(learning_rate=learning_rate, weight_decay=weight_decay, dropout=dropout,
                        num_dense_layers=num_dense_layers, num_dense_nodes=num_dense_nodes, activation=activation,
                        driver_weight_decay=driver_weight_decay, driver_dropout=driver_dropout,
                        driver_num_dense_layers=driver_num_dense_layers, driver_num_dense_nodes=driver_num_dense_nodes, driver_activation=driver_activation,
                        motif_weight_decay=motif_weight_decay, motif_dropout=motif_dropout,
                        motif_num_dense_layers=motif_num_dense_layers, motif_num_dense_nodes=motif_num_dense_nodes, motif_activation=motif_activation)
    log_dir = log_dir_name(learning_rate, weight_decay, num_dense_layers, num_dense_nodes, activation)
    callback_log = TensorBoard(
        log_dir=log_dir,
        histogram_freq=0,
        batch_size=32,
        write_graph=True,
        write_grads=True,
        write_images=False)
    callbacks = [callback_log]
    ### FIXME model.fit - x_train and y_train
    history = model.fit(x=[x_train, x_train_driver, x_train_motif], y=y_train, epochs=50, batch_size=32, validation_data=validation_data,
                        callbacks=callbacks)
    # Score on the last epoch's validation accuracy.
    accuracy = history.history['val_acc'][-1]
    print('Accuracy: {0:.2%}'.format(accuracy))
    # Persist only the best model seen across the whole search.
    if accuracy > best_accuracy:
        model.save(path_best_model)
        best_accuracy = accuracy
    # Free the graph/session memory between skopt evaluations.
    del model
    K.clear_session()
    return -accuracy
def to_table(report):
report = report.splitlines()
res = []
header = [''] + report[0].split()
for row in report[2:-4]:
res.append(np.array(row.split()))
return np.array(res), header
if __name__ == '__main__':
fold = int(sys.argv[1])
input_data_filename = sys.argv[2]
input_driver_data_filename = sys.argv[3]
input_motif_data_filename = sys.argv[4]
output_name = sys.argv[5]
path_best_model = './{}__crossvalidation{}_best_model.keras'.format(output_name, fold)
best_accuracy = 0.0
data = pd.read_csv("./{}.csv".format(input_data_filename), index_col=[0])
driver_data = pd.read_csv("./{}.csv".format(input_driver_data_filename),
index_col=[0])
motif_data = pd.read_csv("./{}.csv".format(input_motif_data_filename),
index_col=[0])
### Making training, test, validation data
training_samples = pd.read_csv('./training_idx_pcawg.csv', index_col=[0])
training_samples.columns = ['guid', 'split']
training_samples = training_samples[training_samples.split == fold]
frames = []
for guid_ in training_samples.guid:
frames.append(data[data['guid'].str.contains(guid_)])
training_data = pd.concat(frames)
training_data = training_data.sort_values(by=['guid'])
print(training_data.head())
validation_samples = pd.read_csv('./validation_idx_pcawg.csv', index_col=[0])
validation_samples.columns = ['guid', 'split']
validation_samples = validation_samples[validation_samples.split == fold]
validation_data = data[data['guid'].isin(validation_samples.guid)]
validation_data = validation_data.sort_values(by=['guid'])
print(validation_data.head())
test_samples = pd.read_csv('./test_idx_pcawg.csv', index_col=[0])
test_samples.columns = ['guid', 'split']
test_samples = test_samples[test_samples.split == fold]
test_data = data[data['guid'].isin(test_samples.guid)]
test_data = test_data.sort_values(by=['guid'])
print(test_data.head())
training_data = training_data.drop(['guid'], axis=1)
validation_data = validation_data.drop(['guid'], axis=1)
test_data = test_data.drop(['guid'], axis=1)
x_train = training_data.values
y_train = training_data.index
x_val = validation_data.values
y_val = validation_data.index
x_test = test_data.values
y_test = test_data.index
### DRIVER Making training, test, validation data
frames = []
for guid_ in training_samples.guid:
frames.append(driver_data[driver_data['guid'].str.contains(guid_)])
driver_training_data = pd.concat(frames)
driver_training_data = driver_training_data.sort_values(by=['guid'])
driver_validation_data = driver_data[driver_data['guid'].isin(validation_samples.guid)]
driver_validation_data = driver_validation_data.sort_values(by=['guid'])
driver_test_data = driver_data[driver_data['guid'].isin(test_samples.guid)]
driver_test_data = driver_test_data.sort_values(by=['guid'])
driver_training_data = driver_training_data.drop(['guid'], axis=1)
driver_validation_data = driver_validation_data.drop(['guid'], axis=1)
driver_test_data = driver_test_data.drop(['guid'], axis=1)
x_train_driver = driver_training_data.values
y_train_driver = driver_training_data.index
x_val_driver = driver_validation_data.values
y_val_driver = driver_validation_data.index
x_test_driver = driver_test_data.values
y_test_driver = driver_test_data.index
### MOTIF Making training, test, validation data
frames = []
for guid_ in training_samples.guid:
frames.append(motif_data[motif_data['guid'].str.contains(guid_)])
motif_training_data = pd.concat(frames)
motif_training_data = motif_training_data.sort_values(by=['guid'])
motif_validation_data = motif_data[motif_data['guid'].isin(validation_samples.guid)]
motif_validation_data = motif_validation_data.sort_values(by=['guid'])
motif_test_data = motif_data[motif_data['guid'].isin(test_samples.guid)]
motif_test_data = motif_test_data.sort_values(by=['guid'])
motif_training_data = motif_training_data.drop(['guid'], axis=1)
motif_validation_data = motif_validation_data.drop(['guid'], axis=1)
motif_test_data = motif_test_data.drop(['guid'], axis=1)
x_train_motif = motif_training_data.values
y_train_motif = motif_training_data.index
x_val_motif = motif_validation_data.values
y_val_motif = motif_validation_data.index
x_test_motif = motif_test_data.values
y_test_motif = motif_test_data.index
encoder = LabelEncoder()
test_labels_names = y_test
y_test = encoder.fit_transform(y_test)
test_labels = y_test
num_of_cancers = len(encoder.classes_)
print("Num of cancers: {}".format(num_of_cancers))
y_test = keras.utils.to_categorical(y_test, num_of_cancers)
y_train = encoder.fit_transform(y_train)
y_train = keras.utils.to_categorical(y_train, num_of_cancers)
y_val = encoder.fit_transform(y_val)
y_val = keras.utils.to_categorical(y_val, num_of_cancers)
### DRIVER + MOTIF
y_train_driver = encoder.fit_transform(y_train_driver)
y_train_driver = keras.utils.to_categorical(y_train_driver, num_of_cancers)
y_val_driver = encoder.fit_transform(y_val_driver)
y_val_driver = keras.utils.to_categorical(y_val_driver, num_of_cancers)
y_test_driver = encoder.fit_transform(y_test_driver)
y_test_driver = keras.utils.to_categorical(y_test_driver, num_of_cancers)
y_train_motif = encoder.fit_transform(y_train_motif)
y_train_motif = keras.utils.to_categorical(y_train_motif, num_of_cancers)
y_val_motif = encoder.fit_transform(y_val_motif)
y_val_motif = keras.utils.to_categorical(y_val_motif, num_of_cancers)
y_test_motif = encoder.fit_transform(y_test_motif)
y_test_motif = keras.utils.to_categorical(y_test_motif, num_of_cancers)
validation_data = ([x_val, x_val_driver, x_val_motif], y_val)
input_size = x_train.shape[1]
input_driver_size = x_train_driver.shape[1]
input_motif_size = x_train_motif.shape[1]
num_classes = num_of_cancers
### Run Bayesian optimization
search_result = gp_minimize(func=fitness, dimensions=dimensions, acq_func='EI', n_calls=200, x0=default_paramaters, random_state=7, n_jobs=-1)
# Save Best Hyperparameters
hyps = np.asarray(search_result.x)
np.save('./crossvalidation_results/{}__fold{}_hyperparams'.format(output_name, fold), hyps, allow_pickle=False)
model = load_model(path_best_model)
# Evaluate best model on test data
result = model.evaluate(x=[x_test, x_test_driver, x_test_motif], y=y_test)
# Save best model
model.save('./crossvalidation_results/{}__fold_{}_model.keras'.format(output_name, fold))
Y_pred = model.predict([x_test, x_test_driver, x_test_motif])
y_pred = np.argmax(Y_pred, axis=1)
a = pd.Series(test_labels_names)
b = pd.Series(test_labels)
d = pd.DataFrame({'Factor': b, 'Cancer': a})
d = d.drop_duplicates()
d = d.sort_values('Factor')
## Create array of prediction probabilities
p_df = pd.DataFrame(data=Y_pred, columns=d.Cancer, index=test_labels_names)
## Generate Confusion Matrix
c_matrix = confusion_matrix(test_labels, y_pred)
c_df = pd.DataFrame(data=c_matrix, index=d.Cancer, columns=d.Cancer)
## Generate Class Report
c_report = classification_report(test_labels, y_pred, digits=3)
r, header_ = to_table(c_report)
r = pd.DataFrame(data=r, columns=header_, index=d.Cancer)
samples = []
for i in r.index:
l = len(data[data.index == i])
samples.append(l)
r['sample_size'] = samples
r.columns = [x.replace('-', '_') for x in r.columns]
r['f1_score'] = [float(x) for x in r.f1_score]
r.to_csv('./report/{}_class_report_fold{}.csv'.format(output_name, fold))
c_df.to_csv('./report/{}_confusion_matrix_fold{}.csv'.format(output_name, fold))
p_df.to_csv('./report/{}_probability_classification_{}.csv'.format(output_name, fold)) | [
"keras.models.load_model",
"keras.regularizers.l2",
"numpy.random.seed",
"numpy.argmax",
"pandas.read_csv",
"keras.layers.merge.concatenate",
"keras.models.Model",
"sklearn.metrics.classification_report",
"skopt.space.Real",
"tensorflow.ConfigProto",
"keras.layers.Input",
"tensorflow.get_defau... | [((109, 127), 'numpy.random.seed', 'np.random.seed', (['(45)'], {}), '(45)\n', (123, 127), True, 'import numpy as np\n'), ((168, 192), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1337)'], {}), '(1337)\n', (186, 192), True, 'import tensorflow as tf\n'), ((194, 205), 'random.seed', 'rn.seed', (['(73)'], {}), '(73)\n', (201, 205), True, 'import random as rn\n'), ((254, 332), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': '(1)', 'inter_op_parallelism_threads': '(1)'}), '(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n', (268, 332), True, 'import tensorflow as tf\n'), ((455, 474), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (468, 474), True, 'from keras import backend as K\n'), ((1335, 1405), 'skopt.space.Real', 'Real', ([], {'low': '(0.0001)', 'high': '(0.01)', 'prior': '"""log-uniform"""', 'name': '"""learning_rate"""'}), "(low=0.0001, high=0.01, prior='log-uniform', name='learning_rate')\n", (1339, 1405), False, 'from skopt.space import Real, Categorical, Integer\n'), ((1424, 1491), 'skopt.space.Real', 'Real', ([], {'low': '(0.001)', 'high': '(0.5)', 'prior': '"""log-uniform"""', 'name': '"""weight_decay"""'}), "(low=0.001, high=0.5, prior='log-uniform', name='weight_decay')\n", (1428, 1491), False, 'from skopt.space import Real, Categorical, Integer\n'), ((1515, 1562), 'skopt.space.Integer', 'Integer', ([], {'low': '(0)', 'high': '(5)', 'name': '"""num_dense_layers"""'}), "(low=0, high=5, name='num_dense_layers')\n", (1522, 1562), False, 'from skopt.space import Real, Categorical, Integer\n'), ((1586, 1635), 'skopt.space.Integer', 'Integer', ([], {'low': '(5)', 'high': '(1024)', 'name': '"""num_dense_nodes"""'}), "(low=5, high=1024, name='num_dense_nodes')\n", (1593, 1635), False, 'from skopt.space import Real, Categorical, Integer\n'), ((1654, 1717), 'skopt.space.Categorical', 'Categorical', ([], {'categories': "['relu', 'softplus']", 'name': 
'"""activation"""'}), "(categories=['relu', 'softplus'], name='activation')\n", (1665, 1717), False, 'from skopt.space import Real, Categorical, Integer\n'), ((1733, 1795), 'skopt.space.Real', 'Real', ([], {'low': '(1e-06)', 'high': '(0.5)', 'prior': '"""log-uniform"""', 'name': '"""dropout"""'}), "(low=1e-06, high=0.5, prior='log-uniform', name='dropout')\n", (1737, 1795), False, 'from skopt.space import Real, Categorical, Integer\n'), ((1833, 1907), 'skopt.space.Real', 'Real', ([], {'low': '(0.001)', 'high': '(0.5)', 'prior': '"""log-uniform"""', 'name': '"""driver_weight_decay"""'}), "(low=0.001, high=0.5, prior='log-uniform', name='driver_weight_decay')\n", (1837, 1907), False, 'from skopt.space import Real, Categorical, Integer\n'), ((1938, 1992), 'skopt.space.Integer', 'Integer', ([], {'low': '(0)', 'high': '(5)', 'name': '"""driver_num_dense_layers"""'}), "(low=0, high=5, name='driver_num_dense_layers')\n", (1945, 1992), False, 'from skopt.space import Real, Categorical, Integer\n'), ((2023, 2079), 'skopt.space.Integer', 'Integer', ([], {'low': '(5)', 'high': '(1024)', 'name': '"""driver_num_dense_nodes"""'}), "(low=5, high=1024, name='driver_num_dense_nodes')\n", (2030, 2079), False, 'from skopt.space import Real, Categorical, Integer\n'), ((2105, 2175), 'skopt.space.Categorical', 'Categorical', ([], {'categories': "['relu', 'softplus']", 'name': '"""driver_activation"""'}), "(categories=['relu', 'softplus'], name='driver_activation')\n", (2116, 2175), False, 'from skopt.space import Real, Categorical, Integer\n'), ((2198, 2267), 'skopt.space.Real', 'Real', ([], {'low': '(1e-06)', 'high': '(0.5)', 'prior': '"""log-uniform"""', 'name': '"""driver_dropout"""'}), "(low=1e-06, high=0.5, prior='log-uniform', name='driver_dropout')\n", (2202, 2267), False, 'from skopt.space import Real, Categorical, Integer\n'), ((2303, 2376), 'skopt.space.Real', 'Real', ([], {'low': '(0.001)', 'high': '(0.5)', 'prior': '"""log-uniform"""', 'name': '"""motif_weight_decay"""'}), 
"(low=0.001, high=0.5, prior='log-uniform', name='motif_weight_decay')\n", (2307, 2376), False, 'from skopt.space import Real, Categorical, Integer\n'), ((2406, 2459), 'skopt.space.Integer', 'Integer', ([], {'low': '(0)', 'high': '(5)', 'name': '"""motif_num_dense_layers"""'}), "(low=0, high=5, name='motif_num_dense_layers')\n", (2413, 2459), False, 'from skopt.space import Real, Categorical, Integer\n'), ((2489, 2544), 'skopt.space.Integer', 'Integer', ([], {'low': '(5)', 'high': '(1024)', 'name': '"""motif_num_dense_nodes"""'}), "(low=5, high=1024, name='motif_num_dense_nodes')\n", (2496, 2544), False, 'from skopt.space import Real, Categorical, Integer\n'), ((2569, 2638), 'skopt.space.Categorical', 'Categorical', ([], {'categories': "['relu', 'softplus']", 'name': '"""motif_activation"""'}), "(categories=['relu', 'softplus'], name='motif_activation')\n", (2580, 2638), False, 'from skopt.space import Real, Categorical, Integer\n'), ((2660, 2728), 'skopt.space.Real', 'Real', ([], {'low': '(1e-06)', 'high': '(0.5)', 'prior': '"""log-uniform"""', 'name': '"""motif_dropout"""'}), "(low=1e-06, high=0.5, prior='log-uniform', name='motif_dropout')\n", (2664, 2728), False, 'from skopt.space import Real, Categorical, Integer\n'), ((6372, 6409), 'skopt.utils.use_named_args', 'use_named_args', ([], {'dimensions': 'dimensions'}), '(dimensions=dimensions)\n', (6386, 6409), False, 'from skopt.utils import use_named_args\n'), ((4033, 4078), 'keras.layers.Input', 'Input', ([], {'shape': '(input_size,)', 'name': '"""main_input"""'}), "(shape=(input_size,), name='main_input')\n", (4038, 4078), False, 'from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM\n'), ((4638, 4692), 'keras.layers.Input', 'Input', ([], {'shape': '(input_driver_size,)', 'name': '"""driver_input"""'}), "(shape=(input_driver_size,), name='driver_input')\n", (4643, 4692), False, 'from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM\n'), ((5334, 5386), 
'keras.layers.Input', 'Input', ([], {'shape': '(input_motif_size,)', 'name': '"""motif_input"""'}), "(shape=(input_motif_size,), name='motif_input')\n", (5339, 5386), False, 'from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM\n'), ((5999, 6054), 'keras.layers.merge.concatenate', 'concatenate', (['[main_branch, driver_branch, motif_branch]'], {}), '([main_branch, driver_branch, motif_branch])\n', (6010, 6054), False, 'from keras.layers.merge import concatenate\n'), ((6148, 6170), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (6152, 6170), False, 'from keras.optimizers import Adam, Adamax\n'), ((6183, 6257), 'keras.models.Model', 'Model', ([], {'inputs': '[main_input, driver_input, motif_input]', 'outputs': 'predictions'}), '(inputs=[main_input, driver_input, motif_input], outputs=predictions)\n', (6188, 6257), False, 'from keras.models import Model\n'), ((8413, 8535), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(0)', 'batch_size': '(32)', 'write_graph': '(True)', 'write_grads': '(True)', 'write_images': '(False)'}), '(log_dir=log_dir, histogram_freq=0, batch_size=32, write_graph=\n True, write_grads=True, write_images=False)\n', (8424, 8535), False, 'from keras.callbacks import TensorBoard, EarlyStopping\n'), ((9054, 9071), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (9069, 9071), True, 'from keras import backend as K\n'), ((10012, 10066), 'pandas.read_csv', 'pd.read_csv', (['"""./training_idx_pcawg.csv"""'], {'index_col': '[0]'}), "('./training_idx_pcawg.csv', index_col=[0])\n", (10023, 10066), True, 'import pandas as pd\n'), ((10326, 10343), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (10335, 10343), True, 'import pandas as pd\n'), ((10461, 10517), 'pandas.read_csv', 'pd.read_csv', (['"""./validation_idx_pcawg.csv"""'], {'index_col': '[0]'}), "('./validation_idx_pcawg.csv', index_col=[0])\n", (10472, 10517), 
True, 'import pandas as pd\n'), ((10835, 10885), 'pandas.read_csv', 'pd.read_csv', (['"""./test_idx_pcawg.csv"""'], {'index_col': '[0]'}), "('./test_idx_pcawg.csv', index_col=[0])\n", (10846, 10885), True, 'import pandas as pd\n'), ((11709, 11726), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (11718, 11726), True, 'import pandas as pd\n'), ((12817, 12834), 'pandas.concat', 'pd.concat', (['frames'], {}), '(frames)\n', (12826, 12834), True, 'import pandas as pd\n'), ((13700, 13714), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (13712, 13714), False, 'from sklearn.preprocessing import LabelBinarizer, LabelEncoder\n'), ((13926, 13976), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_of_cancers'], {}), '(y_test, num_of_cancers)\n', (13952, 13976), False, 'import keras\n'), ((14036, 14087), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_of_cancers'], {}), '(y_train, num_of_cancers)\n', (14062, 14087), False, 'import keras\n'), ((14141, 14190), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_val', 'num_of_cancers'], {}), '(y_val, num_of_cancers)\n', (14167, 14190), False, 'import keras\n'), ((14294, 14352), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train_driver', 'num_of_cancers'], {}), '(y_train_driver, num_of_cancers)\n', (14320, 14352), False, 'import keras\n'), ((14427, 14483), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_val_driver', 'num_of_cancers'], {}), '(y_val_driver, num_of_cancers)\n', (14453, 14483), False, 'import keras\n'), ((14561, 14618), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test_driver', 'num_of_cancers'], {}), '(y_test_driver, num_of_cancers)\n', (14587, 14618), False, 'import keras\n'), ((14697, 14754), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train_motif', 'num_of_cancers'], {}), '(y_train_motif, num_of_cancers)\n', (14723, 14754), False, 
'import keras\n'), ((14826, 14881), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_val_motif', 'num_of_cancers'], {}), '(y_val_motif, num_of_cancers)\n', (14852, 14881), False, 'import keras\n'), ((14956, 15012), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test_motif', 'num_of_cancers'], {}), '(y_test_motif, num_of_cancers)\n', (14982, 15012), False, 'import keras\n'), ((15297, 15427), 'skopt.gp_minimize', 'gp_minimize', ([], {'func': 'fitness', 'dimensions': 'dimensions', 'acq_func': '"""EI"""', 'n_calls': '(200)', 'x0': 'default_paramaters', 'random_state': '(7)', 'n_jobs': '(-1)'}), "(func=fitness, dimensions=dimensions, acq_func='EI', n_calls=200,\n x0=default_paramaters, random_state=7, n_jobs=-1)\n", (15308, 15427), False, 'from skopt import gp_minimize, forest_minimize\n'), ((15468, 15495), 'numpy.asarray', 'np.asarray', (['search_result.x'], {}), '(search_result.x)\n', (15478, 15495), True, 'import numpy as np\n'), ((15624, 15651), 'keras.models.load_model', 'load_model', (['path_best_model'], {}), '(path_best_model)\n', (15634, 15651), False, 'from keras.models import load_model\n'), ((15966, 15991), 'numpy.argmax', 'np.argmax', (['Y_pred'], {'axis': '(1)'}), '(Y_pred, axis=1)\n', (15975, 15991), True, 'import numpy as np\n'), ((16001, 16029), 'pandas.Series', 'pd.Series', (['test_labels_names'], {}), '(test_labels_names)\n', (16010, 16029), True, 'import pandas as pd\n'), ((16038, 16060), 'pandas.Series', 'pd.Series', (['test_labels'], {}), '(test_labels)\n', (16047, 16060), True, 'import pandas as pd\n'), ((16069, 16109), 'pandas.DataFrame', 'pd.DataFrame', (["{'Factor': b, 'Cancer': a}"], {}), "({'Factor': b, 'Cancer': a})\n", (16081, 16109), True, 'import pandas as pd\n'), ((16230, 16298), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'Y_pred', 'columns': 'd.Cancer', 'index': 'test_labels_names'}), '(data=Y_pred, columns=d.Cancer, index=test_labels_names)\n', (16242, 16298), True, 'import pandas as pd\n'), 
((16348, 16385), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_labels', 'y_pred'], {}), '(test_labels, y_pred)\n', (16364, 16385), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((16397, 16458), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'c_matrix', 'index': 'd.Cancer', 'columns': 'd.Cancer'}), '(data=c_matrix, index=d.Cancer, columns=d.Cancer)\n', (16409, 16458), True, 'import pandas as pd\n'), ((16504, 16556), 'sklearn.metrics.classification_report', 'classification_report', (['test_labels', 'y_pred'], {'digits': '(3)'}), '(test_labels, y_pred, digits=3)\n', (16525, 16556), False, 'from sklearn.metrics import confusion_matrix, classification_report\n'), ((16602, 16655), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'r', 'columns': 'header_', 'index': 'd.Cancer'}), '(data=r, columns=header_, index=d.Cancer)\n', (16614, 16655), True, 'import pandas as pd\n'), ((409, 431), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (429, 431), True, 'import tensorflow as tf\n'), ((3574, 3597), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (3588, 3597), False, 'import os\n'), ((3607, 3627), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (3618, 3627), False, 'import os\n'), ((4302, 4318), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (4309, 4318), False, 'from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM\n'), ((4945, 4968), 'keras.layers.Dropout', 'Dropout', (['driver_dropout'], {}), '(driver_dropout)\n', (4952, 4968), False, 'from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM\n'), ((5632, 5654), 'keras.layers.Dropout', 'Dropout', (['motif_dropout'], {}), '(motif_dropout)\n', (5639, 5654), False, 'from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM\n'), ((6073, 6128), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""', 'name': 
'"""output"""'}), "(num_classes, activation='softmax', name='output')\n", (6078, 6128), False, 'from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM\n'), ((9283, 9296), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (9291, 9296), True, 'import numpy as np\n'), ((4588, 4604), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (4595, 4604), False, 'from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM\n'), ((5276, 5299), 'keras.layers.Dropout', 'Dropout', (['driver_dropout'], {}), '(driver_dropout)\n', (5283, 5299), False, 'from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM\n'), ((5953, 5975), 'keras.layers.Dropout', 'Dropout', (['motif_dropout'], {}), '(motif_dropout)\n', (5960, 5975), False, 'from keras.layers import InputLayer, Input, Dropout, Dense, Embedding, LSTM\n'), ((4241, 4270), 'keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (4256, 4270), False, 'from keras import regularizers\n'), ((4873, 4909), 'keras.regularizers.l2', 'regularizers.l2', (['driver_weight_decay'], {}), '(driver_weight_decay)\n', (4888, 4909), False, 'from keras import regularizers\n'), ((5563, 5598), 'keras.regularizers.l2', 'regularizers.l2', (['motif_weight_decay'], {}), '(motif_weight_decay)\n', (5578, 5598), False, 'from keras import regularizers\n'), ((4522, 4551), 'keras.regularizers.l2', 'regularizers.l2', (['weight_decay'], {}), '(weight_decay)\n', (4537, 4551), False, 'from keras import regularizers\n'), ((5199, 5235), 'keras.regularizers.l2', 'regularizers.l2', (['driver_weight_decay'], {}), '(driver_weight_decay)\n', (5214, 5235), False, 'from keras import regularizers\n'), ((5879, 5914), 'keras.regularizers.l2', 'regularizers.l2', (['motif_weight_decay'], {}), '(motif_weight_decay)\n', (5894, 5914), False, 'from keras import regularizers\n')] |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
df=pd.read_csv('/Users/CoraJune/Google Drive/Pozyx/Data/lab_applications/lab_redos/atwood_machine/alpha_ema_testing/alpha0.9/atwood_0.9_4diff.csv', delimiter=',', usecols=['Time', '0x6103 Range'])
df.columns = ['Time', 'Range']
x = df['Time']
y = df['Range']
n = 25 #small n = less smoothed
fwd = pd.Series.ewm(df,span=n, adjust=True).mean()
bwd = pd.Series.ewm(df[::-1],span=n, adjust=True).mean()
filtered = np.stack(( fwd, bwd[::-1] ))
filtered = np.mean(filtered, axis=0)
plt.subplot(2,1,1)
plt.title('smoothed and raw data')
plt.plot(x,y, color = 'orange')
plt.plot(x,filtered, color='green')
plt.plot(x,fwd, color='red')
plt.plot(x[::-1],bwd, color='blue')
plt.xlabel('time')
plt.ylabel('distance')
plt.tight_layout()
df['Velocity'] = ((df['Range'] - df['Range'].shift(1)) / (df['Time'] - df['Time'].shift(1)))
y2 = df['Velocity']
m = 15
fwd2 = pd.Series.ewm(df.Velocity,span=m, adjust=True).mean()
bwd2 = pd.Series.ewm(df.Velocity[::-1],span=m, adjust=True).mean()
filtered2 = np.stack(( fwd2, bwd2[::-1] ))
filtered2 = np.mean(filtered2, axis=0)
plt.subplot(2,1,2)
plt.title('velocity smoothed and raw data')
plt.plot(x,y2, color = 'orange')
plt.plot(x,filtered2, color='green')
plt.plot(x,fwd2, color='red')
plt.plot(x[::-1],bwd2, color='blue')
plt.xlabel('time')
plt.ylabel('velocity')
plt.tight_layout()
#smoothed_velocity = ((df.filtered - df.filtered.shift(1)) / df['Time'] - df['Time'].shift(1))
#print(smoothed_velocity)
#plt.subplot (2,2,3)
#plt.title ('smoothed velocity')
#plt.plot (smoothed_velocity, color = 'orange')
plt.tight_layout()
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.stack",
"pandas.Series.ewm",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout"
] | [((75, 278), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/CoraJune/Google Drive/Pozyx/Data/lab_applications/lab_redos/atwood_machine/alpha_ema_testing/alpha0.9/atwood_0.9_4diff.csv"""'], {'delimiter': '""","""', 'usecols': "['Time', '0x6103 Range']"}), "(\n '/Users/CoraJune/Google Drive/Pozyx/Data/lab_applications/lab_redos/atwood_machine/alpha_ema_testing/alpha0.9/atwood_0.9_4diff.csv'\n , delimiter=',', usecols=['Time', '0x6103 Range'])\n", (86, 278), True, 'import pandas as pd\n'), ((487, 513), 'numpy.stack', 'np.stack', (['(fwd, bwd[::-1])'], {}), '((fwd, bwd[::-1]))\n', (495, 513), True, 'import numpy as np\n'), ((527, 552), 'numpy.mean', 'np.mean', (['filtered'], {'axis': '(0)'}), '(filtered, axis=0)\n', (534, 552), True, 'import numpy as np\n'), ((553, 573), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (564, 573), True, 'import matplotlib.pyplot as plt\n'), ((572, 606), 'matplotlib.pyplot.title', 'plt.title', (['"""smoothed and raw data"""'], {}), "('smoothed and raw data')\n", (581, 606), True, 'import matplotlib.pyplot as plt\n'), ((607, 637), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""orange"""'}), "(x, y, color='orange')\n", (615, 637), True, 'import matplotlib.pyplot as plt\n'), ((639, 675), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'filtered'], {'color': '"""green"""'}), "(x, filtered, color='green')\n", (647, 675), True, 'import matplotlib.pyplot as plt\n'), ((675, 704), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'fwd'], {'color': '"""red"""'}), "(x, fwd, color='red')\n", (683, 704), True, 'import matplotlib.pyplot as plt\n'), ((704, 740), 'matplotlib.pyplot.plot', 'plt.plot', (['x[::-1]', 'bwd'], {'color': '"""blue"""'}), "(x[::-1], bwd, color='blue')\n", (712, 740), True, 'import matplotlib.pyplot as plt\n'), ((740, 758), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (750, 758), True, 'import matplotlib.pyplot as plt\n'), ((759, 781), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""distance"""'], {}), "('distance')\n", (769, 781), True, 'import matplotlib.pyplot as plt\n'), ((782, 800), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (798, 800), True, 'import matplotlib.pyplot as plt\n'), ((1064, 1092), 'numpy.stack', 'np.stack', (['(fwd2, bwd2[::-1])'], {}), '((fwd2, bwd2[::-1]))\n', (1072, 1092), True, 'import numpy as np\n'), ((1107, 1133), 'numpy.mean', 'np.mean', (['filtered2'], {'axis': '(0)'}), '(filtered2, axis=0)\n', (1114, 1133), True, 'import numpy as np\n'), ((1134, 1154), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1145, 1154), True, 'import matplotlib.pyplot as plt\n'), ((1153, 1196), 'matplotlib.pyplot.title', 'plt.title', (['"""velocity smoothed and raw data"""'], {}), "('velocity smoothed and raw data')\n", (1162, 1196), True, 'import matplotlib.pyplot as plt\n'), ((1197, 1228), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y2'], {'color': '"""orange"""'}), "(x, y2, color='orange')\n", (1205, 1228), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1267), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'filtered2'], {'color': '"""green"""'}), "(x, filtered2, color='green')\n", (1238, 1267), True, 'import matplotlib.pyplot as plt\n'), ((1267, 1297), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'fwd2'], {'color': '"""red"""'}), "(x, fwd2, color='red')\n", (1275, 1297), True, 'import matplotlib.pyplot as plt\n'), ((1297, 1334), 'matplotlib.pyplot.plot', 'plt.plot', (['x[::-1]', 'bwd2'], {'color': '"""blue"""'}), "(x[::-1], bwd2, color='blue')\n", (1305, 1334), True, 'import matplotlib.pyplot as plt\n'), ((1334, 1352), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (1344, 1352), True, 'import matplotlib.pyplot as plt\n'), ((1353, 1375), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""velocity"""'], {}), "('velocity')\n", (1363, 1375), True, 'import matplotlib.pyplot as plt\n'), ((1376, 
1394), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1392, 1394), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1640), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1638, 1640), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1651), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1649, 1651), True, 'import matplotlib.pyplot as plt\n'), ((374, 412), 'pandas.Series.ewm', 'pd.Series.ewm', (['df'], {'span': 'n', 'adjust': '(True)'}), '(df, span=n, adjust=True)\n', (387, 412), True, 'import pandas as pd\n'), ((425, 469), 'pandas.Series.ewm', 'pd.Series.ewm', (['df[::-1]'], {'span': 'n', 'adjust': '(True)'}), '(df[::-1], span=n, adjust=True)\n', (438, 469), True, 'import pandas as pd\n'), ((931, 978), 'pandas.Series.ewm', 'pd.Series.ewm', (['df.Velocity'], {'span': 'm', 'adjust': '(True)'}), '(df.Velocity, span=m, adjust=True)\n', (944, 978), True, 'import pandas as pd\n'), ((992, 1045), 'pandas.Series.ewm', 'pd.Series.ewm', (['df.Velocity[::-1]'], {'span': 'm', 'adjust': '(True)'}), '(df.Velocity[::-1], span=m, adjust=True)\n', (1005, 1045), True, 'import pandas as pd\n')] |
# coding=utf-8
import numpy as np
import paddle
from tb_paddle import SummaryWriter
import matplotlib
matplotlib.use('TkAgg')
writer = SummaryWriter('./log')
BATCH_SIZE = 768
train_reader = paddle.batch(
paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=5120),
batch_size=BATCH_SIZE)
mat = np.zeros([BATCH_SIZE, 784])
for step_id, data in enumerate(train_reader()):
# type(data) : <class 'list'>
# len(data) : BATCH_SIZE
for i in range(len(data)):
# type(data[i][0]) : <class 'numpy.ndarray'>
# data[i][0].shape : (784,)
mat[i] = data[i][0]
video_data = mat.reshape((8, 96, 1, 28, 28))
writer.add_video('mnist_video_fps4', video_data)
writer.add_video('mnist_video_fps1', video_data, fps=1)
writer.close()
| [
"paddle.dataset.mnist.train",
"matplotlib.use",
"numpy.zeros",
"tb_paddle.SummaryWriter"
] | [((102, 125), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (116, 125), False, 'import matplotlib\n'), ((136, 158), 'tb_paddle.SummaryWriter', 'SummaryWriter', (['"""./log"""'], {}), "('./log')\n", (149, 158), False, 'from tb_paddle import SummaryWriter\n'), ((311, 338), 'numpy.zeros', 'np.zeros', (['[BATCH_SIZE, 784]'], {}), '([BATCH_SIZE, 784])\n', (319, 338), True, 'import numpy as np\n'), ((231, 259), 'paddle.dataset.mnist.train', 'paddle.dataset.mnist.train', ([], {}), '()\n', (257, 259), False, 'import paddle\n')] |
from baconian.envs.gym_env import make
from baconian.core.core import EnvSpec
from baconian.test.tests.set_up.setup import BaseTestCase
from baconian.common.data_pre_processing import *
import numpy as np
class TestDataPreProcessing(BaseTestCase):
    """Integration tests for the data pre-processing scalers.

    Each scaler is exercised against the observation and action spaces of
    several gym environments. Spaces the scalers cannot handle are expected to
    raise ShapeNotCompatibleError — unless the space is a Box, in which case
    the failure is treated as a real bug.
    """

    def test_min_max(self):
        """Batch/running min-max scalers: output range, inverse transform,
        and incremental updates of the tracked min/max."""
        for env in (make('Pendulum-v0'), make('Acrobot-v1'), make('HalfCheetahBulletEnv-v0')):
            for sample_space in (env.observation_space, env.action_space):
                sample_fn = sample_space.sample
                dims = sample_space.flat_dim
                try:
                    print("test {} with sample {} dims {}".format(env, sample_fn, dims))
                    # test batch scaler
                    min_max = BatchMinMaxScaler(dims=dims)
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    data = min_max.process(np.array(data_list))
                    # default desired range appears to be [0, 1]
                    self.assertTrue(np.greater_equal(np.ones(dims),
                                                     data).all())
                    self.assertTrue(np.less_equal(np.zeros(dims),
                                                  data).all())
                    # test batch scaler with given range
                    min_max = BatchMinMaxScaler(dims=dims,
                                                desired_range=(np.ones(dims) * -1.0,
                                                               np.ones(dims) * 5.0))
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    data = min_max.process(np.array(data_list))
                    self.assertTrue(np.greater_equal(np.ones(dims) * 5.0,
                                                     data).all())
                    self.assertTrue(np.less_equal(np.ones(dims) * -1.0,
                                                  data).all())
                    self.assertEqual(np.max(data), 5.0)
                    self.assertEqual(np.min(data), -1.0)
                    # inverse transform must reproduce the raw samples
                    data = min_max.inverse_process(data)
                    self.assertTrue(np.isclose(data, np.array(data_list)).all())
                    # test batch scaler with given range and given initial data
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    min_max = RunningMinMaxScaler(dims=dims,
                                                  desired_range=(np.ones(dims) * -1.0,
                                                                 np.ones(dims) * 5.0),
                                                  init_data=np.array(data_list))
                    data = min_max.process(np.array(data_list))
                    self.assertTrue(np.greater_equal(np.ones(dims) * 5.0,
                                                     data).all())
                    self.assertTrue(np.less_equal(np.ones(dims) * -1.0,
                                                  data).all())
                    self.assertEqual(np.max(data), 5.0)
                    self.assertEqual(np.min(data), -1.0)
                    # test batch scaler with given range and given initial min and max
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    min_max = RunningMinMaxScaler(dims=dims,
                                                  desired_range=(np.ones(dims) * -1.0,
                                                                 np.ones(dims) * 5.0),
                                                  init_min=np.min(np.array(data_list), axis=0),
                                                  init_max=np.max(np.array(data_list), axis=0))
                    data = min_max.process(np.array(data_list))
                    self.assertTrue(np.greater_equal(np.ones(dims) * 5.0,
                                                     data).all())
                    self.assertTrue(np.less_equal(np.ones(dims) * -1.0,
                                                  data).all())
                    self.assertEqual(np.max(data), 5.0)
                    self.assertEqual(np.min(data), -1.0)
                    # test update function by a larger range of data
                    pre_min = np.min(np.array(data_list), axis=0)
                    pre_max = np.max(np.array(data_list), axis=0)
                    data_list = np.array(data_list) * 2.0
                    min_max.update_scaler(data_list)
                    self.assertTrue(np.equal(pre_min * 2.0, min_max._min).all())
                    self.assertTrue(np.equal(pre_max * 2.0, min_max._max).all())
                except ShapeNotCompatibleError as e:
                    from baconian.common.spaces import Box
                    # Box spaces must always be compatible; other space types
                    # may legitimately fail shape validation.
                    if isinstance(sample_space, Box):
                        raise ValueError
                    else:
                        pass

    def test_standard_scaler(self):
        """Batch/running standard scalers: zero mean after processing,
        inverse transform, and the running sample-count bookkeeping."""
        for env in (make('Pendulum-v0'), make('Acrobot-v1'), make('HalfCheetahBulletEnv-v0')):
            for sample_space in (env.observation_space, env.action_space):
                sample_fn = sample_space.sample
                dims = sample_space.flat_dim
                try:
                    # test batch standard scaler
                    standard_scaler = BatchStandardScaler(dims=dims)
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    data = standard_scaler.inverse_process(data)
                    self.assertTrue(np.isclose(data, np.array(data_list)).all())
                    # test running standard scaler
                    standard_scaler = RunningStandardScaler(dims=dims)
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    standard_scaler.update_scaler(np.array(data_list))
                    self.assertEqual(standard_scaler._data_count, 100)
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    # test update function
                    new_data_list = []
                    for i in range(100):
                        new_data_list.append(sample_fn())
                    standard_scaler.update_scaler(np.array(new_data_list))
                    self.assertEqual(standard_scaler._data_count, 200)
                    data_list += new_data_list
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    # test running scaler with given data
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    standard_scaler = RunningStandardScaler(dims=dims,
                                                            init_data=np.array(data_list))
                    self.assertEqual(standard_scaler._data_count, 100)
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    # test update of running scaler with given data
                    new_data_list = []
                    for i in range(100):
                        new_data_list.append(sample_fn())
                    standard_scaler.update_scaler(np.array(new_data_list))
                    self.assertEqual(standard_scaler._data_count, 200)
                    data_list += new_data_list
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    # test running scaler with given initial mean, var.
                    data_list = []
                    for i in range(100):
                        data_list.append(sample_fn())
                    standard_scaler = RunningStandardScaler(dims=dims,
                                                            init_mean=np.mean(data_list, axis=0),
                                                            init_var=np.var(data_list, axis=0),
                                                            init_mean_var_data_count=100)
                    self.assertEqual(standard_scaler._data_count, 100)
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                    new_data_list = []
                    for i in range(100):
                        new_data_list.append(sample_fn())
                    standard_scaler.update_scaler(np.array(new_data_list))
                    self.assertEqual(standard_scaler._data_count, 200)
                    data_list += new_data_list
                    data = standard_scaler.process(np.array(data_list))
                    self.assertTrue(np.isclose(np.mean(data, axis=0), 0.0).all())
                    # TODO a theoretical bound should be given
                    # self.assertTrue(np.isclose(np.var(data, axis=0), 1.0, atol=0.04).all())
                except ShapeNotCompatibleError as e:
                    from baconian.common.spaces import Box
                    # Box spaces must always be compatible; other space types
                    # may legitimately fail shape validation.
                    if isinstance(sample_space, Box):
                        raise ValueError
                    else:
                        pass
| [
"baconian.envs.gym_env.make",
"numpy.zeros",
"numpy.ones",
"numpy.equal",
"numpy.max",
"numpy.mean",
"numpy.array",
"numpy.min",
"numpy.var"
] | [((298, 317), 'baconian.envs.gym_env.make', 'make', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (302, 317), False, 'from baconian.envs.gym_env import make\n'), ((319, 337), 'baconian.envs.gym_env.make', 'make', (['"""Acrobot-v1"""'], {}), "('Acrobot-v1')\n", (323, 337), False, 'from baconian.envs.gym_env import make\n'), ((339, 370), 'baconian.envs.gym_env.make', 'make', (['"""HalfCheetahBulletEnv-v0"""'], {}), "('HalfCheetahBulletEnv-v0')\n", (343, 370), False, 'from baconian.envs.gym_env import make\n'), ((5087, 5106), 'baconian.envs.gym_env.make', 'make', (['"""Pendulum-v0"""'], {}), "('Pendulum-v0')\n", (5091, 5106), False, 'from baconian.envs.gym_env import make\n'), ((5108, 5126), 'baconian.envs.gym_env.make', 'make', (['"""Acrobot-v1"""'], {}), "('Acrobot-v1')\n", (5112, 5126), False, 'from baconian.envs.gym_env import make\n'), ((5128, 5159), 'baconian.envs.gym_env.make', 'make', (['"""HalfCheetahBulletEnv-v0"""'], {}), "('HalfCheetahBulletEnv-v0')\n", (5132, 5159), False, 'from baconian.envs.gym_env import make\n'), ((923, 942), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (931, 942), True, 'import numpy as np\n'), ((1666, 1685), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (1674, 1685), True, 'import numpy as np\n'), ((1999, 2011), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (2005, 2011), True, 'import numpy as np\n'), ((2055, 2067), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (2061, 2067), True, 'import numpy as np\n'), ((2785, 2804), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (2793, 2804), True, 'import numpy as np\n'), ((3118, 3130), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (3124, 3130), True, 'import numpy as np\n'), ((3174, 3186), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (3180, 3186), True, 'import numpy as np\n'), ((3884, 3903), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (3892, 3903), True, 'import numpy as np\n'), ((4217, 
4229), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (4223, 4229), True, 'import numpy as np\n'), ((4273, 4285), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (4279, 4285), True, 'import numpy as np\n'), ((4400, 4419), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (4408, 4419), True, 'import numpy as np\n'), ((4466, 4485), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (4474, 4485), True, 'import numpy as np\n'), ((4527, 4546), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (4535, 4546), True, 'import numpy as np\n'), ((5650, 5669), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (5658, 5669), True, 'import numpy as np\n'), ((6359, 6378), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (6367, 6378), True, 'import numpy as np\n'), ((6502, 6521), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (6510, 6521), True, 'import numpy as np\n'), ((6994, 7017), 'numpy.array', 'np.array', (['new_data_list'], {}), '(new_data_list)\n', (7002, 7017), True, 'import numpy as np\n'), ((7189, 7208), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (7197, 7208), True, 'import numpy as np\n'), ((7924, 7943), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (7932, 7943), True, 'import numpy as np\n'), ((8440, 8463), 'numpy.array', 'np.array', (['new_data_list'], {}), '(new_data_list)\n', (8448, 8463), True, 'import numpy as np\n'), ((8635, 8654), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (8643, 8654), True, 'import numpy as np\n'), ((9577, 9596), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (9585, 9596), True, 'import numpy as np\n'), ((10026, 10049), 'numpy.array', 'np.array', (['new_data_list'], {}), '(new_data_list)\n', (10034, 10049), True, 'import numpy as np\n'), ((10221, 10240), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (10229, 10240), True, 'import numpy as np\n'), 
((2720, 2739), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (2728, 2739), True, 'import numpy as np\n'), ((7780, 7799), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (7788, 7799), True, 'import numpy as np\n'), ((9240, 9266), 'numpy.mean', 'np.mean', (['data_list'], {'axis': '(0)'}), '(data_list, axis=0)\n', (9247, 9266), True, 'import numpy as np\n'), ((9337, 9362), 'numpy.var', 'np.var', (['data_list'], {'axis': '(0)'}), '(data_list, axis=0)\n', (9343, 9362), True, 'import numpy as np\n'), ((3714, 3733), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (3722, 3733), True, 'import numpy as np\n'), ((3810, 3829), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (3818, 3829), True, 'import numpy as np\n'), ((4642, 4679), 'numpy.equal', 'np.equal', (['(pre_min * 2.0)', 'min_max._min'], {}), '(pre_min * 2.0, min_max._min)\n', (4650, 4679), True, 'import numpy as np\n'), ((4723, 4760), 'numpy.equal', 'np.equal', (['(pre_max * 2.0)', 'min_max._max'], {}), '(pre_max * 2.0, min_max._max)\n', (4731, 4760), True, 'import numpy as np\n'), ((997, 1010), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (1004, 1010), True, 'import numpy as np\n'), ((1128, 1142), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (1136, 1142), True, 'import numpy as np\n'), ((1386, 1399), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (1393, 1399), True, 'import numpy as np\n'), ((1471, 1484), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (1478, 1484), True, 'import numpy as np\n'), ((2185, 2204), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (2193, 2204), True, 'import numpy as np\n'), ((2551, 2564), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (2558, 2564), True, 'import numpy as np\n'), ((2638, 2651), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (2645, 2651), True, 'import numpy as np\n'), ((3539, 3552), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (3546, 
3552), True, 'import numpy as np\n'), ((3626, 3639), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (3633, 3639), True, 'import numpy as np\n'), ((5718, 5739), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (5725, 5739), True, 'import numpy as np\n'), ((6028, 6047), 'numpy.array', 'np.array', (['data_list'], {}), '(data_list)\n', (6036, 6047), True, 'import numpy as np\n'), ((6570, 6591), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (6577, 6591), True, 'import numpy as np\n'), ((7257, 7278), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (7264, 7278), True, 'import numpy as np\n'), ((7992, 8013), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (7999, 8013), True, 'import numpy as np\n'), ((8703, 8724), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (8710, 8724), True, 'import numpy as np\n'), ((9645, 9666), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (9652, 9666), True, 'import numpy as np\n'), ((10289, 10310), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (10296, 10310), True, 'import numpy as np\n'), ((1740, 1753), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (1747, 1753), True, 'import numpy as np\n'), ((1877, 1890), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (1884, 1890), True, 'import numpy as np\n'), ((2859, 2872), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (2866, 2872), True, 'import numpy as np\n'), ((2996, 3009), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (3003, 3009), True, 'import numpy as np\n'), ((3958, 3971), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (3965, 3971), True, 'import numpy as np\n'), ((4095, 4108), 'numpy.ones', 'np.ones', (['dims'], {}), '(dims)\n', (4102, 4108), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import cv2
import numpy as np
from tensorflow.keras.models import load_model
from flask import Flask, Response, request, g
from flask_cors import CORS
from camera_opencv import Camera
import time
import os
from collections import deque
# Flask application with fully permissive CORS so the MJPEG stream can be
# embedded from any origin.
app = Flask(__name__)
CORS(app, resources={r'/*': {'origins': '*'}})
@app.after_request
def add_header(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Access-Control-Allow-Headers, Origin, X-Requested-With, Content-Type, Accept, Authorization',
        'Access-Control-Allow-Methods': 'GET, POST, PUT, DELETE, OPTIONS, HEAD',
        'Access-Control-Expose-Headers': '*',
    }
    for header_name, header_value in cors_headers.items():
        response.headers[header_name] = header_value
    return response
def preprocess_image(img, size):
    """Resize and standardize a frame before feeding it to the model.

    Args:
        img: frame extracted from the video stream.
        size: model input side length; the frame is resized to (size, size).

    Returns:
        The frame scaled to [0, 1] and then standardized (Z-score) with
        ImageNet channel statistics.
    """
    resized = cv2.resize(img, (size, size))
    scaled = resized / 255
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    return (scaled - channel_mean) / channel_std
def gen(camera):
    """Video streaming generator function.

    Pulls frames from *camera*, classifies each with the pre-trained
    C3D/VGG Keras model, overlays the rolling class prediction and an FPS
    counter, and yields the annotated frames as multipart MJPEG chunks.
    """
    class_names = ['away', 'clean_tray', 'count_pills', 'drawer', 'scan_papers_computer']
    model = load_model("./models/C3D_tf_vgg_test_ines_3.hdf5")
    frame_index = 0
    size = 224  # model input side length, passed to preprocess_image
    # rolling window of recent per-frame predictions, used to smooth the label
    Q = deque(maxlen=128)
    font = cv2.FONT_HERSHEY_TRIPLEX
    start_time = time.time()
    while True:
        frame = camera.get_frame()
        img = preprocess_image(frame, size)
        pred_probs = model.predict(np.expand_dims(img, axis=0))
        Q.append(pred_probs)
        # average class probabilities over the window before picking the label
        rolling_probs = np.array(Q).mean(axis=0)
        rolling_prob = max(rolling_probs[0])  # NOTE(review): computed but never used
        index = np.argmax(rolling_probs, axis=1)[0]
        pred_cls = class_names[index]
        frame_index += 1
        current_time = time.time()
        fps = frame_index / (current_time - start_time)
        classTxt = " Class: " + pred_cls
        # NOTE(review): the displayed FPS is inflated by a constant +3 — confirm
        # whether this compensates for a known measurement bias or should be removed.
        fpsTxt = " FPS: " + "{:.2f}".format(fps+3)
        cv2.putText(frame, classTxt, (0, 50), font, 1, (0, 0, 255), 1) # text,coor # text,coordinate,font,size of text,color,thickness of font
        cv2.putText(frame, fpsTxt, (0, 100), font, 1, (0, 0, 255), 1)
        frame = cv2.imencode('.jpg', frame)[1].tobytes()
        # multipart/x-mixed-replace chunk: boundary, content type, JPEG payload
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/streaming')
def streaming():
    """Video streaming route. Put this in the src attribute of an img tag."""
    print("Start streaming*********")
    frame_source = Camera()
    return Response(gen(frame_source),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
    print("Start flask*************")
    # threaded=True lets multiple clients pull the MJPEG stream concurrently.
    app.run(host='0.0.0.0', debug=True, threaded=True)
| [
"tensorflow.keras.models.load_model",
"cv2.putText",
"numpy.argmax",
"flask_cors.CORS",
"flask.Flask",
"collections.deque",
"numpy.expand_dims",
"time.time",
"numpy.array",
"cv2.imencode",
"camera_opencv.Camera",
"cv2.resize"
] | [((268, 283), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (273, 283), False, 'from flask import Flask, Response, request, g\n'), ((284, 329), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/*': {'origins': '*'}}"}), "(app, resources={'/*': {'origins': '*'}})\n", (288, 329), False, 'from flask_cors import CORS\n'), ((1122, 1151), 'cv2.resize', 'cv2.resize', (['img', '(size, size)'], {}), '(img, (size, size))\n', (1132, 1151), False, 'import cv2\n'), ((1181, 1212), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1189, 1212), True, 'import numpy as np\n'), ((1223, 1254), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1231, 1254), True, 'import numpy as np\n'), ((1470, 1520), 'tensorflow.keras.models.load_model', 'load_model', (['"""./models/C3D_tf_vgg_test_ines_3.hdf5"""'], {}), "('./models/C3D_tf_vgg_test_ines_3.hdf5')\n", (1480, 1520), False, 'from tensorflow.keras.models import load_model\n'), ((1564, 1581), 'collections.deque', 'deque', ([], {'maxlen': '(128)'}), '(maxlen=128)\n', (1569, 1581), False, 'from collections import deque\n'), ((1635, 1646), 'time.time', 'time.time', ([], {}), '()\n', (1644, 1646), False, 'import time\n'), ((2067, 2078), 'time.time', 'time.time', ([], {}), '()\n', (2076, 2078), False, 'import time\n'), ((2240, 2302), 'cv2.putText', 'cv2.putText', (['frame', 'classTxt', '(0, 50)', 'font', '(1)', '(0, 0, 255)', '(1)'], {}), '(frame, classTxt, (0, 50), font, 1, (0, 0, 255), 1)\n', (2251, 2302), False, 'import cv2\n'), ((2385, 2446), 'cv2.putText', 'cv2.putText', (['frame', 'fpsTxt', '(0, 100)', 'font', '(1)', '(0, 0, 255)', '(1)'], {}), '(frame, fpsTxt, (0, 100), font, 1, (0, 0, 255), 1)\n', (2396, 2446), False, 'import cv2\n'), ((1777, 1804), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1791, 1804), True, 'import numpy as np\n'), ((1945, 1977), 'numpy.argmax', 'np.argmax', 
(['rolling_probs'], {'axis': '(1)'}), '(rolling_probs, axis=1)\n', (1954, 1977), True, 'import numpy as np\n'), ((2801, 2809), 'camera_opencv.Camera', 'Camera', ([], {}), '()\n', (2807, 2809), False, 'from camera_opencv import Camera\n'), ((1859, 1870), 'numpy.array', 'np.array', (['Q'], {}), '(Q)\n', (1867, 1870), True, 'import numpy as np\n'), ((2463, 2490), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'frame'], {}), "('.jpg', frame)\n", (2475, 2490), False, 'import cv2\n')] |
"""
We consider a randomly generated svf v in the Lie algebra.
We then consider its inverse in the Lie algebra: -v
The composition in the Lie algebra does not exist. But we apply the numerical method anyway to see what may happen.
v dot (-v) and (-v) dot v do not return the approximated identity (in green).
Afterwards we compose exp(v) and exp(-v) to see the approximated identity with the correct composition (again in green).
"""
import matplotlib.pyplot as plt
import numpy as np
from calie.operations import lie_exp
from calie.visualisations.fields import fields_at_the_window
from calie.fields import generate as gen
from calie.fields import compose as cp
if __name__ == '__main__':
    # generate two vector fields
    omega = (20, 20)  # grid size of the 2-D domain
    svf_v = gen.generate_random(omega, parameters=(2, 2))
    svf_v_inv = np.copy(-1 * svf_v)  # the Lie-algebra inverse of v is simply -v
    # we wrongly perform the composition of stationary velocity fields. The outcome is not the identity.
    v_o_v_inv_alg = cp.lagrangian_dot_lagrangian(svf_v, svf_v_inv)
    v_inv_o_v_alg = cp.lagrangian_dot_lagrangian(svf_v_inv, svf_v)
    # we correctly perform the composition after exponentiating the SVF in the Lie group.
    # The outcome is the identity, as expected.
    l_exp = lie_exp.LieExp()
    disp_v = l_exp.scaling_and_squaring(svf_v)
    disp_v_inv = l_exp.scaling_and_squaring(svf_v_inv)
    v_o_v_inv_grp = cp.lagrangian_dot_lagrangian(disp_v, disp_v_inv)
    f_inv_o_f_grp = cp.lagrangian_dot_lagrangian(disp_v_inv, disp_v)
    # see svf map the svfs
    fields_at_the_window.see_field(svf_v, fig_tag=77)
    fields_at_the_window.see_field(svf_v_inv, fig_tag=77, input_color='r', title_input='2 SVF: v blue, -v red ')
    # figure 78: the (improper) algebra-level compositions, results in green
    fields_at_the_window.see_2_fields(svf_v, svf_v, fig_tag=78, window_title_input='Improper composition of SVF')
    fields_at_the_window.see_2_fields(svf_v_inv, svf_v_inv, fig_tag=78, input_color='r')
    fields_at_the_window.see_2_fields(v_inv_o_v_alg, v_o_v_inv_alg, fig_tag=78, input_color='g',
                                      title_input_0='(-v o v)', title_input_1='(v o -v)')
    # figure 79: the proper group-level compositions after exponentiation
    fields_at_the_window.see_2_fields(disp_v, disp_v, fig_tag=79,
                                      window_title_input='Properly computed composition of SVF after exp')
    fields_at_the_window.see_2_fields(disp_v_inv, disp_v_inv, fig_tag=79, input_color='r')
    fields_at_the_window.see_2_fields(f_inv_o_f_grp, v_o_v_inv_grp, fig_tag=79, input_color='g',
                                      title_input_0='(exp(-v) o exp(v))', title_input_1='(exp(v) o exp(-v))')
    plt.show()
| [
"matplotlib.pyplot.show",
"numpy.copy",
"calie.fields.generate.generate_random",
"calie.visualisations.fields.fields_at_the_window.see_field",
"calie.operations.lie_exp.LieExp",
"calie.visualisations.fields.fields_at_the_window.see_2_fields",
"calie.fields.compose.lagrangian_dot_lagrangian"
] | [((770, 815), 'calie.fields.generate.generate_random', 'gen.generate_random', (['omega'], {'parameters': '(2, 2)'}), '(omega, parameters=(2, 2))\n', (789, 815), True, 'from calie.fields import generate as gen\n'), ((832, 851), 'numpy.copy', 'np.copy', (['(-1 * svf_v)'], {}), '(-1 * svf_v)\n', (839, 851), True, 'import numpy as np\n'), ((978, 1024), 'calie.fields.compose.lagrangian_dot_lagrangian', 'cp.lagrangian_dot_lagrangian', (['svf_v', 'svf_v_inv'], {}), '(svf_v, svf_v_inv)\n', (1006, 1024), True, 'from calie.fields import compose as cp\n'), ((1045, 1091), 'calie.fields.compose.lagrangian_dot_lagrangian', 'cp.lagrangian_dot_lagrangian', (['svf_v_inv', 'svf_v'], {}), '(svf_v_inv, svf_v)\n', (1073, 1091), True, 'from calie.fields import compose as cp\n'), ((1244, 1260), 'calie.operations.lie_exp.LieExp', 'lie_exp.LieExp', ([], {}), '()\n', (1258, 1260), False, 'from calie.operations import lie_exp\n'), ((1385, 1433), 'calie.fields.compose.lagrangian_dot_lagrangian', 'cp.lagrangian_dot_lagrangian', (['disp_v', 'disp_v_inv'], {}), '(disp_v, disp_v_inv)\n', (1413, 1433), True, 'from calie.fields import compose as cp\n'), ((1454, 1502), 'calie.fields.compose.lagrangian_dot_lagrangian', 'cp.lagrangian_dot_lagrangian', (['disp_v_inv', 'disp_v'], {}), '(disp_v_inv, disp_v)\n', (1482, 1502), True, 'from calie.fields import compose as cp\n'), ((1535, 1584), 'calie.visualisations.fields.fields_at_the_window.see_field', 'fields_at_the_window.see_field', (['svf_v'], {'fig_tag': '(77)'}), '(svf_v, fig_tag=77)\n', (1565, 1584), False, 'from calie.visualisations.fields import fields_at_the_window\n'), ((1589, 1701), 'calie.visualisations.fields.fields_at_the_window.see_field', 'fields_at_the_window.see_field', (['svf_v_inv'], {'fig_tag': '(77)', 'input_color': '"""r"""', 'title_input': '"""2 SVF: v blue, -v red """'}), "(svf_v_inv, fig_tag=77, input_color='r',\n title_input='2 SVF: v blue, -v red ')\n", (1619, 1701), False, 'from calie.visualisations.fields import 
fields_at_the_window\n'), ((1703, 1816), 'calie.visualisations.fields.fields_at_the_window.see_2_fields', 'fields_at_the_window.see_2_fields', (['svf_v', 'svf_v'], {'fig_tag': '(78)', 'window_title_input': '"""Improper composition of SVF"""'}), "(svf_v, svf_v, fig_tag=78,\n window_title_input='Improper composition of SVF')\n", (1736, 1816), False, 'from calie.visualisations.fields import fields_at_the_window\n'), ((1817, 1905), 'calie.visualisations.fields.fields_at_the_window.see_2_fields', 'fields_at_the_window.see_2_fields', (['svf_v_inv', 'svf_v_inv'], {'fig_tag': '(78)', 'input_color': '"""r"""'}), "(svf_v_inv, svf_v_inv, fig_tag=78,\n input_color='r')\n", (1850, 1905), False, 'from calie.visualisations.fields import fields_at_the_window\n'), ((1906, 2054), 'calie.visualisations.fields.fields_at_the_window.see_2_fields', 'fields_at_the_window.see_2_fields', (['v_inv_o_v_alg', 'v_o_v_inv_alg'], {'fig_tag': '(78)', 'input_color': '"""g"""', 'title_input_0': '"""(-v o v)"""', 'title_input_1': '"""(v o -v)"""'}), "(v_inv_o_v_alg, v_o_v_inv_alg, fig_tag=78,\n input_color='g', title_input_0='(-v o v)', title_input_1='(v o -v)')\n", (1939, 2054), False, 'from calie.visualisations.fields import fields_at_the_window\n'), ((2094, 2228), 'calie.visualisations.fields.fields_at_the_window.see_2_fields', 'fields_at_the_window.see_2_fields', (['disp_v', 'disp_v'], {'fig_tag': '(79)', 'window_title_input': '"""Properly computed composition of SVF after exp"""'}), "(disp_v, disp_v, fig_tag=79,\n window_title_input='Properly computed composition of SVF after exp')\n", (2127, 2228), False, 'from calie.visualisations.fields import fields_at_the_window\n'), ((2267, 2357), 'calie.visualisations.fields.fields_at_the_window.see_2_fields', 'fields_at_the_window.see_2_fields', (['disp_v_inv', 'disp_v_inv'], {'fig_tag': '(79)', 'input_color': '"""r"""'}), "(disp_v_inv, disp_v_inv, fig_tag=79,\n input_color='r')\n", (2300, 2357), False, 'from calie.visualisations.fields import 
fields_at_the_window\n'), ((2358, 2531), 'calie.visualisations.fields.fields_at_the_window.see_2_fields', 'fields_at_the_window.see_2_fields', (['f_inv_o_f_grp', 'v_o_v_inv_grp'], {'fig_tag': '(79)', 'input_color': '"""g"""', 'title_input_0': '"""(exp(-v) o exp(v))"""', 'title_input_1': '"""(exp(v) o exp(-v))"""'}), "(f_inv_o_f_grp, v_o_v_inv_grp, fig_tag=79,\n input_color='g', title_input_0='(exp(-v) o exp(v))', title_input_1=\n '(exp(v) o exp(-v))')\n", (2391, 2531), False, 'from calie.visualisations.fields import fields_at_the_window\n'), ((2566, 2576), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2574, 2576), True, 'import matplotlib.pyplot as plt\n')] |
import time
import cv2
import numpy as np
from test.screenUtils import grabscreen
def make_coordinates(image, line_parameters):
    """Convert a (slope, intercept) pair into pixel endpoints of a segment.

    The segment runs from the bottom edge of the image up to 33/80 of its
    height.

    Returns:
        np.ndarray holding [x1, y1, x2, y2] endpoint coordinates.
    """
    slope, intercept = line_parameters
    bottom_y = image.shape[0]
    top_y = int(bottom_y * (33 / 80))
    bottom_x = int((bottom_y - intercept) / slope)
    top_x = int((top_y - intercept) / slope)
    return np.array([bottom_x, bottom_y, top_x, top_y])
def average_slope_intercept(image, lines):
    """Average the Hough segments into one left and one right lane line.

    Segments with negative slope are pooled as the left lane, the rest as
    the right lane; each pool's mean (slope, intercept) is converted into
    endpoint pixel coordinates.

    Returns:
        np.ndarray of shape (2, 4) with the left and right endpoints, or
        None when either lane could not be averaged (e.g. no segments).
    """
    left_params = []
    right_params = []
    for segment in lines:
        x1, y1, x2, y2 = segment.reshape(4)
        slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
        bucket = left_params if slope < 0 else right_params
        bucket.append((slope, intercept))
    left_avg = np.average(left_params, axis=0)
    right_avg = np.average(right_params, axis=0)
    try:
        return np.array([make_coordinates(image, left_avg),
                         make_coordinates(image, right_avg)])
    except Exception as err:
        print(err, '\n')
        return None
def canny(image):
    """Return the Canny edge map of an RGB frame (grayscale + Gaussian blur first)."""
    grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    smoothed = cv2.GaussianBlur(grayscale, (5, 5), 0)
    return cv2.Canny(smoothed, 50, 150)
def display_lines(image, lines):
    """Draw the given segments in green on a black canvas matching image's shape."""
    canvas = np.zeros_like(image)
    if lines is None:
        return canvas
    for x1, y1, x2, y2 in lines:
        cv2.line(canvas, (x1, y1), (x2, y2), (0, 255, 0), 10)
    return canvas
def region_of_interest(image):
    """Mask everything outside a fixed triangular road region of the frame."""
    triangle = np.array([
        [(500, 570), (1340, 570), (1000, 430)]
    ])
    mask = np.zeros_like(image)
    cv2.fillPoly(mask, triangle, 255)
    return cv2.bitwise_and(image, mask)
# image = cv2.imread('test_image.jpg')
# lane_image = np.copy(image)
# canny_image = canny(lane_image)
# cropped_image = region_of_interest(canny_image)
# lines = cv2.HoughLinesP(cropped_image, 3, np.pi/180, 94, np.array([]), minLineLength=10, maxLineGap=50)
# averaged_lines = average_slope_intercept(lane_image, lines)
# line_image = display_lines(lane_image, averaged_lines)
# combo_image = cv2.addWeighted(lane_image, 0.8, line_image, 0.5, 1)
# cv2.imshow("result", combo_image)
# cv2.waitKey(0)
# Give the user a moment to focus the game window before capture starts.
time.sleep(4)
grabscreen.printWindow()
while True:
    try:
        frame = grabscreen.screenshot("Euro Truck Simulator 2")
        canny_image = canny(frame)
        cropped_image = region_of_interest(canny_image)
        # Probabilistic Hough transform over the masked edge map.
        lines = cv2.HoughLinesP(cropped_image, 3, np.pi / 180, 94, np.array([]), minLineLength=10, maxLineGap=50)
        averaged_lines = average_slope_intercept(frame, lines)
        line_image = display_lines(frame, averaged_lines)
        # Blend the detected lanes over the original frame and show the overlay.
        # (Previously the bare line image was shown and the blend was discarded;
        # the commented reference code above displays the blend. Also removed a
        # stray no-op `cv2.VideoCapture` attribute access.)
        combo_image = cv2.addWeighted(frame, 0.8, line_image, 0.5, 1)
        cv2.imshow("result", combo_image)
        if cv2.waitKey(1) == ord('q'):
            break
    except Exception as e:
        # Best-effort loop: log the error and keep grabbing frames.
        print(e)
cv2.destroyAllWindows()
"cv2.line",
"cv2.GaussianBlur",
"cv2.Canny",
"numpy.zeros_like",
"numpy.average",
"cv2.bitwise_and",
"numpy.polyfit",
"cv2.cvtColor",
"cv2.waitKey",
"test.screenUtils.grabscreen.printWindow",
"cv2.imshow",
"time.sleep",
"cv2.fillPoly",
"cv2.addWeighted",
"numpy.array",
"test.screenUtil... | [((2254, 2267), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (2264, 2267), False, 'import time\n'), ((2268, 2292), 'test.screenUtils.grabscreen.printWindow', 'grabscreen.printWindow', ([], {}), '()\n', (2290, 2292), False, 'from test.screenUtils import grabscreen\n'), ((2941, 2964), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2962, 2964), False, 'import cv2\n'), ((313, 339), 'numpy.array', 'np.array', (['[x1, y1, x2, y2]'], {}), '([x1, y1, x2, y2])\n', (321, 339), True, 'import numpy as np\n'), ((761, 789), 'numpy.average', 'np.average', (['left_fit'], {'axis': '(0)'}), '(left_fit, axis=0)\n', (771, 789), True, 'import numpy as np\n'), ((814, 843), 'numpy.average', 'np.average', (['right_fit'], {'axis': '(0)'}), '(right_fit, axis=0)\n', (824, 843), True, 'import numpy as np\n'), ((1129, 1168), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (1141, 1168), False, 'import cv2\n'), ((1180, 1213), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['grey', '(5, 5)', '(0)'], {}), '(grey, (5, 5), 0)\n', (1196, 1213), False, 'import cv2\n'), ((1226, 1250), 'cv2.Canny', 'cv2.Canny', (['blur', '(50)', '(150)'], {}), '(blur, 50, 150)\n', (1235, 1250), False, 'import cv2\n'), ((1320, 1340), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (1333, 1340), True, 'import numpy as np\n'), ((1544, 1594), 'numpy.array', 'np.array', (['[[(500, 570), (1340, 570), (1000, 430)]]'], {}), '([[(500, 570), (1340, 570), (1000, 430)]])\n', (1552, 1594), True, 'import numpy as np\n'), ((1620, 1640), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (1633, 1640), True, 'import numpy as np\n'), ((1645, 1678), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'polygons', '(255)'], {}), '(mask, polygons, 255)\n', (1657, 1678), False, 'import cv2\n'), ((1698, 1726), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'mask'], {}), '(image, mask)\n', (1713, 1726), False, 'import 
cv2\n'), ((507, 540), 'numpy.polyfit', 'np.polyfit', (['(x1, x2)', '(y1, y2)', '(1)'], {}), '((x1, x2), (y1, y2), 1)\n', (517, 540), True, 'import numpy as np\n'), ((994, 1027), 'numpy.array', 'np.array', (['[left_line, right_line]'], {}), '([left_line, right_line])\n', (1002, 1027), True, 'import numpy as np\n'), ((2330, 2377), 'test.screenUtils.grabscreen.screenshot', 'grabscreen.screenshot', (['"""Euro Truck Simulator 2"""'], {}), "('Euro Truck Simulator 2')\n", (2351, 2377), False, 'from test.screenUtils import grabscreen\n'), ((2726, 2773), 'cv2.addWeighted', 'cv2.addWeighted', (['frame', '(0.8)', 'line_image', '(0.5)', '(1)'], {}), '(frame, 0.8, line_image, 0.5, 1)\n', (2741, 2773), False, 'import cv2\n'), ((2782, 2814), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'line_image'], {}), "('result', line_image)\n", (2792, 2814), False, 'import cv2\n'), ((1416, 1473), 'cv2.line', 'cv2.line', (['line_image', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(10)'], {}), '(line_image, (x1, y1), (x2, y2), (0, 255, 0), 10)\n', (1424, 1473), False, 'import cv2\n'), ((2536, 2548), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2544, 2548), True, 'import numpy as np\n'), ((2851, 2865), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2862, 2865), False, 'import cv2\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import glob
import numpy as np
from multiprocessing import Pool
from functools import partial
from shapely.geometry import Polygon
import argparse
nms_thresh = 0.1
class_name_15 = [
'plane', 'baseball-diamond', 'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout',
'harbor', 'swimming-pool', 'helicopter'
]
class_name_16 = [
'plane', 'baseball-diamond', 'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout',
'harbor', 'swimming-pool', 'helicopter', 'container-crane'
]
def rbox_iou(g, p):
"""
iou of rbox
"""
g = np.array(g)
p = np.array(p)
g = Polygon(g[:8].reshape((4, 2)))
p = Polygon(p[:8].reshape((4, 2)))
g = g.buffer(0)
p = p.buffer(0)
if not g.is_valid or not p.is_valid:
return 0
inter = Polygon(g).intersection(Polygon(p)).area
union = g.area + p.area - inter
if union == 0:
return 0
else:
return inter / union
def py_cpu_nms_poly_fast(dets, thresh):
"""
Args:
dets: pred results
thresh: nms threshold
Returns: index of keep
"""
obbs = dets[:, 0:-1]
x1 = np.min(obbs[:, 0::2], axis=1)
y1 = np.min(obbs[:, 1::2], axis=1)
x2 = np.max(obbs[:, 0::2], axis=1)
y2 = np.max(obbs[:, 1::2], axis=1)
scores = dets[:, 8]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
polys = []
for i in range(len(dets)):
tm_polygon = [
dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4],
dets[i][5], dets[i][6], dets[i][7]
]
polys.append(tm_polygon)
polys = np.array(polys)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
ovr = []
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
hbb_inter = w * h
hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
# h_keep_inds = np.where(hbb_ovr == 0)[0]
h_inds = np.where(hbb_ovr > 0)[0]
tmp_order = order[h_inds + 1]
for j in range(tmp_order.size):
iou = rbox_iou(polys[i], polys[tmp_order[j]])
hbb_ovr[h_inds[j]] = iou
# ovr.append(iou)
# ovr_index.append(tmp_order[j])
try:
if math.isnan(ovr[0]):
pdb.set_trace()
except:
pass
inds = np.where(hbb_ovr <= thresh)[0]
order = order[inds + 1]
return keep
def poly2origpoly(poly, x, y, rate):
origpoly = []
for i in range(int(len(poly) / 2)):
tmp_x = float(poly[i * 2] + x) / float(rate)
tmp_y = float(poly[i * 2 + 1] + y) / float(rate)
origpoly.append(tmp_x)
origpoly.append(tmp_y)
return origpoly
def nmsbynamedict(nameboxdict, nms, thresh):
"""
Args:
nameboxdict: nameboxdict
nms: nms
thresh: nms threshold
Returns: nms result as dict
"""
nameboxnmsdict = {x: [] for x in nameboxdict}
for imgname in nameboxdict:
keep = nms(np.array(nameboxdict[imgname]), thresh)
outdets = []
for index in keep:
outdets.append(nameboxdict[imgname][index])
nameboxnmsdict[imgname] = outdets
return nameboxnmsdict
def merge_single(output_dir, nms, pred_class_lst):
"""
Args:
output_dir: output_dir
nms: nms
pred_class_lst: pred_class_lst
class_name: class_name
Returns:
"""
class_name, pred_bbox_list = pred_class_lst
nameboxdict = {}
for line in pred_bbox_list:
splitline = line.split(' ')
subname = splitline[0]
splitname = subname.split('__')
oriname = splitname[0]
pattern1 = re.compile(r'__\d+___\d+')
x_y = re.findall(pattern1, subname)
x_y_2 = re.findall(r'\d+', x_y[0])
x, y = int(x_y_2[0]), int(x_y_2[1])
pattern2 = re.compile(r'__([\d+\.]+)__\d+___')
rate = re.findall(pattern2, subname)[0]
confidence = splitline[1]
poly = list(map(float, splitline[2:]))
origpoly = poly2origpoly(poly, x, y, rate)
det = origpoly
det.append(confidence)
det = list(map(float, det))
if (oriname not in nameboxdict):
nameboxdict[oriname] = []
nameboxdict[oriname].append(det)
nameboxnmsdict = nmsbynamedict(nameboxdict, nms, nms_thresh)
# write result
dstname = os.path.join(output_dir, class_name + '.txt')
with open(dstname, 'w') as f_out:
for imgname in nameboxnmsdict:
for det in nameboxnmsdict[imgname]:
confidence = det[-1]
bbox = det[0:-1]
outline = imgname + ' ' + str(confidence) + ' ' + ' '.join(
map(str, bbox))
f_out.write(outline + '\n')
def dota_generate_test_result(pred_txt_dir,
output_dir='output',
dota_version='v1.0'):
"""
pred_txt_dir: dir of pred txt
output_dir: dir of output
dota_version: dota_version v1.0 or v1.5 or v2.0
"""
pred_txt_list = glob.glob("{}/*.txt".format(pred_txt_dir))
# step1: summary pred bbox
pred_classes = {}
class_lst = class_name_15 if dota_version == 'v1.0' else class_name_16
for class_name in class_lst:
pred_classes[class_name] = []
for current_txt in pred_txt_list:
img_id = os.path.split(current_txt)[1]
img_id = img_id.split('.txt')[0]
with open(current_txt) as f:
res = f.readlines()
for item in res:
item = item.split(' ')
pred_class = item[0]
item[0] = img_id
pred_bbox = ' '.join(item)
pred_classes[pred_class].append(pred_bbox)
pred_classes_lst = []
for class_name in pred_classes.keys():
print('class_name: {}, count: {}'.format(class_name,
len(pred_classes[class_name])))
pred_classes_lst.append((class_name, pred_classes[class_name]))
# step2: merge
pool = Pool(len(class_lst))
nms = py_cpu_nms_poly_fast
mergesingle_fn = partial(merge_single, output_dir, nms)
pool.map(mergesingle_fn, pred_classes_lst)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='dota anno to coco')
parser.add_argument('--pred_txt_dir', help='path of pred txt dir')
parser.add_argument(
'--output_dir', help='path of output dir', default='output')
parser.add_argument(
'--dota_version',
help='dota_version, v1.0 or v1.5 or v2.0',
type=str,
default='v1.0')
args = parser.parse_args()
# process
dota_generate_test_result(args.pred_txt_dir, args.output_dir,
args.dota_version)
print('done!')
| [
"functools.partial",
"numpy.minimum",
"numpy.maximum",
"argparse.ArgumentParser",
"shapely.geometry.Polygon",
"numpy.min",
"numpy.max",
"numpy.array",
"re.findall",
"numpy.where",
"os.path.split",
"os.path.join",
"re.compile"
] | [((1412, 1423), 'numpy.array', 'np.array', (['g'], {}), '(g)\n', (1420, 1423), True, 'import numpy as np\n'), ((1432, 1443), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (1440, 1443), True, 'import numpy as np\n'), ((1971, 2000), 'numpy.min', 'np.min', (['obbs[:, 0::2]'], {'axis': '(1)'}), '(obbs[:, 0::2], axis=1)\n', (1977, 2000), True, 'import numpy as np\n'), ((2010, 2039), 'numpy.min', 'np.min', (['obbs[:, 1::2]'], {'axis': '(1)'}), '(obbs[:, 1::2], axis=1)\n', (2016, 2039), True, 'import numpy as np\n'), ((2049, 2078), 'numpy.max', 'np.max', (['obbs[:, 0::2]'], {'axis': '(1)'}), '(obbs[:, 0::2], axis=1)\n', (2055, 2078), True, 'import numpy as np\n'), ((2088, 2117), 'numpy.max', 'np.max', (['obbs[:, 1::2]'], {'axis': '(1)'}), '(obbs[:, 1::2], axis=1)\n', (2094, 2117), True, 'import numpy as np\n'), ((2428, 2443), 'numpy.array', 'np.array', (['polys'], {}), '(polys)\n', (2436, 2443), True, 'import numpy as np\n'), ((5461, 5506), 'os.path.join', 'os.path.join', (['output_dir', "(class_name + '.txt')"], {}), "(output_dir, class_name + '.txt')\n", (5473, 5506), False, 'import os\n'), ((7226, 7264), 'functools.partial', 'partial', (['merge_single', 'output_dir', 'nms'], {}), '(merge_single, output_dir, nms)\n', (7233, 7264), False, 'from functools import partial\n'), ((7354, 7410), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""dota anno to coco"""'}), "(description='dota anno to coco')\n", (7377, 7410), False, 'import argparse\n'), ((2596, 2628), 'numpy.maximum', 'np.maximum', (['x1[i]', 'x1[order[1:]]'], {}), '(x1[i], x1[order[1:]])\n', (2606, 2628), True, 'import numpy as np\n'), ((2643, 2675), 'numpy.maximum', 'np.maximum', (['y1[i]', 'y1[order[1:]]'], {}), '(y1[i], y1[order[1:]])\n', (2653, 2675), True, 'import numpy as np\n'), ((2690, 2722), 'numpy.minimum', 'np.minimum', (['x2[i]', 'x2[order[1:]]'], {}), '(x2[i], x2[order[1:]])\n', (2700, 2722), True, 'import numpy as np\n'), ((2737, 2769), 'numpy.minimum', 
'np.minimum', (['y2[i]', 'y2[order[1:]]'], {}), '(y2[i], y2[order[1:]])\n', (2747, 2769), True, 'import numpy as np\n'), ((2782, 2808), 'numpy.maximum', 'np.maximum', (['(0.0)', '(xx2 - xx1)'], {}), '(0.0, xx2 - xx1)\n', (2792, 2808), True, 'import numpy as np\n'), ((2821, 2847), 'numpy.maximum', 'np.maximum', (['(0.0)', '(yy2 - yy1)'], {}), '(0.0, yy2 - yy1)\n', (2831, 2847), True, 'import numpy as np\n'), ((4756, 4783), 're.compile', 're.compile', (['"""__\\\\d+___\\\\d+"""'], {}), "('__\\\\d+___\\\\d+')\n", (4766, 4783), False, 'import re\n'), ((4797, 4826), 're.findall', 're.findall', (['pattern1', 'subname'], {}), '(pattern1, subname)\n', (4807, 4826), False, 'import re\n'), ((4843, 4869), 're.findall', 're.findall', (['"""\\\\d+"""', 'x_y[0]'], {}), "('\\\\d+', x_y[0])\n", (4853, 4869), False, 'import re\n'), ((4934, 4971), 're.compile', 're.compile', (['"""__([\\\\d+\\\\.]+)__\\\\d+___"""'], {}), "('__([\\\\d+\\\\.]+)__\\\\d+___')\n", (4944, 4971), False, 'import re\n'), ((1656, 1666), 'shapely.geometry.Polygon', 'Polygon', (['p'], {}), '(p)\n', (1663, 1666), False, 'from shapely.geometry import Polygon\n'), ((3013, 3034), 'numpy.where', 'np.where', (['(hbb_ovr > 0)'], {}), '(hbb_ovr > 0)\n', (3021, 3034), True, 'import numpy as np\n'), ((3415, 3442), 'numpy.where', 'np.where', (['(hbb_ovr <= thresh)'], {}), '(hbb_ovr <= thresh)\n', (3423, 3442), True, 'import numpy as np\n'), ((4073, 4103), 'numpy.array', 'np.array', (['nameboxdict[imgname]'], {}), '(nameboxdict[imgname])\n', (4081, 4103), True, 'import numpy as np\n'), ((4986, 5015), 're.findall', 're.findall', (['pattern2', 'subname'], {}), '(pattern2, subname)\n', (4996, 5015), False, 'import re\n'), ((6458, 6484), 'os.path.split', 'os.path.split', (['current_txt'], {}), '(current_txt)\n', (6471, 6484), False, 'import os\n'), ((1632, 1642), 'shapely.geometry.Polygon', 'Polygon', (['g'], {}), '(g)\n', (1639, 1642), False, 'from shapely.geometry import Polygon\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 26 23:30:13 2018
@author: luyfc
"""
# python onlinetrain2_2.py
import numpy as np
import csv
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset,DataLoader,TensorDataset
#from model import SimpleNet
from modelv3_0 import SimpleNet3
from game2048.game import Game
from game2048.displays import Display
from game2048.agents import Agent, RandomAgent, ExpectiMaxAgent,DIYAgent,DIY2Agent,DIY21Agent
from game2048.expectimax import board_to_move
from tqdm import tqdm
#torch.set_default_tensor_type('torch.DoubleTensor')
Batch_size=2000
OUT_SHAPE=(4,4)
CAND=12
map_table={2**i: i for i in range(1,CAND)}
map_table[0]=0
def grid_ohe(lst):
arr=lst.reshape(4,4)
ret=np.zeros(shape=OUT_SHAPE+(CAND,),dtype=bool)
for r in range(OUT_SHAPE[0]):
for c in range(OUT_SHAPE[1]):
ret[r,c,map_table[arr[r,c]]]=1
ret=np.swapaxes(ret,0,2)
ret=np.swapaxes(ret,1,2)
return ret
def single_run(size, score_to_win, AgentClass, **kwargs):
game = Game(size, score_to_win)
agent = AgentClass(game, display=Display(), **kwargs)
agent.play(verbose=True)
return game.score
rate_history=[5]
nicenet=[0]
train_num=20000000
position=0
capacity=720000
for i in range(67,train_num):
print('loading data...')
print(i)
if(i==67):
alldata=np.loadtxt('b128-'+str(i)+'.csv',usecols=(0,1,2,3,4,5,6,7,8,9,10,
11,12,13,14,15,16),delimiter=",")
np.random.shuffle(alldata)
datasets1=alldata[:,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]]
datasets=alldata[:,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]]
labels=alldata[:,16]
labels=labels.astype(int)
if(len(datasets)>capacity):
datasets1=datasets1[0:capacity]
datasets=datasets[0:capacity]
if(len(labels)>capacity):
labels=labels[0:capacity]
position=len(labels)
test_datasets=np.loadtxt('b128-test.csv',usecols=(0,1,2,3,4,5,6,7,8,9,10,
11,12,13,14,15),delimiter=",")
test_labels=np.loadtxt('b128-test.csv',usecols=16,delimiter=",")
test_labels=test_labels.astype(int)
test_onehot=np.empty(shape=[len(test_labels),12,4,4])
data_onehot=np.empty(shape=[len(labels),12,4,4])
pros=tqdm(datasets)
iii=0
iiii=0
for (idxt,item) in enumerate(pros):
tmp=grid_ohe(item)
data_onehot[iii]=tmp
iii=iii+1
#data_onehot=np.append(data_onehot,[tmp],axis=0)
for itemt in test_datasets:
tmpt=grid_ohe(itemt)
test_onehot[iiii]=tmpt
iiii=iiii+1
#test_onehot=np.append(test_onehot,[tmpt],axis=0)
if(i>67):
#tmpdata=np.loadtxt('b2048-'+str(i)+'.csv',usecols=(0,1,2,3,4,5,6,7,8,9,10,
#11,12,13,14,15,16),delimiter=",")
tmpdata=saveboard
for itemtmp in tmpdata:
#tmp=grid_ohe(itemtmp)
position=position % capacity
datasets1[position]=itemtmp
position=position+1
#data_onehot[position]=tmp
tmpdatasets=datasets1[:,[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]]
tmplabels=datasets1[:,16]
tmplabels=tmplabels.astype(int)
labels=tmplabels
if(i%8==0):
np.savetxt('newboard_'+str(i)+'.csv',datasets, delimiter=',')
data_onehot=np.empty(shape=[len(tmplabels),12,4,4])
pros=tqdm(tmpdatasets)
i6=0
for (idxt,item) in enumerate(pros):
tmp=grid_ohe(item)
data_onehot[i6]=tmp
i6=i6+1
#data_onehot=np.append(data_onehot,[tmp],axis=0)
print('loading..'+str(len(data_onehot)))
train_data=torch.from_numpy(data_onehot)
train_label=torch.from_numpy(labels)
test_data=torch.from_numpy(test_onehot)
test_label=torch.from_numpy(test_labels)
print('loading...'+str(position))
deal_train_dataset=TensorDataset(train_data,train_label)
deal_test_dataset=TensorDataset(test_data,test_label)
train_loader=DataLoader(dataset=deal_train_dataset,batch_size=Batch_size,shuffle=True)
test_loader=DataLoader(dataset=deal_test_dataset,batch_size=Batch_size,shuffle=True)
if (i==67):
model = SimpleNet3()
model=torch.load('modelv1_'+str(i)+'.pkl')
if(torch.cuda.is_available()):
device = torch.device("cuda:0")
model=model.to(device)
print('1111111')
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.03, momentum=0.8)
NUM_EPOCHS=1
print('beginning training..')
losslist2=[]
acclist=[]
for epoch in range(NUM_EPOCHS):
losslist=[]
cnt=0
running_loss = 0.0
for boards, label in train_loader:
if(torch.cuda.is_available()):
device = torch.device("cuda:0")
model=model.to(device)
boards=boards.double().cuda()
label=label.double().cuda()
label=label.long()
optimizer.zero_grad()
outputs = model(boards)
# loss
loss = criterion(outputs, label)
# backward
loss.backward()
# update weights
optimizer.step()
# print statistics
running_loss += loss.data
print(str(cnt)+' epoch:%d, loss: %.3f' %(epoch + 1, running_loss ))
losslist.append(running_loss)
running_loss = 0.0
cnt=cnt+1
print(sum(losslist)/len(losslist))
losslist2.append(sum(losslist)/len(losslist))
print("Finished Training")
modelsave=model.cpu()
torch.save(modelsave.double(),'onlinemodel.pkl')
if(i%8==0):
modelsave=model.cpu()
torch.save(modelsave.double(),'modelv1_'+str(i+1)+'.pkl')
#********************************************************************
af=0
with open('resulttest.csv', 'w', newline='') as csv_file:
af=af+1
scores=[]
N_TESTS=160
yboard=np.empty(shape=[0,18])
for i2 in range(N_TESTS):
gametest = Game(4, score_to_win=2048, random=False)
agent = DIY21Agent(gametest,version=i+1,filenum=i+1)
xboard=agent.play(verbose=True)
yboard=np.vstack((yboard,xboard))
s=gametest.score
scores.append(s)
countelem={}
for itemc in set(scores):
countelem[itemc]=scores.count(itemc)
#rate=countelem[64]/len(scores)
#if(countelem[64]<rate_history[-1]):
#rate_history.append(countelem[64])
#nicenet.append(i+1)
#torch.save(model,'nicemodelv1_'+str(i+1)+'.pkl')
f=open('myresult3_2.txt','a')
f.write('\n')
f.write('*********************'+str(i)+'******************************************')
f.write('\n')
for ss in scores:
f.write(str(ss))
f.write(', ')
f.write('\n')
f.write('[')
for cc in countelem:
f.write(str(cc)+': '+str(countelem[cc]))
f.write(', ')
f.write(']')
f.write('\n')
f.write('rate_history:[')
for rr in rate_history:
f.write(str(rr)+', ')
f.write(']')
f.write('\n')
f.write('nicenet:[')
for nice in nicenet:
f.write(str(nice)+', ')
f.write(']')
f.write('\n')
f.write('Average scores: @'+str(N_TESTS)+'times:'+ str(sum(scores)/len(scores) ))
f.close()
#*******************************************************************
'''
af=0
with open('resulttest.csv', 'w', newline='') as csv_file:
af=af+1
N_TESTS=100
for i2 in range(N_TESTS):
gametest = Game(4, score_to_win=2048, random=False)
agent = DIY2Agent(gametest,version=i+1)
agent.play(verbose=True)
diyend=np.loadtxt('resulttest.csv',usecols=(0,1,2,3,4,5),delimiter=",")
endindx=len(diyend)
for i3 in range(8):
gameeasy = Game(4, score_to_win=256, random=False)
agente = ExpectiMaxAgent(gameeasy)
agente.play(verbose=True)
'''
#***********************************************************************
#*********************************************************************
#boardsets=np.loadtxt('b1024-'+str(i+1)+'.csv',usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15),delimiter=',')
#boardsets=csv.reader(open('resulttest.csv'))
#for row in boardsets:
#print row
boardsets=yboard
rest=np.empty(shape=[100000,17])
i3=0
print(1)
for item2 in boardsets:
tmpitem2=item2[0:16]
boardtmp=tmpitem2.reshape(4,4)
direction = board_to_move(boardtmp)
item2tmp=np.append(tmpitem2,[direction],axis=0)
rest[i3]=item2tmp
i3=i3+1
saveboard=rest[0:i3]
#with open('b2048-'+str(i+1)+'.csv', 'a', newline='') as csv_file:
#csv_writer = csv.writer(csv_file)
#for iw in range(i3):
#csv_writer.writerow(rest[iw])
| [
"game2048.expectimax.board_to_move",
"numpy.empty",
"game2048.game.Game",
"torch.utils.data.TensorDataset",
"torch.device",
"torch.utils.data.DataLoader",
"numpy.append",
"numpy.swapaxes",
"numpy.loadtxt",
"numpy.random.shuffle",
"tqdm.tqdm",
"modelv3_0.SimpleNet3",
"torch.cuda.is_available"... | [((801, 848), 'numpy.zeros', 'np.zeros', ([], {'shape': '(OUT_SHAPE + (CAND,))', 'dtype': 'bool'}), '(shape=OUT_SHAPE + (CAND,), dtype=bool)\n', (809, 848), True, 'import numpy as np\n'), ((973, 995), 'numpy.swapaxes', 'np.swapaxes', (['ret', '(0)', '(2)'], {}), '(ret, 0, 2)\n', (984, 995), True, 'import numpy as np\n'), ((1003, 1025), 'numpy.swapaxes', 'np.swapaxes', (['ret', '(1)', '(2)'], {}), '(ret, 1, 2)\n', (1014, 1025), True, 'import numpy as np\n'), ((1113, 1137), 'game2048.game.Game', 'Game', (['size', 'score_to_win'], {}), '(size, score_to_win)\n', (1117, 1137), False, 'from game2048.game import Game\n'), ((4043, 4072), 'torch.from_numpy', 'torch.from_numpy', (['data_onehot'], {}), '(data_onehot)\n', (4059, 4072), False, 'import torch\n'), ((4091, 4115), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (4107, 4115), False, 'import torch\n'), ((4131, 4160), 'torch.from_numpy', 'torch.from_numpy', (['test_onehot'], {}), '(test_onehot)\n', (4147, 4160), False, 'import torch\n'), ((4178, 4207), 'torch.from_numpy', 'torch.from_numpy', (['test_labels'], {}), '(test_labels)\n', (4194, 4207), False, 'import torch\n'), ((4277, 4315), 'torch.utils.data.TensorDataset', 'TensorDataset', (['train_data', 'train_label'], {}), '(train_data, train_label)\n', (4290, 4315), False, 'from torch.utils.data import Dataset, DataLoader, TensorDataset\n'), ((4338, 4374), 'torch.utils.data.TensorDataset', 'TensorDataset', (['test_data', 'test_label'], {}), '(test_data, test_label)\n', (4351, 4374), False, 'from torch.utils.data import Dataset, DataLoader, TensorDataset\n'), ((4392, 4467), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'deal_train_dataset', 'batch_size': 'Batch_size', 'shuffle': '(True)'}), '(dataset=deal_train_dataset, batch_size=Batch_size, shuffle=True)\n', (4402, 4467), False, 'from torch.utils.data import Dataset, DataLoader, TensorDataset\n'), ((4483, 4557), 'torch.utils.data.DataLoader', 
'DataLoader', ([], {'dataset': 'deal_test_dataset', 'batch_size': 'Batch_size', 'shuffle': '(True)'}), '(dataset=deal_test_dataset, batch_size=Batch_size, shuffle=True)\n', (4493, 4557), False, 'from torch.utils.data import Dataset, DataLoader, TensorDataset\n'), ((4823, 4844), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4842, 4844), True, 'import torch.nn as nn\n'), ((6462, 6485), 'numpy.empty', 'np.empty', ([], {'shape': '[0, 18]'}), '(shape=[0, 18])\n', (6470, 6485), True, 'import numpy as np\n'), ((8933, 8961), 'numpy.empty', 'np.empty', ([], {'shape': '[100000, 17]'}), '(shape=[100000, 17])\n', (8941, 8961), True, 'import numpy as np\n'), ((1595, 1621), 'numpy.random.shuffle', 'np.random.shuffle', (['alldata'], {}), '(alldata)\n', (1612, 1621), True, 'import numpy as np\n'), ((2090, 2201), 'numpy.loadtxt', 'np.loadtxt', (['"""b128-test.csv"""'], {'usecols': '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)', 'delimiter': '""","""'}), "('b128-test.csv', usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, \n 12, 13, 14, 15), delimiter=',')\n", (2100, 2201), True, 'import numpy as np\n'), ((2246, 2300), 'numpy.loadtxt', 'np.loadtxt', (['"""b128-test.csv"""'], {'usecols': '(16)', 'delimiter': '""","""'}), "('b128-test.csv', usecols=16, delimiter=',')\n", (2256, 2300), True, 'import numpy as np\n'), ((2483, 2497), 'tqdm.tqdm', 'tqdm', (['datasets'], {}), '(datasets)\n', (2487, 2497), False, 'from tqdm import tqdm\n'), ((3722, 3739), 'tqdm.tqdm', 'tqdm', (['tmpdatasets'], {}), '(tmpdatasets)\n', (3726, 3739), False, 'from tqdm import tqdm\n'), ((4590, 4602), 'modelv3_0.SimpleNet3', 'SimpleNet3', ([], {}), '()\n', (4600, 4602), False, 'from modelv3_0 import SimpleNet3\n'), ((4667, 4692), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4690, 4692), False, 'import torch\n'), ((6536, 6576), 'game2048.game.Game', 'Game', (['(4)'], {'score_to_win': '(2048)', 'random': '(False)'}), '(4, score_to_win=2048, 
random=False)\n', (6540, 6576), False, 'from game2048.game import Game\n'), ((6604, 6654), 'game2048.agents.DIY21Agent', 'DIY21Agent', (['gametest'], {'version': '(i + 1)', 'filenum': '(i + 1)'}), '(gametest, version=i + 1, filenum=i + 1)\n', (6614, 6654), False, 'from game2048.agents import Agent, RandomAgent, ExpectiMaxAgent, DIYAgent, DIY2Agent, DIY21Agent\n'), ((6706, 6733), 'numpy.vstack', 'np.vstack', (['(yboard, xboard)'], {}), '((yboard, xboard))\n', (6715, 6733), True, 'import numpy as np\n'), ((9105, 9128), 'game2048.expectimax.board_to_move', 'board_to_move', (['boardtmp'], {}), '(boardtmp)\n', (9118, 9128), False, 'from game2048.expectimax import board_to_move\n'), ((9147, 9187), 'numpy.append', 'np.append', (['tmpitem2', '[direction]'], {'axis': '(0)'}), '(tmpitem2, [direction], axis=0)\n', (9156, 9187), True, 'import numpy as np\n'), ((1176, 1185), 'game2048.displays.Display', 'Display', ([], {}), '()\n', (1183, 1185), False, 'from game2048.displays import Display\n'), ((4717, 4739), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (4729, 4739), False, 'import torch\n'), ((5166, 5191), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5189, 5191), False, 'import torch\n'), ((5220, 5242), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (5232, 5242), False, 'import torch\n')] |
# This code is part of Mthree.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-name-in-module
"""Test matrix elements"""
import numpy as np
import scipy.sparse.linalg as spla
from qiskit import QuantumCircuit, execute
from qiskit.test.mock import FakeAthens
import mthree
from mthree.matvec import M3MatVec
def test_matvec():
"""Check that matvec and rmatvec values are returned as expected"""
backend = FakeAthens()
qc = QuantumCircuit(5)
qc.h(2)
qc.cx(2, 1)
qc.cx(2, 3)
qc.cx(1, 0)
qc.cx(3, 4)
qc.measure_all()
raw_counts = execute(qc, backend).result().get_counts()
mit = mthree.M3Mitigation(backend)
mit.cals_from_system(range(5))
cals = mit._form_cals(range(5))
M = M3MatVec(dict(raw_counts), cals, 5)
L = spla.LinearOperator((M.num_elems, M.num_elems),
matvec=M.matvec)
LT = spla.LinearOperator((M.num_elems, M.num_elems),
matvec=M.rmatvec)
A = mit.reduced_cal_matrix(raw_counts, range(5))[0]
vec = (-1)**np.arange(M.num_elems)*np.ones(M.num_elems, dtype=float) / M.num_elems
v1 = L.dot(vec)
v2 = A.dot(vec)
assert np.allclose(v1, v2, atol=1e-14)
v3 = LT.dot(vec)
v4 = (A.T).dot(vec)
assert np.allclose(v3, v4, atol=1e-14)
| [
"qiskit.QuantumCircuit",
"qiskit.test.mock.FakeAthens",
"numpy.allclose",
"numpy.ones",
"scipy.sparse.linalg.LinearOperator",
"numpy.arange",
"qiskit.execute",
"mthree.M3Mitigation"
] | [((833, 845), 'qiskit.test.mock.FakeAthens', 'FakeAthens', ([], {}), '()\n', (843, 845), False, 'from qiskit.test.mock import FakeAthens\n'), ((856, 873), 'qiskit.QuantumCircuit', 'QuantumCircuit', (['(5)'], {}), '(5)\n', (870, 873), False, 'from qiskit import QuantumCircuit, execute\n'), ((1042, 1070), 'mthree.M3Mitigation', 'mthree.M3Mitigation', (['backend'], {}), '(backend)\n', (1061, 1070), False, 'import mthree\n'), ((1195, 1259), 'scipy.sparse.linalg.LinearOperator', 'spla.LinearOperator', (['(M.num_elems, M.num_elems)'], {'matvec': 'M.matvec'}), '((M.num_elems, M.num_elems), matvec=M.matvec)\n', (1214, 1259), True, 'import scipy.sparse.linalg as spla\n'), ((1298, 1363), 'scipy.sparse.linalg.LinearOperator', 'spla.LinearOperator', (['(M.num_elems, M.num_elems)'], {'matvec': 'M.rmatvec'}), '((M.num_elems, M.num_elems), matvec=M.rmatvec)\n', (1317, 1363), True, 'import scipy.sparse.linalg as spla\n'), ((1590, 1621), 'numpy.allclose', 'np.allclose', (['v1', 'v2'], {'atol': '(1e-14)'}), '(v1, v2, atol=1e-14)\n', (1601, 1621), True, 'import numpy as np\n'), ((1680, 1711), 'numpy.allclose', 'np.allclose', (['v3', 'v4'], {'atol': '(1e-14)'}), '(v3, v4, atol=1e-14)\n', (1691, 1711), True, 'import numpy as np\n'), ((1489, 1522), 'numpy.ones', 'np.ones', (['M.num_elems'], {'dtype': 'float'}), '(M.num_elems, dtype=float)\n', (1496, 1522), True, 'import numpy as np\n'), ((1466, 1488), 'numpy.arange', 'np.arange', (['M.num_elems'], {}), '(M.num_elems)\n', (1475, 1488), True, 'import numpy as np\n'), ((989, 1009), 'qiskit.execute', 'execute', (['qc', 'backend'], {}), '(qc, backend)\n', (996, 1009), False, 'from qiskit import QuantumCircuit, execute\n')] |
import numpy as np
import math
def spatial_accuracy(ps1, ps2, thresh):
'''
Args) ps1, ps2 : normalized point sets
Retern) acc: spatial accuracy
'''
assert len(ps1) == len(ps2), \
f"length of given point sets are differenct: len(ps1)={len(ps1)}, len(ps2)={len(ps2)}"
dists = (ps1 - ps2) ** 2
dists = np.sum(dists, axis=-1)
dists = np.sqrt(dists)
acc = np.mean(dists <= thresh)
return acc
def temporal_accuracy(ps1, ps2, prev_ps1, prev_ps2, thresh):
'''
Args) ps1, ps2 : normalized point sets
Retern) acc: temporal accuracy
'''
assert len(ps1) == len(ps2), \
f"length of given point sets are differenct: len(ps1)={len(ps1)}, len(ps2)={len(ps2)}"
assert len(prev_ps1) == len(prev_ps2), \
f"length of given point sets are differenct: len(prev_ps1)={len(prev_ps1)}, len(prev_ps2)={len(prev_ps2)}"
dists_prev = ps1 - ps2
dists_next = prev_ps1 - prev_ps2
diffs = (dists_prev - dists_next) ** 2
diffs = np.sum(diffs, axis=-1)
diffs = np.sqrt(diffs)
acc = np.mean(diffs <= thresh, axis=-1)
acc = np.mean(acc)
return acc
| [
"numpy.mean",
"numpy.sum",
"numpy.sqrt"
] | [((346, 368), 'numpy.sum', 'np.sum', (['dists'], {'axis': '(-1)'}), '(dists, axis=-1)\n', (352, 368), True, 'import numpy as np\n'), ((381, 395), 'numpy.sqrt', 'np.sqrt', (['dists'], {}), '(dists)\n', (388, 395), True, 'import numpy as np\n'), ((407, 431), 'numpy.mean', 'np.mean', (['(dists <= thresh)'], {}), '(dists <= thresh)\n', (414, 431), True, 'import numpy as np\n'), ((1031, 1053), 'numpy.sum', 'np.sum', (['diffs'], {'axis': '(-1)'}), '(diffs, axis=-1)\n', (1037, 1053), True, 'import numpy as np\n'), ((1066, 1080), 'numpy.sqrt', 'np.sqrt', (['diffs'], {}), '(diffs)\n', (1073, 1080), True, 'import numpy as np\n'), ((1092, 1125), 'numpy.mean', 'np.mean', (['(diffs <= thresh)'], {'axis': '(-1)'}), '(diffs <= thresh, axis=-1)\n', (1099, 1125), True, 'import numpy as np\n'), ((1136, 1148), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (1143, 1148), True, 'import numpy as np\n')] |
import numpy as np
from math import pi,asin,sin
import lattice_utils as lu
from mpl_toolkits.axes_grid.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axes_grid.axislines import Subplot
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
def dynamic_range(Efixed,E,E_max,theta_range = [10,120],step = 10, color = 'k',showplot = True):
#modify to allow fixed Ef or fixed Ei, and input of scattering
#angles
omega = np.linspace(0,E_max,100)
theta_s = np.arange(theta_range[0]*np.pi/180,theta_range[1]*np.pi/180,step*np.pi/180)
Q = np.empty([theta_s.size,omega.size],float)
if Efixed == "Ef":
kf = np.sqrt((E)/2.072)
ki = np.sqrt((omega+E)/2.072)
elif Efixed =="Ei":
ki = np.sqrt((E)/2.072)
kf = np.sqrt((E-omega)/2.072)
for i, theta in enumerate(theta_s):
Q[i] = np.sqrt(ki**2 + kf**2 - 2*ki*kf*np.cos(theta))
if showplot:
plt.plot(Q[i],omega,lw=1,ls = '--',color = color)
txt = "$2\\theta_s$ = {0}$^o$".format(np.round(theta*180/np.pi,1))
plt.text(Q[i,i+25],omega[50],txt, bbox=dict(fc = '1',lw = 0,alpha = 0.05),rotation = 75,color = color)
plt.xlabel('Q ($\\AA^{-1}$)')
plt.ylabel('Energy Transfer (meV)')
title = 'Accessible dynamic range for {1} fixed = {0} meV'.format(E,Efixed)
plt.title(title)
plt.grid(True)
plt.show()
return omega,Q
def spec_twoTheta(Efixed,E,E_T,Q):
if Efixed == "Ef":
kf = np.sqrt((E)/2.072)
ki = np.sqrt((E_T+E)/2.072)
elif Efixed =="Ei":
ki = np.sqrt((E)/2.072)
kf = np.sqrt((E-E_T)/2.072)
theta =np.arccos(-(Q**2 - ki**2 - kf**2)/ki/kf/2.)
return theta*180./np.pi
def Bragg_angle(wavelength,q,rlatt):
d = lu.dspacing(q,rlatt)
print(wavelength/d/2)
tth = 360./pi*asin(wavelength/d/2)
print('\n')
print( '\t wavelength = {:.2f}, Q = [{:.2f} {:.2f} {:.2f}], Two-theta = {:.3f}'.format(wavelength,q[0],q[1],q[2],tth))
return
def TOF_par(q,tth,rlatt):
d = lu.dspacing(q,rlatt)
wavelength = 2*d*sin(tth*pi/360)
E = (9.044/wavelength)**2
k = 2*pi/wavelength
velocity = 629.62*k # m/s
print('Q = [{:.2f} {:.2f} {:.2f}]\n d = {:.3f} \n Two-theta = {:.2f}\n wavelength = {:.3f} Angstrom\n Energy = {:3f} meV\n Velocity = {:3f} m/s'.format(q[0],q[1],q[2],d,tth,wavelength,E,velocity))
return d,wavelength,E,velocity
def Recip_space(sample):
"""
Set up general reciprocal space grid for plotting Miller indicies in a general space.
Would be cool if returned fig object had a custom transformation so that all data added
to plot after it has been created can be given in miller indicies
grid for custom transform.
"""
def tr(x, y):
x, y = np.asarray(x), np.asarray(y)
return x, y-x
def inv_tr(x,y):
x, y = np.asarray(x), np.asarray(y)
return x, y+x
grid_helper = GridHelperCurveLinear((tr, inv_tr))
fig = plt.figure(1, figsize=(7, 4))
ax = Subplot(fig, 1, 1, 1, grid_helper=grid_helper)
rlatt = sample.star_lattice
[xs,ys,zs] = sample.StandardSystem
fig.add_subplot(ax)
ax.grid(True)
return
def Al_peaks(wavelength = 1.0):
energy = (9.044/wavelength)**2
# Al_lattice
a = 4.0498; b = 4.0498; c = 4.0498
aa =90; bb = 90; cc = 90
latt = lu.lattice(a,b,c,aa,bb,cc)
rlatt = lu.recip_lattice(latt)
# Al is FCC so peaks must be all even or all odd
peaks = [[1,1,1],[2,0,0],[2,2,0],[3,1,1],[2,2,2],[4,0,0],[3,3,1],[4,2,0],
[4,2,2],[5,1,1],[3,3,3],[4,4,0],[5,3,1],[4,4,2],[6,0,0]]
print('\tAluminum Bragg Peaks')
print('\tNeutron wavelength {:.2f} Angstroms ({:.2f} meV)\n'.format(wavelength,energy))
print('\t H K L\tQ (AA-1) d(AA) 2theta 2theta(l/2) 2theta(l/3)')
print('\t----------------------------------------------------------------------------')
for p in peaks:
modQ = lu.modVec(p,rlatt)
dsp = lu.dspacing(p,rlatt)
if abs(wavelength/(2*dsp)) < 1:
tth = 360/pi * asin(wavelength/(2*dsp))
line = '\t {:d} {:d} {:d}\t {:.3f} {:.3f} {:.2f}\t {:.2f}\t {:.2f}'
else :
tth = 'NaN'
line = '\t {:d} {:d} {:d}\t {:.3f} {:.3f} {:s}\t {:.2f}\t {:.2f}'
if abs((wavelength/2)/(2*dsp)) < 1:
tth2 = 360/pi * asin((wavelength/2)/(2*dsp))
else :
tth2 = 'NaN'
line = '\t {:d} {:d} {:d}\t {:.3f} {:.3f} {:s}\t {:s}\t\t {:.2f}'
if abs((wavelength/3)/(2*dsp)) < 1:
tth3 = 360/pi * asin((wavelength/3)/(2*dsp))
else :
tth3 = 'NaN'
line = '\t {:d} {:d} {:d}\t {:.3f} {:.3f} {:s}\t {:s}\t\t {:s}'
# else :
# tth = 360/pi * asin(wavelength/(2*dsp))
# tth2 = 360/pi * asin((wavelength/2)/(2*dsp))
# tth3 = 360/pi * asin((wavelength/3)/(2*dsp))
# line = '\t {:d} {:d} {:d}\t {:.3f} {:.3f} {:.2f}\t {:.2f}\t {:.2f}'
print(line.format(p[0],p[1],p[2],modQ,dsp,tth,tth2,tth3))
return
if __name__ == '__main__':
Al_peaks() | [
"matplotlib.pyplot.title",
"math.asin",
"numpy.empty",
"lattice_utils.lattice",
"mpl_toolkits.axes_grid.axislines.Subplot",
"matplotlib.pyplot.figure",
"numpy.arange",
"lattice_utils.dspacing",
"numpy.round",
"mpl_toolkits.axes_grid.grid_helper_curvelinear.GridHelperCurveLinear",
"numpy.linspace... | [((230, 251), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (244, 251), False, 'import matplotlib\n'), ((474, 500), 'numpy.linspace', 'np.linspace', (['(0)', 'E_max', '(100)'], {}), '(0, E_max, 100)\n', (485, 500), True, 'import numpy as np\n'), ((513, 606), 'numpy.arange', 'np.arange', (['(theta_range[0] * np.pi / 180)', '(theta_range[1] * np.pi / 180)', '(step * np.pi / 180)'], {}), '(theta_range[0] * np.pi / 180, theta_range[1] * np.pi / 180, step *\n np.pi / 180)\n', (522, 606), True, 'import numpy as np\n'), ((597, 640), 'numpy.empty', 'np.empty', (['[theta_s.size, omega.size]', 'float'], {}), '([theta_s.size, omega.size], float)\n', (605, 640), True, 'import numpy as np\n'), ((1732, 1788), 'numpy.arccos', 'np.arccos', (['(-(Q ** 2 - ki ** 2 - kf ** 2) / ki / kf / 2.0)'], {}), '(-(Q ** 2 - ki ** 2 - kf ** 2) / ki / kf / 2.0)\n', (1741, 1788), True, 'import numpy as np\n'), ((1857, 1878), 'lattice_utils.dspacing', 'lu.dspacing', (['q', 'rlatt'], {}), '(q, rlatt)\n', (1868, 1878), True, 'import lattice_utils as lu\n'), ((2135, 2156), 'lattice_utils.dspacing', 'lu.dspacing', (['q', 'rlatt'], {}), '(q, rlatt)\n', (2146, 2156), True, 'import lattice_utils as lu\n'), ((3036, 3071), 'mpl_toolkits.axes_grid.grid_helper_curvelinear.GridHelperCurveLinear', 'GridHelperCurveLinear', (['(tr, inv_tr)'], {}), '((tr, inv_tr))\n', (3057, 3071), False, 'from mpl_toolkits.axes_grid.grid_helper_curvelinear import GridHelperCurveLinear\n'), ((3091, 3120), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(7, 4)'}), '(1, figsize=(7, 4))\n', (3101, 3120), True, 'import matplotlib.pyplot as plt\n'), ((3130, 3176), 'mpl_toolkits.axes_grid.axislines.Subplot', 'Subplot', (['fig', '(1)', '(1)', '(1)'], {'grid_helper': 'grid_helper'}), '(fig, 1, 1, 1, grid_helper=grid_helper)\n', (3137, 3176), False, 'from mpl_toolkits.axes_grid.axislines import Subplot\n'), ((3481, 3512), 'lattice_utils.lattice', 'lu.lattice', (['a', 'b', 'c', 
'aa', 'bb', 'cc'], {}), '(a, b, c, aa, bb, cc)\n', (3491, 3512), True, 'import lattice_utils as lu\n'), ((3522, 3544), 'lattice_utils.recip_lattice', 'lu.recip_lattice', (['latt'], {}), '(latt)\n', (3538, 3544), True, 'import lattice_utils as lu\n'), ((677, 695), 'numpy.sqrt', 'np.sqrt', (['(E / 2.072)'], {}), '(E / 2.072)\n', (684, 695), True, 'import numpy as np\n'), ((709, 737), 'numpy.sqrt', 'np.sqrt', (['((omega + E) / 2.072)'], {}), '((omega + E) / 2.072)\n', (716, 737), True, 'import numpy as np\n'), ((1573, 1591), 'numpy.sqrt', 'np.sqrt', (['(E / 2.072)'], {}), '(E / 2.072)\n', (1580, 1591), True, 'import numpy as np\n'), ((1605, 1631), 'numpy.sqrt', 'np.sqrt', (['((E_T + E) / 2.072)'], {}), '((E_T + E) / 2.072)\n', (1612, 1631), True, 'import numpy as np\n'), ((1923, 1947), 'math.asin', 'asin', (['(wavelength / d / 2)'], {}), '(wavelength / d / 2)\n', (1927, 1947), False, 'from math import pi, asin, sin\n'), ((2177, 2196), 'math.sin', 'sin', (['(tth * pi / 360)'], {}), '(tth * pi / 360)\n', (2180, 2196), False, 'from math import pi, asin, sin\n'), ((4098, 4117), 'lattice_utils.modVec', 'lu.modVec', (['p', 'rlatt'], {}), '(p, rlatt)\n', (4107, 4117), True, 'import lattice_utils as lu\n'), ((4131, 4152), 'lattice_utils.dspacing', 'lu.dspacing', (['p', 'rlatt'], {}), '(p, rlatt)\n', (4142, 4152), True, 'import lattice_utils as lu\n'), ((771, 789), 'numpy.sqrt', 'np.sqrt', (['(E / 2.072)'], {}), '(E / 2.072)\n', (778, 789), True, 'import numpy as np\n'), ((803, 831), 'numpy.sqrt', 'np.sqrt', (['((E - omega) / 2.072)'], {}), '((E - omega) / 2.072)\n', (810, 831), True, 'import numpy as np\n'), ((974, 1023), 'matplotlib.pyplot.plot', 'plt.plot', (['Q[i]', 'omega'], {'lw': '(1)', 'ls': '"""--"""', 'color': 'color'}), "(Q[i], omega, lw=1, ls='--', color=color)\n", (982, 1023), True, 'import matplotlib.pyplot as plt\n'), ((1230, 1259), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Q ($\\\\AA^{-1}$)"""'], {}), "('Q ($\\\\AA^{-1}$)')\n", (1240, 1259), True, 'import 
matplotlib.pyplot as plt\n'), ((1272, 1307), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Energy Transfer (meV)"""'], {}), "('Energy Transfer (meV)')\n", (1282, 1307), True, 'import matplotlib.pyplot as plt\n'), ((1408, 1424), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1417, 1424), True, 'import matplotlib.pyplot as plt\n'), ((1437, 1451), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1445, 1451), True, 'import matplotlib.pyplot as plt\n'), ((1464, 1474), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1472, 1474), True, 'import matplotlib.pyplot as plt\n'), ((1665, 1683), 'numpy.sqrt', 'np.sqrt', (['(E / 2.072)'], {}), '(E / 2.072)\n', (1672, 1683), True, 'import numpy as np\n'), ((1697, 1723), 'numpy.sqrt', 'np.sqrt', (['((E - E_T) / 2.072)'], {}), '((E - E_T) / 2.072)\n', (1704, 1723), True, 'import numpy as np\n'), ((2878, 2891), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2888, 2891), True, 'import numpy as np\n'), ((2893, 2906), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (2903, 2906), True, 'import numpy as np\n'), ((2966, 2979), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2976, 2979), True, 'import numpy as np\n'), ((2981, 2994), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (2991, 2994), True, 'import numpy as np\n'), ((1074, 1106), 'numpy.round', 'np.round', (['(theta * 180 / np.pi)', '(1)'], {}), '(theta * 180 / np.pi, 1)\n', (1082, 1106), True, 'import numpy as np\n'), ((4228, 4256), 'math.asin', 'asin', (['(wavelength / (2 * dsp))'], {}), '(wavelength / (2 * dsp))\n', (4232, 4256), False, 'from math import pi, asin, sin\n'), ((4567, 4599), 'math.asin', 'asin', (['(wavelength / 2 / (2 * dsp))'], {}), '(wavelength / 2 / (2 * dsp))\n', (4571, 4599), False, 'from math import pi, asin, sin\n'), ((4813, 4845), 'math.asin', 'asin', (['(wavelength / 3 / (2 * dsp))'], {}), '(wavelength / 3 / (2 * dsp))\n', (4817, 4845), False, 'from math import pi, asin, sin\n'), 
((917, 930), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (923, 930), True, 'import numpy as np\n')] |
import sys
from pprint import pprint
from silk import Silk, ValidationError
from silk.mixed import Monitor, SilkBackend, MixedObject
def reset_backend(sb=None):
if sb is None:
sb = silk_backend
sb._data = None
sb._form = None
sb._storage = None
sb._silk = None
def adder(self, other):
return other + self.x
silk_backend = SilkBackend()
monitor = Monitor(silk_backend)
mixed_object = MixedObject(monitor, ())
silk_backend2 = SilkBackend()
monitor2 = Monitor(silk_backend2)
mixed_object2 = MixedObject(monitor2, ())
silk_backend3 = SilkBackend()
monitor3 = Monitor(silk_backend3)
mixed_object3 = MixedObject(monitor3, ())
s = Silk(data=mixed_object)
silk_backend.set_silk(s)
s.x = 80
print(s.x.data)
s.__add__ = adder
s.bla = adder
print(s.bla(5))
print(s+5)
s2 = Silk(data=mixed_object2,schema=s.schema)
silk_backend2.set_silk(s2)
s2.x = 10
print(s2+5)
s3 = Silk(data=mixed_object3,schema=s2.schema)
silk_backend3.set_silk(s3)
s3.x = 10
print(s3+25)
def xy(self):
return self.x + self.y
s.x = 1
s.y = 2
print(s.x + s.y)
s3.xy = property(xy) # all three Silks use the same schema
print(s.xy)
def xx_get(self):
return self.x * self.x
def xx_set(self, xx):
import math
self.x = int(math.sqrt(xx))
s.x = 3
s.xx = property(xx_get, xx_set)
print(s.xx)
s.xx = 16
print(s.xx)
print(s.x.data)
s.z = {}
s.z.q = 12
s.z.r = 24
sz = s.z
print(sz.q.data, sz.r.data)
s.z.r = 25
print(sz.q.data, sz.r.data)
s.z.qr = property(lambda self: self.q * self.r)
print(s.z.qr)
def validate_z(self):
print("VALIDATE", self.q.data, self.r.data)
assert self.q < self.r
try:
s.z.add_validator(validate_z)
except Exception:
pprint(s.schema)
s.z.validate()
pprint(s.schema)
s.lis = [1,2,3]
s.lis.append(10)
s.validate()
print(s.lis.data)
s.lis += [5]
s.validate()
print(s.lis*2)
"""
for a in s.lis[1:3]: # slices not yet supported by monitor
print(a.data)
"""
for a in s.lis:
print(a.data)
print(hasattr(s, "lis"), "lis" in s)
print(hasattr(s, "lis2"), "lis2" in s)
for v in s:
#print(v.data) # With Monitor, iteration does *not* give a Silk object
print(v)
print("")
for v in s.lis:
print(v.data)
print()
reset_backend()
s = Silk(data=mixed_object)
silk_backend.set_silk(s)
s.set(5)
inc = lambda self: self + 1
s.x = inc
print(s.x())
s.y = property(inc)
print(s.y)
def setter(self,v):
self.set(v - 1)
s.z = property(inc, setter)
print(s.z)
s.z = 10
print(s.data)
print(s.z)
import numpy as np
arr = np.array([1.0,2.0,3.0])
s2.arr = arr
# Need .self.data or .unsilk for Numpy arrays, because Numpy arrays have a .data method
print(s2.arr.self.data, arr)
print(s2.arr.unsilk, arr)
print(type(s2.arr.self.data), type(arr))
print(s2.arr[2].self.data, arr[2])
print(type(s2.arr[2].self.data), type(arr[2]))
#s2.arr.schema["type"] = "array" # inferred
print(s2.arr.schema["type"])
reset_backend()
item = Silk(data=mixed_object)
silk_backend.set_silk(item)
item.set(5.0)
#item.schema["type"] = "number" # inferred
def func(self):
assert self > 0
item.add_validator(func)
s2.arr.schema["items"] = item.schema
s2.validate()
print(silk_backend._data)
print(silk_backend2._data)
print("START")
s2.arr[0] = 5
print(s2.arr.unsilk)
reset_backend()
s = Silk(data=mixed_object)
silk_backend.set_silk(s)
s.x = 1.0
s.y = 0.0
s.z = 0.0
def func(self):
assert abs(self.x**2+self.y**2+self.z**2 - 1) < 0.001
s.add_validator(func)
s.y = 0.0
s.validate()
try:
s.y = 1.0 # would fail
s.validate()
except ValidationError:
s.y = 0
# setting 3 inter-validated values at once is *really* inconvenient with SilkBackend...
try:
s.x = 0.0
except ValidationError:
pass
try:
s.y = 0.0
except ValidationError:
pass
s.z = 1.0
print(s.data)
try:
s.x = 1.0
except ValidationError:
pass
try:
s.y = 0.0
except ValidationError:
pass
s.z = 0.0
print(s.data)
import numpy as np
reset_backend()
a = Silk(data=mixed_object)
silk_backend.set_silk(a)
a.coor = [0.0,0.0,1.0]
pprint(a.coor.schema)
print(a.coor.data)
print("START")
np.array(a.coor.data)
print(np.array(a.coor.data))
def func(self):
import numpy as np #necessary!
arr = np.array(self.data)
assert abs(np.sum(arr**2) - 1) < 0.01
a.coor.add_validator(func)
reset_backend(mixed_object2)
c = Silk(data=mixed_object2)
silk_backend2.set_silk(c)
c.set( [0.0, 0.0, 0.0] )
c.schema.clear()
c.schema.update(a.coor.schema)
def set_x(self, value):
self[0] = value
c.x = property(lambda self: self[0], set_x)
def set_y(self, value):
self[1] = value
c.y = property(lambda self: self[1], set_y)
def set_z(self, value):
self[2] = value
c.z = property(lambda self: self[2], set_z)
def set_xyz(self, xyz):
x,y,z = xyz
try:
self.x = x
except ValidationError:
pass
try:
self.y = y
except ValidationError:
pass
self.z = z
c.xyz = property(lambda self: tuple(self.data), set_xyz)
try:
c.x = 0.2
except ValidationError:
pass
try:
c.y = -0.3
except ValidationError:
pass
c.z = 0.93
print(c.data)
c.xyz = -1,0,0
print(c.data, c.xyz)
c.xyz = 0.2,-0.3,0.93
print(c.data, c.xyz)
pprint(c.schema)
reset_backend()
Test = Silk(data=mixed_object) # singleton
silk_backend.set_silk(Test)
"""
# will never work for a singleton backed up by a mixed object
def __init__(self, a, b):
self.a = a
self.b = b
"""
def __call__(self, c):
return self.a + self.b + c
#Test.__init__ = __init__
Test.__call__ = __call__
#test = Test(7,8)
test = Test
test.a, test.b = 7, 8
test.validate()
print(test.data)
print(test(5))
pprint(test.schema)
print("START")
test.l = []
l = test.l
l.append("bla")
test.validate()
try:
l.append(10) #Error
l.validate()
except ValidationError as exc:
print(exc)
l.pop(-1)
print(test.l.data) | [
"numpy.sum",
"math.sqrt",
"silk.mixed.SilkBackend",
"silk.mixed.Monitor",
"numpy.array",
"pprint.pprint",
"silk.Silk",
"silk.mixed.MixedObject"
] | [((357, 370), 'silk.mixed.SilkBackend', 'SilkBackend', ([], {}), '()\n', (368, 370), False, 'from silk.mixed import Monitor, SilkBackend, MixedObject\n'), ((381, 402), 'silk.mixed.Monitor', 'Monitor', (['silk_backend'], {}), '(silk_backend)\n', (388, 402), False, 'from silk.mixed import Monitor, SilkBackend, MixedObject\n'), ((418, 442), 'silk.mixed.MixedObject', 'MixedObject', (['monitor', '()'], {}), '(monitor, ())\n', (429, 442), False, 'from silk.mixed import Monitor, SilkBackend, MixedObject\n'), ((460, 473), 'silk.mixed.SilkBackend', 'SilkBackend', ([], {}), '()\n', (471, 473), False, 'from silk.mixed import Monitor, SilkBackend, MixedObject\n'), ((485, 507), 'silk.mixed.Monitor', 'Monitor', (['silk_backend2'], {}), '(silk_backend2)\n', (492, 507), False, 'from silk.mixed import Monitor, SilkBackend, MixedObject\n'), ((524, 549), 'silk.mixed.MixedObject', 'MixedObject', (['monitor2', '()'], {}), '(monitor2, ())\n', (535, 549), False, 'from silk.mixed import Monitor, SilkBackend, MixedObject\n'), ((567, 580), 'silk.mixed.SilkBackend', 'SilkBackend', ([], {}), '()\n', (578, 580), False, 'from silk.mixed import Monitor, SilkBackend, MixedObject\n'), ((592, 614), 'silk.mixed.Monitor', 'Monitor', (['silk_backend3'], {}), '(silk_backend3)\n', (599, 614), False, 'from silk.mixed import Monitor, SilkBackend, MixedObject\n'), ((631, 656), 'silk.mixed.MixedObject', 'MixedObject', (['monitor3', '()'], {}), '(monitor3, ())\n', (642, 656), False, 'from silk.mixed import Monitor, SilkBackend, MixedObject\n'), ((662, 685), 'silk.Silk', 'Silk', ([], {'data': 'mixed_object'}), '(data=mixed_object)\n', (666, 685), False, 'from silk import Silk, ValidationError\n'), ((803, 844), 'silk.Silk', 'Silk', ([], {'data': 'mixed_object2', 'schema': 's.schema'}), '(data=mixed_object2, schema=s.schema)\n', (807, 844), False, 'from silk import Silk, ValidationError\n'), ((899, 941), 'silk.Silk', 'Silk', ([], {'data': 'mixed_object3', 'schema': 's2.schema'}), '(data=mixed_object3, 
schema=s2.schema)\n', (903, 941), False, 'from silk import Silk, ValidationError\n'), ((1708, 1724), 'pprint.pprint', 'pprint', (['s.schema'], {}), '(s.schema)\n', (1714, 1724), False, 'from pprint import pprint\n'), ((2202, 2225), 'silk.Silk', 'Silk', ([], {'data': 'mixed_object'}), '(data=mixed_object)\n', (2206, 2225), False, 'from silk import Silk, ValidationError\n'), ((2481, 2506), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (2489, 2506), True, 'import numpy as np\n'), ((2883, 2906), 'silk.Silk', 'Silk', ([], {'data': 'mixed_object'}), '(data=mixed_object)\n', (2887, 2906), False, 'from silk import Silk, ValidationError\n'), ((3232, 3255), 'silk.Silk', 'Silk', ([], {'data': 'mixed_object'}), '(data=mixed_object)\n', (3236, 3255), False, 'from silk import Silk, ValidationError\n'), ((3907, 3930), 'silk.Silk', 'Silk', ([], {'data': 'mixed_object'}), '(data=mixed_object)\n', (3911, 3930), False, 'from silk import Silk, ValidationError\n'), ((3979, 4000), 'pprint.pprint', 'pprint', (['a.coor.schema'], {}), '(a.coor.schema)\n', (3985, 4000), False, 'from pprint import pprint\n'), ((4035, 4056), 'numpy.array', 'np.array', (['a.coor.data'], {}), '(a.coor.data)\n', (4043, 4056), True, 'import numpy as np\n'), ((4270, 4294), 'silk.Silk', 'Silk', ([], {'data': 'mixed_object2'}), '(data=mixed_object2)\n', (4274, 4294), False, 'from silk import Silk, ValidationError\n'), ((5121, 5137), 'pprint.pprint', 'pprint', (['c.schema'], {}), '(c.schema)\n', (5127, 5137), False, 'from pprint import pprint\n'), ((5162, 5185), 'silk.Silk', 'Silk', ([], {'data': 'mixed_object'}), '(data=mixed_object)\n', (5166, 5185), False, 'from silk import Silk, ValidationError\n'), ((5558, 5577), 'pprint.pprint', 'pprint', (['test.schema'], {}), '(test.schema)\n', (5564, 5577), False, 'from pprint import pprint\n'), ((4063, 4084), 'numpy.array', 'np.array', (['a.coor.data'], {}), '(a.coor.data)\n', (4071, 4084), True, 'import numpy as np\n'), ((4147, 4166), 
'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (4155, 4166), True, 'import numpy as np\n'), ((1240, 1253), 'math.sqrt', 'math.sqrt', (['xx'], {}), '(xx)\n', (1249, 1253), False, 'import math\n'), ((1675, 1691), 'pprint.pprint', 'pprint', (['s.schema'], {}), '(s.schema)\n', (1681, 1691), False, 'from pprint import pprint\n'), ((4182, 4198), 'numpy.sum', 'np.sum', (['(arr ** 2)'], {}), '(arr ** 2)\n', (4188, 4198), True, 'import numpy as np\n')] |
import numpy as np
from collections import OrderedDict
import logging
import astropy.units as apu
from astropy import table
from astropy.extern import six
from astropy import coordinates
from astropyp.utils import misc
logger = logging.getLogger('astropyp.catalog')
class Catalog(object):
"""
Wrapper for `~astropy.table.Table` catalogs of point sources.
This allows users to map class attributes to columns in the
source table, at times with different names, making it easier to
work with catalogs of objects with different column names.
Parameters
----------
sources: `~astropy.table.Table`
Astropy table that contains a list of point sources.
use_defaults: bool, optional
Whether or not to use the default column mapping
(x, y, ra, dec, e_ra, e_dec, aper_flux, aper_flux_err,
psf_flux, psf_flux_err). *Default=True*
kwargs: Keyword Arguments
Key,value pairs that assign static column names to
columns in the ``sources`` table. This can override any
names in defaults as well as add new column names.
"""
def __init__(self, sources=None, use_defaults=True, **kwargs):
default_columns = ['x','y','ra','dec','e_ra','e_dec',
'aper_flux','aper_flux_err','psf_flux','psf_flux_err']
self.sources = sources
# If using default columns, update them with any keyword
# arguments passed to init
if use_defaults:
self._columns = OrderedDict([(k,k) for k in default_columns])
self._columns.update(kwargs)
else:
self._columns = kwargs
# Add all of the static columns to the Catalog properties
for col in self._columns.keys():
setattr(self.__class__, col, self._add_property(col))
@property
def shape(self):
return (len(self.sources), len(self.sources.columns))
def _add_property(self, key):
"""
Creating a mapping from a key using the current
value in the lookup table
"""
# Get the static column
def getter(self):
prop = self._columns[key]
if prop in self.sources.columns:
result = self.sources[prop]
else:
result = None
return result
# Set the static column in the table
def setter(self, value):
prop = self._columns[key]
self.sources[prop] = value
# Delete the static column from the table
def deleter(self):
prop = self._columns[key]
if prop in self.sources.columns:
del self.sources[prop]
return property(getter, setter, deleter)
@property
def columns(self):
"""
Columns in the ``sources`` table
"""
return self.sources.columns
@property
def static_columns(self):
"""
Return a list of static column names, but only the ones
with a valid mapping to the ``sources`` table
"""
columns = [k for k,v in self._columns.items()
if v in self.sources.columns]
return columns
def update_static_column(self, static_name, column_name):
"""
Change the mapping from a static column to a column
in the ``sources`` table
"""
self._columns[static_name] = column_name
setattr(self.__class__, static_name,
self._add_property(static_name))
def __getitem__(self, arg):
"""
Index the Catalog the same as indexing an
astropy table
"""
keys = arg
for k,v in self._columns.items():
if hasattr(arg, '__iter__'):
keys = [v if key==k else key for key in keys]
else:
if k==arg:
keys = v
break
return self.sources[keys]
def __len__(self):
return len(self.sources)
def find_neighbors(radius, positions=None, kd_tree=None):
"""
Find all neighbors within radius of each source in a list of
positions or a KD-Tree.
Parameters
----------
radius: float
Maximum distance for a neighbor (center to center) to
be included
positions: array or list of tuples, optional
Array or list of coord1,coord2 positions to use for
neighbor search. If ``positions`` is not specified,
kd_tree must be given.
kd_tree: `~scipy.spatial.cKDTree`
KD Tree to use for the search. If this isn't specified
then a list of positions must be specified
Result
------
idx: np.array
List of indices of all sources with a neighbor. Sources with
multiple neighbors will have multiple entries, one for each neighbor
given in ``nidx``.
nidx: np.array
List of indices for neighbors matching each index in ``idx``.
"""
from scipy import spatial
if kd_tree is None:
if positions is not None:
KDTree = spatial.cKDTree
kd_tree = KDTree(positions)
else:
raise Exception("You must either specify a list "
"of positions or a kd_tree")
pairs = kd_tree.query_pairs(radius)
neighbors = np.array(list(pairs))
if len(neighbors)==0:
return np.array([], dtype=int), np.array([], dtype=int)
neighbors = np.vstack([neighbors,np.fliplr(neighbors)])
idx = neighbors[:,0]
nidx = neighbors[:,1]
sort_idx = np.argsort(idx)
return idx[sort_idx], nidx[sort_idx]
def get_merged_indices(coords, ref_coords=None, kdtree=None,
pool_size=None, separation=1/3600):
"""
Get the indices to merge two sets of coordinates together. This includes
the incides to match both sets, indices of the unmatched rows, and
indices of rows that had multiple matches.
Parameters
----------
coords: array-like
A 2D array with either 2 columns of coordinates (coord1, coord2),
where coord1 and coord2 are Nx1 arrays, or
N rows of coordinate pairs
[(coord1_1, coord1_2),(coord2_1,coord2_2),...]
where coordN_1 and coordN_2 are floats.
ref_coords: array-like, optional
A 2D array with either 2 columns of coordinates or N rows of
coordinate pairs (see ``coords`` for more).
Either ``ref_coords`` or ``kdtree`` must be specified.
kdtree: `spatial.cKDTREE`, optional
KDTree of the reference coordinates (this is an object to use
for matching positions of objects in k-dimensions, in 2 dimensions
it is a quad-tree).
Either ``ref_coords`` or ``kdtree`` must be specified.
pool_size: int, optional
Number of processors to use to match coordinates. If
``pool_size is None`` (default) then the maximum number
of processors is used.
separation: float, optional
Maximum distance between two coordinates for a match. The default
is ``1/3600``, or 1 arcsec.
Returns
-------
ref_indices: tuple(idx1, unmatched1)
Indices to match the reference coordinates to the observed
coordinates. So the rows of ref_coords[idx1]~coords[idx2],
ref_coords[unmatched1]~coords[unmatched2], if
ref_coords and coords are Nx2 arrays of coordinate pairs;
coord_indices: tuple(idx2, unmatched2)
Indices to match the coordinates to the reference coordinates.
duplicates: array-like
Indices of coordinates with multiple matches, so that
ref_coords[idx1][duplicates]~coords[idx2][duplicates]
"""
# If the user didn't specify a KDTREE,
if kdtree is None:
try:
from scipy import spatial
except ImportError:
raise ImportError(
"You must have 'scipy' installed to combine catalogs")
if ref_coords is not None:
if len(ref_coords)==2:
ref1,ref2 = ref_coords
pos1 = np.array([ref1,ref2])
pos1 = pos1.T
elif len(ref_coords[0])==2:
pos1 = ref_coords
else:
raise ValueError(
"Expected either a 2xN array or Nx2 array for ref_coords")
KDTree = spatial.cKDTree
kdtree = KDTree(pos1)
else:
raise Exception("Either ref_coords or kdtree must be specified")
if pool_size is None:
pool_size = -1
if len(coords)==2:
coord1, coord2 = coords
pos2 = np.array([coord1,coord2])
pos2 = pos2.T
elif len(coords[0])==2:
pos2 = coords
else:
raise ValueError("Expected either a 2xN array or Nx2 array for coords")
src_count1 = len(ref1)
src_count2 = len(coord1)
# Match all of the sources
d2,idx = kdtree.query(pos2, n_jobs=pool_size,
distance_upper_bound=separation)
matches = np.isfinite(d2)
idx1 = idx[matches]
idx2 = np.where(matches)[0]
# Flag all of the duplicate sources
unique, inverse, counts = np.unique(
idx1, return_inverse=True, return_counts=True)
u = unique.copy()
cidx = counts>1
u[cidx]=-1
didx = u[inverse]<0
duplicates = np.arange(len(idx1))[didx]
# Identify unmatched sources
unmatched1 = np.setdiff1d(range(src_count1), idx1)
unmatched2 = np.setdiff1d(range(src_count2), idx2)
#return (idx1, unmatched1, duplicate1), (idx2, unmatched2, duplicate2)
return (idx2, unmatched2),(idx1, unmatched1), duplicates
def get_all_merged_indices(all_coord1, all_coord2, pool_size=None,
separation=1/3600., merge_type='outer'):
"""
Get masked indices for a set of ra,dec that merge the
sources together using an outer join
Parameters
----------
all_coord1: list of array-like
List of arrays of values for the first coordinate (usually RA or X)
all_coord2: list of array-like
List of arrays of values for the second coordinate (usually DEC or Y)
pool_size: int, optional
Number of processors to use to match coordinates. If
``pool_size is None`` (default) then the maximum number
of processors is used.
separation: float, optional
Maximum distance between two coordinates for a match. The default
is ``1/3600``, or 1 arcsec.
merge_type: str, optional
Type of merge to use. This must be 'outer','inner', 'left', or 'right'.
The default is 'outer'.
Returns
-------
indices: list of masked arrays
Indices to match each catalog in all_coord1, all_coord2 to the
master catalog
matched: array
Indices of rows that has an entry for *every* set of coordinates
all_duplicates: array
Indices of rows that have duplicate values
mean_coord1: array
Average coord1 for each row
mean_coord2: array
Average coord2 for each row
"""
from astropyp.utils import misc
if merge_type not in ['outer','inner','left','right']:
raise ValueError(
"merge_type must be 'outer','inner', 'left', or 'right'")
# Initialize indices and coordinates
indices = [np.ma.array([n for n in
range(all_coord1[m].shape[0])], dtype=int)
for m in range(len(all_coord1))]
mean_coord1 = np.ma.array(all_coord1[0])
mean_coord2 = np.ma.array(all_coord2[0])
all_duplicates = np.zeros(mean_coord1.shape, dtype=bool)
# Create merged indices
for n in range(1,len(all_coord1)):
idx0, idx1, duplicates = get_merged_indices(
(all_coord1[n],all_coord2[n]),
ref_coords=(mean_coord1,mean_coord2),
pool_size=pool_size, separation=separation)
new_idx, new_unmatched = idx0
ref_idx, ref_unmatched = idx1
# Update list of duplicates
duplicates_unmatched = all_duplicates[ref_unmatched]
all_duplicates = all_duplicates[ref_idx]
all_duplicates[duplicates] = True
# Update indices
if merge_type=='outer' or merge_type=='left':
ref_idx = np.hstack([ref_idx, ref_unmatched])
new_idx = np.hstack([new_idx,
-np.ones(ref_unmatched.shape, dtype=int)])
all_duplicates = np.hstack([all_duplicates, duplicates_unmatched])
if merge_type=='outer' or merge_type=='right':
ref_idx = np.hstack([ref_idx,
-np.ones(new_unmatched.shape, dtype=int)])
new_idx = np.hstack([new_idx, new_unmatched])
all_duplicates = np.hstack([all_duplicates,
np.zeros(new_unmatched.shape, dtype=bool)])
# Mask indices
ref_mask = ref_idx<0
new_mask = new_idx<0
ref_idx = np.ma.array(ref_idx, mask=ref_mask)
new_idx = np.ma.array(new_idx, mask=new_mask)
# Update the mean coordinate values
mean_coord1 = misc.update_ma_idx(mean_coord1,ref_idx)
mean_coord2 = misc.update_ma_idx(mean_coord2,ref_idx)
new_coord1 = misc.update_ma_idx(all_coord1[n],new_idx)
new_coord2 = misc.update_ma_idx(all_coord2[n],new_idx)
mean_coord1 = np.ma.mean(
np.ma.vstack([mean_coord1, new_coord1]), axis=0)
mean_coord2 = np.ma.mean(
np.ma.vstack([mean_coord2, new_coord2]), axis=0)
# Update all of the indices with the new matches
for m in range(n):
indices[m] = misc.update_ma_idx(indices[m],ref_idx)
indices[n] = new_idx
matched = np.sum([i.mask for i in indices],axis=0)==0
return indices, matched, all_duplicates, mean_coord1, mean_coord2
def mask_catalog_columns(catalog, idx, columns=None,
catname=None, new_columns=None,
catalog_kwargs=None):
"""
Mask all of the rows in a table (or subset of columns in a table)
with a masked index and (optionally) rename the columns.
Parameters
----------
catalog: `~astropy.table.Table` or `~Catalog`
Catalog or Table to be masked
idx: `~numpy.ma.array`
Masked array of indices to use for updating the catalog
columns: list of strings, optional
Columns to include in the masked table. If ``columns is None``
(default) then all of the columns are included in the masked catalog.
catname: str, optional
Name of catalog. This is only necessary if you with to rename the
columns of the catalog for stacking later. See ``new_columns``
for more.
new_columns: list of strings, optional
New names for the columns. This may be useful if you are combining
catalogs and want to standardize the column names. If
``new_columns is None`` (default) then if a ``catname`` is provided all
of the columns are renamed to 'columnname_catname', otherwise
the original column names are used.
catalog_kwargs: dict
If the result is desired to be a `~astropyp.catalog.Catalog` then
these are the kwargs to specify when initializing the catalog
(for example names of the ra,dec,x,y columns). Otherwise
an `~astropy.table.Table` is returned
Returns
-------
tbl: `~astropy.table.Table` or `~astropyp.catalog.Catalog`
Catalog created by applying the masked index. The type of object
returned depends on if ``catalog_kwargs is None`` (default), which
returns a Table. Otherwise a Catalog is returned.
"""
from astropy.table import Table
if isinstance(catalog, Catalog):
tbl = catalog.sources
else:
tbl = catalog
new_tbl = Table(masked=True)
if columns is None:
columns = tbl.columns.keys()
if new_columns is None:
if catname is None:
new_columns = columns
else:
new_columns = ['{0}_{1}'.format(col,catname) for col in columns]
for n, col in enumerate(columns):
new_tbl[new_columns[n]] = misc.update_ma_idx(tbl[col], idx)
if catalog_kwargs is not None:
new_tbl = Catalog(new_tbl, **catalog_kwargs)
return new_tbl | [
"numpy.ones",
"astropy.table.Table",
"numpy.sum",
"numpy.zeros",
"numpy.isfinite",
"logging.getLogger",
"astropyp.utils.misc.update_ma_idx",
"numpy.argsort",
"numpy.ma.array",
"numpy.fliplr",
"numpy.where",
"numpy.array",
"numpy.hstack",
"numpy.ma.vstack",
"collections.OrderedDict",
"n... | [((230, 267), 'logging.getLogger', 'logging.getLogger', (['"""astropyp.catalog"""'], {}), "('astropyp.catalog')\n", (247, 267), False, 'import logging\n'), ((5558, 5573), 'numpy.argsort', 'np.argsort', (['idx'], {}), '(idx)\n', (5568, 5573), True, 'import numpy as np\n'), ((8984, 8999), 'numpy.isfinite', 'np.isfinite', (['d2'], {}), '(d2)\n', (8995, 8999), True, 'import numpy as np\n'), ((9126, 9182), 'numpy.unique', 'np.unique', (['idx1'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(idx1, return_inverse=True, return_counts=True)\n', (9135, 9182), True, 'import numpy as np\n'), ((11406, 11432), 'numpy.ma.array', 'np.ma.array', (['all_coord1[0]'], {}), '(all_coord1[0])\n', (11417, 11432), True, 'import numpy as np\n'), ((11451, 11477), 'numpy.ma.array', 'np.ma.array', (['all_coord2[0]'], {}), '(all_coord2[0])\n', (11462, 11477), True, 'import numpy as np\n'), ((11499, 11538), 'numpy.zeros', 'np.zeros', (['mean_coord1.shape'], {'dtype': 'bool'}), '(mean_coord1.shape, dtype=bool)\n', (11507, 11538), True, 'import numpy as np\n'), ((15713, 15731), 'astropy.table.Table', 'Table', ([], {'masked': '(True)'}), '(masked=True)\n', (15718, 15731), False, 'from astropy.table import Table\n'), ((8594, 8620), 'numpy.array', 'np.array', (['[coord1, coord2]'], {}), '([coord1, coord2])\n', (8602, 8620), True, 'import numpy as np\n'), ((9035, 9052), 'numpy.where', 'np.where', (['matches'], {}), '(matches)\n', (9043, 9052), True, 'import numpy as np\n'), ((12846, 12881), 'numpy.ma.array', 'np.ma.array', (['ref_idx'], {'mask': 'ref_mask'}), '(ref_idx, mask=ref_mask)\n', (12857, 12881), True, 'import numpy as np\n'), ((12900, 12935), 'numpy.ma.array', 'np.ma.array', (['new_idx'], {'mask': 'new_mask'}), '(new_idx, mask=new_mask)\n', (12911, 12935), True, 'import numpy as np\n'), ((13011, 13051), 'astropyp.utils.misc.update_ma_idx', 'misc.update_ma_idx', (['mean_coord1', 'ref_idx'], {}), '(mean_coord1, ref_idx)\n', (13029, 13051), False, 'from astropyp.utils 
import misc\n'), ((13073, 13113), 'astropyp.utils.misc.update_ma_idx', 'misc.update_ma_idx', (['mean_coord2', 'ref_idx'], {}), '(mean_coord2, ref_idx)\n', (13091, 13113), False, 'from astropyp.utils import misc\n'), ((13134, 13176), 'astropyp.utils.misc.update_ma_idx', 'misc.update_ma_idx', (['all_coord1[n]', 'new_idx'], {}), '(all_coord1[n], new_idx)\n', (13152, 13176), False, 'from astropyp.utils import misc\n'), ((13197, 13239), 'astropyp.utils.misc.update_ma_idx', 'misc.update_ma_idx', (['all_coord2[n]', 'new_idx'], {}), '(all_coord2[n], new_idx)\n', (13215, 13239), False, 'from astropyp.utils import misc\n'), ((13629, 13670), 'numpy.sum', 'np.sum', (['[i.mask for i in indices]'], {'axis': '(0)'}), '([i.mask for i in indices], axis=0)\n', (13635, 13670), True, 'import numpy as np\n'), ((16046, 16079), 'astropyp.utils.misc.update_ma_idx', 'misc.update_ma_idx', (['tbl[col]', 'idx'], {}), '(tbl[col], idx)\n', (16064, 16079), False, 'from astropyp.utils import misc\n'), ((1504, 1550), 'collections.OrderedDict', 'OrderedDict', (['[(k, k) for k in default_columns]'], {}), '([(k, k) for k in default_columns])\n', (1515, 1550), False, 'from collections import OrderedDict\n'), ((5383, 5406), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (5391, 5406), True, 'import numpy as np\n'), ((5408, 5431), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (5416, 5431), True, 'import numpy as np\n'), ((5469, 5489), 'numpy.fliplr', 'np.fliplr', (['neighbors'], {}), '(neighbors)\n', (5478, 5489), True, 'import numpy as np\n'), ((12198, 12233), 'numpy.hstack', 'np.hstack', (['[ref_idx, ref_unmatched]'], {}), '([ref_idx, ref_unmatched])\n', (12207, 12233), True, 'import numpy as np\n'), ((12365, 12414), 'numpy.hstack', 'np.hstack', (['[all_duplicates, duplicates_unmatched]'], {}), '([all_duplicates, duplicates_unmatched])\n', (12374, 12414), True, 'import numpy as np\n'), ((12594, 12629), 'numpy.hstack', 'np.hstack', (['[new_idx, 
new_unmatched]'], {}), '([new_idx, new_unmatched])\n', (12603, 12629), True, 'import numpy as np\n'), ((13285, 13324), 'numpy.ma.vstack', 'np.ma.vstack', (['[mean_coord1, new_coord1]'], {}), '([mean_coord1, new_coord1])\n', (13297, 13324), True, 'import numpy as np\n'), ((13380, 13419), 'numpy.ma.vstack', 'np.ma.vstack', (['[mean_coord2, new_coord2]'], {}), '([mean_coord2, new_coord2])\n', (13392, 13419), True, 'import numpy as np\n'), ((13538, 13577), 'astropyp.utils.misc.update_ma_idx', 'misc.update_ma_idx', (['indices[m]', 'ref_idx'], {}), '(indices[m], ref_idx)\n', (13556, 13577), False, 'from astropyp.utils import misc\n'), ((8051, 8073), 'numpy.array', 'np.array', (['[ref1, ref2]'], {}), '([ref1, ref2])\n', (8059, 8073), True, 'import numpy as np\n'), ((12703, 12744), 'numpy.zeros', 'np.zeros', (['new_unmatched.shape'], {'dtype': 'bool'}), '(new_unmatched.shape, dtype=bool)\n', (12711, 12744), True, 'import numpy as np\n'), ((12294, 12333), 'numpy.ones', 'np.ones', (['ref_unmatched.shape'], {'dtype': 'int'}), '(ref_unmatched.shape, dtype=int)\n', (12301, 12333), True, 'import numpy as np\n'), ((12530, 12569), 'numpy.ones', 'np.ones', (['new_unmatched.shape'], {'dtype': 'int'}), '(new_unmatched.shape, dtype=int)\n', (12537, 12569), True, 'import numpy as np\n')] |
from __future__ import annotations
import logging
from typing import (
Optional,
Union,
NewType,
List,
Any,
Callable
)
import numpy as np # type: ignore
import numba # type: ignore
from numba.core.typing import cffi_utils # type: ignore
from sunode import _cvodes
# Public API of this module.
__all__ = [
    "lib", "ffi", "ERRORS", "Borrows", "notnull",
    "check", "check_ptr", "check_code", "as_numpy"
]
logger = logging.getLogger("sunode.basic")
# Re-export the compiled cffi library handle and ffi object from _cvodes.
lib: Any = _cvodes.lib
ffi: Any = _cvodes.ffi
# Register the cffi module and its opaque handle types with numba's
# typing machinery.
cffi_utils.register_module(_cvodes)
cffi_utils.register_type(
    ffi.typeof("N_Vector").item, numba.types.Opaque("N_Vector")
)
cffi_utils.register_type(
    ffi.typeof("SUNMatrix").item, numba.types.Opaque("SUNMatrix")
)
# Map the sundials-configured C types for floating point data and array
# indices to the corresponding numpy dtypes.
_data_dtype = cffi_utils.map_type(ffi.typeof("realtype"))
_index_dtype = cffi_utils.map_type(ffi.typeof("sunindextype"))
data_dtype: Any = np.dtype(_data_dtype.name)
index_dtype: Any = np.dtype(_index_dtype.name)
# Distinct type for raw cffi pointers passed around as integers.
CPointer = NewType("CPointer", int)
# Collect the integer status codes exported by the library (CV_*, CVLS_*,
# SUN_NLS_*) so errors can be reported by name instead of number.
ERRORS = {}
for name in dir(lib):
    item = getattr(lib, name)
    if not isinstance(item, int):
        continue
    if name.startswith('CV_') or name.startswith('CVLS_') or name.startswith('SUN_NLS_'):
        ERRORS[item] = name
class Borrows:
    """Keep references to objects so they stay alive while needed.

    Instances collect borrowed objects; ``release_borrowed_func`` hands
    out a callback that drops all of them at once without retaining
    ``self``.
    """
    def __init__(self) -> None:
        # Objects being kept alive on behalf of some external resource.
        self._borrowed: List[Any] = []
    def borrow(self, arg: Any) -> None:
        """Hold a reference to *arg* until the release callback runs."""
        self._borrowed.append(arg)
    def release_borrowed_func(self) -> Callable[[], None]:
        """Return a callback that clears every borrowed reference.

        The closure captures only the list itself, deliberately avoiding
        a reference back to ``self``.
        """
        held = self._borrowed
        def _release() -> None:
            held.clear()
        return _release
def notnull(ptr: CPointer, msg: Optional[str] = None) -> CPointer:
    """Return *ptr* unchanged, raising ValueError when it is the cffi NULL.

    msg: optional custom error message used when the pointer is NULL.
    """
    if ptr != ffi.NULL:
        return ptr
    if msg is None:
        raise ValueError("CPointer is NULL.")
    raise ValueError(msg)
def check(retcode: Union[int, CPointer]) -> Union[None, CPointer]:
    """Validate a sundials return value of either kind.

    Integer status codes must be zero; cffi pointers must be non-NULL
    (and are passed back to the caller). Any other value yields None.
    """
    if isinstance(retcode, int):
        if retcode != 0:
            raise ValueError(
                'Bad return code from sundials: %s (%s)'
                % (ERRORS[retcode], retcode))
    if not isinstance(retcode, ffi.CData):
        return None
    if retcode == ffi.NULL:
        raise ValueError('Return value of sundials is NULL.')
    return retcode
def check_ptr(retval: CPointer) -> CPointer:
    """Pass through a non-NULL cffi pointer; raise on NULL."""
    if retval != ffi.NULL:
        return retval
    raise ValueError('Return value of sundials is NULL.')
def check_code(retval: int) -> int:
    """Pass through a zero sundials status code; raise on anything else."""
    if retval == 0:
        return retval
    raise ValueError('Bad return code from sundials: %s (%s)' % (ERRORS[retval], retval))
class RefCount:
    """Simple manual reference counter for tracking outstanding borrows."""
    def __init__(self) -> None:
        # Number of borrows currently outstanding.
        self.count: int = 0
    def borrow(self) -> None:
        """Record one additional borrow."""
        self.count = self.count + 1
    def release(self) -> None:
        """Drop one borrow; the count must be positive beforehand."""
        assert self.count > 0
        self.count = self.count - 1
    def is_zero(self) -> bool:
        """Return True when no borrows remain outstanding."""
        assert self.count >= 0
        return self.count == 0
def as_numpy(
    owner: Any,
    ptr: CPointer,
    size: int,
    dtype: np.dtype,
    counter: Optional[RefCount] = None,
) -> np.ndarray:
    """Expose `size` items behind a cffi pointer as a numpy array (no copy).

    owner: object that must outlive the array; the release closure keeps
        a reference to it so it cannot be collected while the pointer is
        in use.
    ptr: cffi pointer to the start of the data; must be non-NULL unless
        size is 0.
    size: number of elements of `dtype` readable at `ptr`.
    dtype: numpy dtype of the elements.
    counter: optional RefCount that is incremented now and decremented
        when the cffi pointer is garbage collected.

    Returns a numpy array viewing (not copying) the underlying buffer.
    Raises ValueError if size is negative, or if ptr is NULL and size
    is nonzero.
    """
    if size < 0:
        raise ValueError("Array size must not be negative.")
    if size != 0:
        notnull(ptr)
    def release(ptr: CPointer) -> None:
        # The `nonlocal owner` keeps `owner` captured in this closure
        # (it is never reassigned), so the owner stays alive until the
        # pointer is collected; then drop the borrow count if present.
        nonlocal owner
        if counter is not None:
            counter.release()
    if counter is not None:
        counter.borrow()
    # ffi.gc attaches `release` as a destructor that runs when the
    # returned pointer object is garbage collected.
    ptr = ffi.gc(ptr, release)
    buffer = ffi.buffer(ptr, size * dtype.itemsize)
    return np.frombuffer(buffer, dtype)
| [
"numba.core.typing.cffi_utils.register_module",
"numpy.frombuffer",
"numpy.dtype",
"numba.types.Opaque",
"typing.NewType",
"logging.getLogger"
] | [((423, 456), 'logging.getLogger', 'logging.getLogger', (['"""sunode.basic"""'], {}), "('sunode.basic')\n", (440, 456), False, 'import logging\n'), ((505, 540), 'numba.core.typing.cffi_utils.register_module', 'cffi_utils.register_module', (['_cvodes'], {}), '(_cvodes)\n', (531, 540), False, 'from numba.core.typing import cffi_utils\n'), ((867, 893), 'numpy.dtype', 'np.dtype', (['_data_dtype.name'], {}), '(_data_dtype.name)\n', (875, 893), True, 'import numpy as np\n'), ((913, 940), 'numpy.dtype', 'np.dtype', (['_index_dtype.name'], {}), '(_index_dtype.name)\n', (921, 940), True, 'import numpy as np\n'), ((954, 978), 'typing.NewType', 'NewType', (['"""CPointer"""', 'int'], {}), "('CPointer', int)\n", (961, 978), False, 'from typing import Optional, Union, NewType, List, Any, Callable\n'), ((600, 630), 'numba.types.Opaque', 'numba.types.Opaque', (['"""N_Vector"""'], {}), "('N_Vector')\n", (618, 630), False, 'import numba\n'), ((693, 724), 'numba.types.Opaque', 'numba.types.Opaque', (['"""SUNMatrix"""'], {}), "('SUNMatrix')\n", (711, 724), False, 'import numba\n'), ((3395, 3423), 'numpy.frombuffer', 'np.frombuffer', (['buffer', 'dtype'], {}), '(buffer, dtype)\n', (3408, 3423), True, 'import numpy as np\n')] |
"""
This script demonstrates initialisation, training, evaluation, and forecasting of ForecastNet. The dataset used for the
time-invariance test in section 6.1 of the ForecastNet paper is used for this demonstration.
Paper:
"ForecastNet: A Time-Variant Deep Feed-Forward Neural Network Architecture for Multi-Step-Ahead Time-Series Forecasting"
by <NAME>, <NAME>, and <NAME>
Link to the paper: https://arxiv.org/abs/2002.04155
"""
import numpy as np
import matplotlib.pyplot as plt
from forecastNet import forecastnet
from train import train
from evaluate import evaluate
from demoDataset import generate_data
# Use a fixed seed for reproducible results
np.random.seed(1)

# Generate the dataset (time-invariance test data, section 6.1 of the paper)
train_data, test_data, valid_data, period = generate_data(T=2750, period=50)

# Model parameters
model_type = 'dense2'  # 'dense' or 'conv', 'dense2' or 'conv2'
in_seq_length = 2 * period  # input window: two full seasonal periods
hidden_dim = 24
out_seq_length = period  # forecast horizon: one full period
learning_rate = 0.0001
n_epochs = 100

# Initialise model
fcstnet = forecastnet(in_seq_length=in_seq_length, out_seq_length=out_seq_length, hidden_dim=hidden_dim,
                      learning_rate=learning_rate, n_epochs=n_epochs, save_file='./forecastnet3.ckpt', model=model_type)

# Train the model
training_costs, validation_costs = train(fcstnet, train_data, valid_data)

# Plot the training curves
plt.figure()
plt.plot(training_costs)
plt.plot(validation_costs)

# Evaluate the model
mase, smape = evaluate(fcstnet, test_data, return_lists=False)
print('')
print('MASE:', mase)
print('SMAPE:', smape)

# Generate and plot forecasts for various samples from the test dataset.
# (A dead `start_idx = 20` assignment was removed here: it was immediately
# shadowed by the loop variable below.)
for start_idx in [0, 50, 100]:
    # Use the tail of the test set starting at start_idx
    test_sample = test_data[:, start_idx:]
    # Models with a Gaussian Mixture Density Component output
    if model_type == 'dense' or model_type == 'conv':
        # Generate a set of n_samples forecasts (Monte Carlo Forecasts)
        n_samples = 10  # 100 is a better value, but takes longer to compute
        batch_size = test_sample.shape[0]
        y_pred = np.zeros((batch_size, fcstnet.out_seq_length, n_samples))
        mu = np.zeros((batch_size, fcstnet.out_seq_length, n_samples))
        sigma = np.zeros((batch_size, fcstnet.out_seq_length, n_samples))
        for i in range(n_samples):
            print('Forecast sample', i)
            y_pred[:, :, i], mu[:, :, i], sigma[:, :, i] = fcstnet.forecast(test_sample)
        # Compute the Monte Carlo estimates of the mean and standard deviation
        s_mean = np.mean(y_pred, axis=2)
        s_std = np.std(y_pred, axis=2)
        botVarLine = s_mean - s_std
        topVarLine = s_mean + s_std
        # Plot the Monte Carlo mean and standard deviation band
        plt.figure()
        plt.plot(np.arange(0, fcstnet.in_seq_length + fcstnet.out_seq_length),
                 test_sample[0, 0:fcstnet.in_seq_length + fcstnet.out_seq_length],
                 'o-', label='test_data')
        plt.plot(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
                 s_mean[0, :],
                 '*-', linewidth=0.7, label='mean')
        plt.fill_between(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
                         botVarLine[0, :],
                         topVarLine[0, :],
                         color='gray', alpha=0.3, label='Uncertainty')
    # Models with a linear output
    elif model_type == 'dense2' or model_type == 'conv2':
        # Generate a single deterministic forecast
        y_pred = fcstnet.forecast(test_sample)
        # Plot the forecast against the ground truth
        plt.figure()
        plt.plot(np.arange(0, fcstnet.in_seq_length + fcstnet.out_seq_length),
                 test_sample[0, 0:fcstnet.in_seq_length + fcstnet.out_seq_length],
                 'o-', label='test_data')
        plt.plot(np.arange(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length),
                 y_pred[0, :],
                 '*-', linewidth=0.7, label='mean')

plt.show()
| [
"forecastNet.forecastnet",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.std",
"numpy.zeros",
"demoDataset.generate_data",
"evaluate.evaluate",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"train.train"
] | [((656, 673), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (670, 673), True, 'import numpy as np\n'), ((742, 774), 'demoDataset.generate_data', 'generate_data', ([], {'T': '(2750)', 'period': '(50)'}), '(T=2750, period=50)\n', (755, 774), False, 'from demoDataset import generate_data\n'), ((993, 1194), 'forecastNet.forecastnet', 'forecastnet', ([], {'in_seq_length': 'in_seq_length', 'out_seq_length': 'out_seq_length', 'hidden_dim': 'hidden_dim', 'learning_rate': 'learning_rate', 'n_epochs': 'n_epochs', 'save_file': '"""./forecastnet3.ckpt"""', 'model': 'model_type'}), "(in_seq_length=in_seq_length, out_seq_length=out_seq_length,\n hidden_dim=hidden_dim, learning_rate=learning_rate, n_epochs=n_epochs,\n save_file='./forecastnet3.ckpt', model=model_type)\n", (1004, 1194), False, 'from forecastNet import forecastnet\n'), ((1258, 1296), 'train.train', 'train', (['fcstnet', 'train_data', 'valid_data'], {}), '(fcstnet, train_data, valid_data)\n', (1263, 1296), False, 'from train import train\n'), ((1324, 1336), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1334, 1336), True, 'import matplotlib.pyplot as plt\n'), ((1337, 1361), 'matplotlib.pyplot.plot', 'plt.plot', (['training_costs'], {}), '(training_costs)\n', (1345, 1361), True, 'import matplotlib.pyplot as plt\n'), ((1362, 1388), 'matplotlib.pyplot.plot', 'plt.plot', (['validation_costs'], {}), '(validation_costs)\n', (1370, 1388), True, 'import matplotlib.pyplot as plt\n'), ((1425, 1473), 'evaluate.evaluate', 'evaluate', (['fcstnet', 'test_data'], {'return_lists': '(False)'}), '(fcstnet, test_data, return_lists=False)\n', (1433, 1473), False, 'from evaluate import evaluate\n'), ((3951, 3961), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3959, 3961), True, 'import matplotlib.pyplot as plt\n'), ((2014, 2071), 'numpy.zeros', 'np.zeros', (['(batch_size, fcstnet.out_seq_length, n_samples)'], {}), '((batch_size, fcstnet.out_seq_length, n_samples))\n', (2022, 2071), True, 
'import numpy as np\n'), ((2085, 2142), 'numpy.zeros', 'np.zeros', (['(batch_size, fcstnet.out_seq_length, n_samples)'], {}), '((batch_size, fcstnet.out_seq_length, n_samples))\n', (2093, 2142), True, 'import numpy as np\n'), ((2159, 2216), 'numpy.zeros', 'np.zeros', (['(batch_size, fcstnet.out_seq_length, n_samples)'], {}), '((batch_size, fcstnet.out_seq_length, n_samples))\n', (2167, 2216), True, 'import numpy as np\n'), ((2478, 2501), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': '(2)'}), '(y_pred, axis=2)\n', (2485, 2501), True, 'import numpy as np\n'), ((2518, 2540), 'numpy.std', 'np.std', (['y_pred'], {'axis': '(2)'}), '(y_pred, axis=2)\n', (2524, 2540), True, 'import numpy as np\n'), ((2681, 2693), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2691, 2693), True, 'import matplotlib.pyplot as plt\n'), ((2711, 2771), 'numpy.arange', 'np.arange', (['(0)', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(0, fcstnet.in_seq_length + fcstnet.out_seq_length)\n', (2720, 2771), True, 'import numpy as np\n'), ((2915, 3000), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (2924, 3000), True, 'import numpy as np\n'), ((3105, 3190), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (3114, 3190), True, 'import numpy as np\n'), ((3551, 3563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3561, 3563), True, 'import matplotlib.pyplot as plt\n'), ((3581, 3641), 'numpy.arange', 'np.arange', (['(0)', '(fcstnet.in_seq_length + fcstnet.out_seq_length)'], {}), '(0, fcstnet.in_seq_length + fcstnet.out_seq_length)\n', (3590, 3641), True, 'import numpy as np\n'), ((3785, 3870), 'numpy.arange', 'np.arange', (['fcstnet.in_seq_length', '(fcstnet.in_seq_length + 
fcstnet.out_seq_length)'], {}), '(fcstnet.in_seq_length, fcstnet.in_seq_length + fcstnet.out_seq_length\n )\n', (3794, 3870), True, 'import numpy as np\n')] |
"""
This is the main class for the NARPS analysis
There are three classes defined here:
Narps: this is a class that wraps the entire dataset
NarpsTeam: this class is instantiated for each team
NarpsDirs: This class contains info about all
of the directories that are needed for this and
subsequent analyses
The code under the main loop at the bottom
runs all of the image preprocessing that is
needed for subsequent analyses
"""
import numpy
import pandas
import nibabel
import json
import os
import sys
import time
import glob
import datetime
import nilearn.image
import nilearn.input_data
import nilearn.plotting
import shutil
import warnings
import pickle
from nipype.interfaces.fsl.model import SmoothEstimate
import wget
import tarfile
from urllib.error import HTTPError
import hashlib
import inspect
from utils import get_metadata, TtoZ, get_map_metadata,\
log_to_file, stringify_dict
from ValueDiagnostics import compare_thresh_unthresh_values
# # set up data url - COMMENTING NOW, WILL REMOVE
# # this is necessary for now because the data are still private
# # once the data are public we can share the info.json file
# Hypotheses:
#
# Parametric effect of gain:
#
# 1. Positive effect in ventromedial PFC - equal indifference group
# 2. Positive effect in ventromedial PFC - equal range group
# 3. Positive effect in ventral striatum - equal indifference group
# 4. Positive effect in ventral striatum - equal range group
#
# Parametric effect of loss:
# - 5: Negative effect in VMPFC - equal indifference group
# - 6: Negative effect in VMPFC - equal range group
# - 7: Positive effect in amygdala - equal indifference group
# - 8: Positive effect in amygdala - equal range group
#
# Equal range vs. equal indifference:
#
# - 9: Greater positive response to losses in amygdala for equal range
# condition vs. equal indifference condition.
# Short labels for the nine NARPS hypotheses, keyed by hypothesis number
# (full text of each hypothesis is in the comment block above).
hypotheses = {1: '+gain: equal indiff',
              2: '+gain: equal range',
              3: '+gain: equal indiff',
              4: '+gain: equal range',
              5: '-loss: equal indiff',
              6: '-loss: equal range',
              7: '+loss: equal indiff',
              8: '+loss: equal range',
              9: '+loss:ER>EI'}
# Subset of hypotheses used in the analyses; 3 and 4 are omitted
# (their labels duplicate 1 and 2 - presumably treated jointly; see the
# hypothesis descriptions above).
hypnums = [1, 2, 5, 6, 7, 8, 9]
# separate class to store base directories,
# since we need them in multiple places
class NarpsDirs(object):
    """
    class defining directories for project

    Creates (if needed) the output directory tree under basedir,
    downloads the original data when missing, and records the paths of
    the FSL MNI templates used throughout the analysis.
    """
    def __init__(self, basedir, dataurl=None,
                 force_download=False, testing=False):
        """
        basedir: root directory for the project (its parent must exist)
        dataurl: URL of the original data tarball (needed for download)
        force_download: if True, remove existing orig data and re-fetch
        testing: if True, skip logging the startup message
        """
        # set up a dictionary to contain all of the
        # directories
        self.dirs = {}
        self.testing = testing

        # check to make sure home of basedir exists
        assert os.path.exists(os.path.dirname(basedir))
        self.dirs['base'] = basedir
        if not os.path.exists(basedir):
            os.mkdir(basedir)

        self.force_download = force_download
        self.data_url = dataurl

        dirs_to_add = ['output', 'metadata', 'templates',
                       'cached', 'figures', 'logs', 'orig',
                       'image_diagnostics_orig',
                       'image_diagnostics_zstat']
        for d in dirs_to_add:
            self.dirs[d] = os.path.join(self.dirs['base'], d)

        # FSL standard templates live inside the FSL installation
        self.dirs['fsl_templates'] = os.path.join(
            os.environ['FSLDIR'],
            'data/standard')

        # autogenerate all of the directories
        # except for the orig dir
        for d in dirs_to_add:
            if d != 'orig' and not os.path.exists(self.dirs[d]):
                os.mkdir(self.dirs[d])

        self.logfile = os.path.join(self.dirs['logs'], 'narps.txt')
        if not self.testing:
            log_to_file(
                self.logfile,
                'Running Narps main class',
                flush=True)

        output_dirs = ['resampled', 'rectified', 'zstat',
                       'thresh_mask_orig']
        for o in output_dirs:
            self.get_output_dir(o)

        # if raw data don't exist, download them
        if self.force_download and os.path.exists(self.dirs['orig']):
            shutil.rmtree(self.dirs['orig'])
        if not os.path.exists(self.dirs['orig']):
            self.get_orig_data()
        assert os.path.exists(self.dirs['orig'])

        # make sure the necessary templates are present
        # these should be downloaded with the raw data
        self.MNI_mask = os.path.join(self.dirs['fsl_templates'],
                                     'MNI152_T1_2mm_brain_mask.nii.gz')
        assert os.path.exists(self.MNI_mask)

        self.MNI_template = os.path.join(self.dirs['fsl_templates'],
                                         'MNI152_T1_2mm.nii.gz')
        assert os.path.exists(self.MNI_template)

        self.full_mask_img = os.path.join(self.dirs['templates'],
                                          'MNI152_all_voxels.nii.gz')

    def get_output_dir(self, dirID, base='output'):
        """get the directory path for a particular ID. if it doesn't
        exist then create it and save to the dirs list
        dir names always match the dir ID exactly

        dirID: identifier (and directory name) of the output directory
        base: key of the parent directory within self.dirs
        Returns the path of the (possibly newly created) directory.
        """
        if dirID in self.dirs:
            return(self.dirs[dirID])
        self.dirs[dirID] = os.path.join(
            self.dirs[base],
            dirID
        )
        if not os.path.exists(self.dirs[dirID]):
            os.mkdir(self.dirs[dirID])
        return(self.dirs[dirID])

    def get_orig_data(self):
        """
        download original data from repository

        Retries the download up to MAX_TRIES times on HTTP errors, logs an
        md5 hash of the tarball for provenance, extracts it into the base
        directory, and removes the tarball.
        Raises Exception when no URL is configured or the download keeps
        failing.
        """
        log_to_file(
            self.logfile,
            'get_orig_data',
            headspace=2)
        log_to_file(self.logfile, 'DATA_URL: %s' % self.data_url)
        MAX_TRIES = 5

        if self.data_url is None:
            raise Exception('no URL for original data, cannot download')

        print('orig data do not exist, downloading...')
        output_directory = self.dirs['base']
        no_dl = True
        ntries = 0
        # try several times in case of http error
        while no_dl:
            try:
                filename = wget.download(self.data_url, out=output_directory)
                no_dl = False
            except HTTPError:
                ntries += 1
                time.sleep(1)  # wait a second
                if ntries > MAX_TRIES:
                    raise Exception('Problem downloading original data')

        # save a hash of the tarball for data integrity
        # (use a context manager so the file handle is closed; the
        # previous bare open() leaked the handle)
        with open(filename, 'rb') as f:
            filehash = hashlib.md5(f.read()).hexdigest()
        log_to_file(self.logfile, 'hash of tar file: %s' % filehash)

        # extract and then remove the tarball; the with-block ensures the
        # archive handle is closed (it was previously left open)
        with tarfile.open(filename) as tarfile_obj:
            tarfile_obj.extractall(path=self.dirs['base'])
        os.remove(filename)
class NarpsTeam(object):
    """
    class defining team information

    Holds the per-team image inventory (thresholded/unthresholded maps in
    their various processing states, per hypothesis), image diagnostics,
    and the per-hypothesis rectification flags.
    """
    def __init__(self, teamID, NV_collection_id, dirs, verbose=False):
        """
        teamID: NARPS team identifier
        NV_collection_id: NeuroVault collection id for the team's maps
        dirs: NarpsDirs instance describing the project layout
        verbose: if True, print progress information
        """
        self.dirs = dirs
        self.teamID = teamID
        self.NV_collection_id = NV_collection_id
        self.datadir_label = '%s_%s' % (NV_collection_id, teamID)
        # directory for the original maps
        self.input_dir = os.path.join(self.dirs.dirs['orig'],
                                      '%s_%s' % (NV_collection_id, teamID))
        if not os.path.exists(self.input_dir):
            print("Warning: Input dir (%s) does not exist" % self.input_dir)

        self.verbose = verbose
        self.image_json = None
        self.jsonfile = None
        self.has_all_images = None
        self.logs = {}

        # create image directory structure:
        # images[imgtype][state][hyp] -> file path (or None when missing)
        output_dirs = {'thresh': ['orig', 'resampled', 'thresh_mask_orig'],
                       'unthresh': ['orig', 'resampled', 'rectified', 'zstat']}
        self.images = {}
        for imgtype in ['thresh', 'unthresh']:
            self.images[imgtype] = {}
            for o in output_dirs[imgtype]:
                self.images[imgtype][o] = {}
        self.n_nan_inmask_values = {}
        self.n_zero_inmask_values = {}
        self.has_resampled = None
        self.has_binarized_masks = None

        # populate the image data structure
        self.get_orig_images()

        # check whether image needs to be rectified
        logfile = os.path.join(
            self.dirs.dirs['logs'],
            'image_diagnostics.log')
        collection_string = '%s_%s' % (self.NV_collection_id, self.teamID)
        if not os.path.exists(self.dirs.dirs['image_diagnostics_orig']):
            os.mkdir(self.dirs.dirs['image_diagnostics_orig'])
        self.image_diagnostics_file = os.path.join(
            self.dirs.dirs['image_diagnostics_orig'],
            '%s.csv' % collection_string
        )
        # compute diagnostics once and cache them to csv
        if not os.path.exists(self.image_diagnostics_file):
            self.image_diagnostics = compare_thresh_unthresh_values(
                dirs, collection_string, logfile)
            self.image_diagnostics.to_csv(self.image_diagnostics_file)
        else:
            self.image_diagnostics = pandas.read_csv(
                self.image_diagnostics_file)

        # create a dict with the rectified values
        # use answers from spreadsheet
        self.rectify = {}
        for i in self.image_diagnostics.index:
            self.rectify[
                self.image_diagnostics.loc[
                    i, 'hyp']] = self.image_diagnostics.loc[
                        i, 'reverse_contrast']
        # manual fixes to rectify status per spreadsheet answers for hyp 9
        if self.teamID in ['46CD']:
            self.rectify[9] = True

    def get_orig_images(self):
        """
        find orig images

        Records the path of each hypothesis map (or None if missing) and
        updates self.has_all_images per image type.
        """
        self.has_all_images = {
            'thresh': True,
            'unthresh': True}
        for hyp in hypotheses:
            for imgtype in self.images:
                imgfile = os.path.join(
                    self.input_dir,
                    'hypo%d_%s.nii.gz' % (hyp, imgtype))
                if os.path.exists(imgfile):
                    self.images[imgtype]['orig'][hyp] = imgfile
                else:
                    self.images[imgtype]['orig'][hyp] = None
                    self.has_all_images[imgtype] = False

    def create_binarized_thresh_masks(self, thresh=1e-4,
                                      overwrite=False,
                                      replace_na=True):
        """
        create binarized version of thresholded maps

        thresh: magnitude above which a voxel counts as active (a small
            positive value rather than zero, to avoid numeric noise)
        overwrite: if True, re-create masks even when they already exist
        replace_na: if True, replace NaN voxels with zero before binarizing
        """
        self.has_binarized_masks = True
        if self.verbose:
            print('creating binarized masks for', self.teamID)

        for hyp in self.images['thresh']['orig']:
            img = self.images['thresh']['orig'][hyp]
            maskimg = os.path.join(
                self.dirs.dirs['thresh_mask_orig'],
                self.datadir_label,
                os.path.basename(img))
            self.images['thresh']['thresh_mask_orig'][hyp] = maskimg
            if not os.path.exists(os.path.dirname(
                    maskimg)):
                os.mkdir(os.path.dirname(maskimg))
            if overwrite or not os.path.exists(maskimg):
                # load the image and threshold/binarize it
                threshimg = nibabel.load(img)
                threshdata = threshimg.get_data()
                # some images use nan instead of zero for the non-excursion
                # voxels, so we need to replace with zeros
                if replace_na:
                    threshdata = numpy.nan_to_num(threshdata)
                threshdata_bin = numpy.zeros(threshdata.shape)
                # if the team reported using a negative contrast,
                # then we use the negative direction, otherwise
                # use the positive direction.
                # we use a small number instead of zero to address
                # numeric issues
                if self.rectify[hyp]:
                    # use negative
                    threshdata_bin[threshdata < -1*thresh] = 1
                else:
                    # use positive
                    threshdata_bin[threshdata > thresh] = 1
                # save back to a nifti image with same geometry
                # as original
                bin_img = nibabel.Nifti1Image(threshdata_bin,
                                              affine=threshimg.affine)
                bin_img.to_filename(maskimg)
            else:
                # mask already exists and overwrite is False - keep it
                # (a dead reload of the mask behind an always-false
                # os.path.exists check was removed here)
                if self.verbose:
                    print('copying existing binary mask for',
                          self.teamID)

    def get_resampled_images(self, imgtype,
                             overwrite=False, replace_na=False):
        """
        resample images into common space using nilearn

        imgtype: 'thresh' or 'unthresh'
        overwrite: if True, re-create resampled images even when present
        replace_na: currently unused; retained for interface compatibility
        """
        self.has_resampled = True
        # use linear interpolation for binarized maps, then threshold at 0.5
        # this avoids empty voxels that can occur with NN interpolation
        interp_type = {'thresh': 'linear', 'unthresh': 'continuous'}
        data_dirname = {'thresh': 'thresh_mask_orig',
                        'unthresh': 'orig'}
        resampled_dir = self.dirs.get_output_dir('resampled')
        for hyp in hypotheses:
            infile = os.path.join(
                self.dirs.dirs[data_dirname[imgtype]],
                self.datadir_label,
                'hypo%d_%s.nii.gz' % (hyp, imgtype))
            outfile = os.path.join(
                resampled_dir,
                self.datadir_label,
                'hypo%d_%s.nii.gz' % (hyp, imgtype))
            self.images[imgtype]['resampled'][hyp] = outfile
            if not os.path.exists(os.path.dirname(outfile)):
                os.mkdir(os.path.dirname(outfile))

            if not os.path.exists(outfile) or overwrite:
                if self.verbose:
                    print("resampling", infile)
                # create resampled file
                # ignore nilearn warnings
                # these occur on some of the unthresholded images
                # that contains NaN values
                # we probably don't want to set those to zero
                # because those would enter into interpolation
                # and then would be treated as real zeros later
                # rather than "missing data" which is the usual
                # intention
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    resampled = nilearn.image.resample_to_img(
                        infile,
                        self.dirs.MNI_template,
                        interpolation=interp_type[imgtype])
                if imgtype == 'thresh':
                    resampled = nilearn.image.math_img(
                        'img>0.5',
                        img=resampled)
                resampled.to_filename(outfile)
            else:
                if self.verbose:
                    print('using existing resampled image for',
                          self.teamID)
class Narps(object):
"""
main class for NARPS analysis
"""
    def __init__(self, basedir, metadata_file=None,
                 verbose=False, overwrite=False,
                 dataurl=None, testing=False):
        """
        basedir: root directory for the project (passed to NarpsDirs)
        metadata_file: path to the analysis metadata spreadsheet; defaults
            to analysis_pipelines_for_analysis.xlsx in the orig directory
        verbose: if True, print progress information
        overwrite: default overwrite flag for cached results
        dataurl: URL for the original data (passed to NarpsDirs)
        testing: if True, NarpsDirs skips its startup log message
        """
        self.basedir = basedir
        self.dirs = NarpsDirs(basedir, dataurl=dataurl,
                              testing=testing)
        self.verbose = verbose
        self.teams = {}
        self.overwrite = overwrite
        self.started_at = datetime.datetime.now()
        self.testing = testing
        # create the full mask image if it doesn't already exist
        if not os.path.exists(self.dirs.full_mask_img):
            print('making full image mask')
            self.mk_full_mask_img(self.dirs)
        assert os.path.exists(self.dirs.full_mask_img)
        # get input dirs for orig data
        self.image_jsons = None
        self.input_dirs = self.get_input_dirs(self.dirs)
        # check images for each team
        self.complete_image_sets = {}
        self.get_orig_images(self.dirs)
        for imgtype in ['thresh', 'unthresh']:
            log_to_file(
                self.dirs.logfile,
                'found %d teams with complete original %s datasets' % (
                    len(self.complete_image_sets[imgtype]), imgtype))
        # set up metadata
        if metadata_file is None:
            self.metadata_file = os.path.join(
                self.dirs.dirs['orig'],
                'analysis_pipelines_for_analysis.xlsx')
        else:
            self.metadata_file = metadata_file
        self.metadata = get_metadata(self.metadata_file)
        # per-hypothesis NaN/zero counts, filled in by later processing
        self.hypothesis_metadata = pandas.DataFrame(
            columns=['teamID', 'hyp', 'n_na', 'n_zero'])
        self.all_maps = {'thresh': {'resampled': None},
                         'unthresh': {'resampled': None}}
        self.rectified_list = []
def mk_full_mask_img(self, dirs):
"""
create a mask image with ones in all voxels
"""
# make full image mask (all voxels)
mi = nibabel.load(self.dirs.MNI_mask)
d = numpy.ones(mi.shape)
full_mask = nibabel.Nifti1Image(d, affine=mi.affine)
full_mask.to_filename(self.dirs.full_mask_img)
def get_input_dirs(self, dirs, verbose=True, load_json=True):
"""
get orig dirs
- assumes that images.json is present for each valid dir
"""
input_files = glob.glob(
os.path.join(dirs.dirs['orig'], '*/hypo1_*thresh.nii.gz'))
input_dirs = [os.path.dirname(i) for i in input_files]
input_dirs = list(set(input_dirs)) # get unique dirs
log_to_file(
self.dirs.logfile,
'found %d input directories' % len(input_dirs))
for i in input_dirs:
collection_id = os.path.basename(i)
NV_collection_id, teamID = collection_id.split('_')
if teamID not in self.teams:
self.teams[teamID] = NarpsTeam(
teamID, NV_collection_id, dirs, verbose=self.verbose)
if os.path.exists(os.path.join(i, 'images.json')):
self.teams[teamID].jsonfile = os.path.join(
i, 'images.json')
with open(self.teams[teamID].jsonfile) as f:
self.teams[teamID].image_json = json.load(f)
def get_orig_images(self, dirs):
"""
load orig images
"""
self.complete_image_sets = {
'thresh': [],
'unthresh': []}
for teamID in self.teams:
self.teams[teamID].get_orig_images()
for imgtype in self.teams[teamID].images:
if self.teams[teamID].has_all_images[imgtype]:
self.complete_image_sets[imgtype].append(teamID)
# sort the teams - this is the order that will be used
for imgtype in self.teams[teamID].images:
self.complete_image_sets[imgtype].sort()
def get_binarized_thresh_masks(self):
"""
create binarized thresholded maps for each team
"""
log_to_file(
self.dirs.logfile,
sys._getframe().f_code.co_name,
headspace=2)
for teamID in self.complete_image_sets['thresh']:
self.teams[teamID].create_binarized_thresh_masks()
def get_resampled_images(self, overwrite=None):
"""
resample all images into FSL MNI space
"""
log_to_file(
self.dirs.logfile,
sys._getframe().f_code.co_name,
headspace=2)
if overwrite is None:
overwrite = self.overwrite
for imgtype in ['thresh', 'unthresh']:
for teamID in self.complete_image_sets[imgtype]:
self.teams[teamID].get_resampled_images(imgtype=imgtype)
    def check_image_values(self, overwrite=None):
        """
        get # of nonzero and NA voxels for each image

        overwrite: if None, fall back to the instance-wide self.overwrite
            flag; when False and a cached csv exists, the cache is used.
        Returns a pandas DataFrame with columns
        ['teamID', 'hyp', 'n_na', 'n_nonzero'].
        NOTE(review): the 'n_nonzero' column actually stores the count of
        voxels equal to zero (numpy.sum(threshdata == 0.0)); the column
        name appears wrong - confirm against downstream consumers before
        renaming.
        """
        # log the calling function's name for provenance
        log_to_file(
            self.dirs.logfile,
            sys._getframe().f_code.co_name,
            headspace=2)
        if overwrite is None:
            overwrite = self.overwrite
        image_metadata_file = os.path.join(
            self.dirs.dirs['metadata'], 'image_metadata_df.csv')
        # use the cached results when present unless overwrite is set
        if os.path.exists(image_metadata_file) and not overwrite:
            print('using cached image metdata')
            image_metadata_df = pandas.read_csv(image_metadata_file)
            return(image_metadata_df)
        # otherwise load from scratch
        image_metadata = []
        masker = nilearn.input_data.NiftiMasker(mask_img=self.dirs.MNI_mask)
        for teamID in self.complete_image_sets['thresh']:
            for hyp in self.teams[teamID].images['thresh']['resampled']:
                threshfile = self.teams[teamID].images[
                    'thresh']['resampled'][hyp]
                threshdata = masker.fit_transform(threshfile)
                # count NaN voxels and zero-valued voxels within the mask
                image_metadata.append(
                    [teamID, hyp, numpy.sum(numpy.isnan(threshdata)),
                     numpy.sum(threshdata == 0.0)])
        image_metadata_df = pandas.DataFrame(
            image_metadata, columns=['teamID', 'hyp', 'n_na', 'n_nonzero'])
        image_metadata_df.to_csv(image_metadata_file)
        return(image_metadata_df)
    def create_concat_images(self, datatype='resampled',
                             create_voxel_map=False,
                             imgtypes=None,
                             overwrite=None):
        """
        Create 4D images concatenated across teams, one per hypothesis,
        ordered by self.complete_image_sets.

        Parameters
        ----------
        datatype : str
            Which per-team image variant to concatenate (e.g. 'resampled').
        create_voxel_map : bool
            If True, also write a map of the proportion of teams with a
            nonzero value at each voxel.
        imgtypes : list or None
            Image types to process; defaults to ['thresh', 'unthresh'].
        overwrite : bool or None
            Falls back to self.overwrite when None.

        Returns
        -------
        dict
            self.all_maps, updated with the lists of files concatenated.
        """
        log_to_file(
            self.dirs.logfile,
            sys._getframe().f_code.co_name,
            headspace=2)
        # log the call arguments for provenance
        func_args = inspect.getargvalues(
            inspect.currentframe()).locals
        log_to_file(
            self.dirs.logfile,
            stringify_dict(func_args))
        if imgtypes is None:
            imgtypes = ['thresh', 'unthresh']
        if overwrite is None:
            overwrite = self.overwrite
        for imgtype in imgtypes:
            concat_dir = self.dirs.get_output_dir(
                '%s_concat_%s' % (imgtype, datatype))
            for hyp in range(1, 10):
                outfile = os.path.join(
                    concat_dir,
                    'hypo%d.nii.gz' % hyp)
                if self.verbose:
                    print(outfile)
                if not os.path.exists(outfile) or overwrite:
                    if self.verbose:
                        print('%s - hypo %d: creating concat file' % (
                            imgtype, hyp))
                    # only teams whose image file actually exists contribute
                    concat_teams = [
                        teamID for teamID in self.complete_image_sets[imgtype]
                        if os.path.exists(
                            self.teams[teamID].images[imgtype][datatype][hyp])]
                    self.all_maps[imgtype][datatype] = [
                        self.teams[teamID].images[imgtype][datatype][hyp]
                        for teamID in concat_teams]
                    # use nilearn NiftiMasker to load data within the MNI
                    # mask and save the concatenation to a new file
                    masker = nilearn.input_data.NiftiMasker(
                        mask_img=self.dirs.MNI_mask)
                    concat_data = masker.fit_transform(
                        self.all_maps[imgtype][datatype])
                    concat_img = masker.inverse_transform(concat_data)
                    concat_img.to_filename(outfile)
                    if create_voxel_map:
                        concat_data = nibabel.load(outfile).get_data()
                        # fraction of teams with |value| > 1e-6 at each voxel
                        voxel_map = numpy.mean(
                            numpy.abs(concat_data) > 1e-6, 3)
                        voxel_img = nibabel.Nifti1Image(
                            voxel_map, affine=concat_img.affine)
                        mapfile = outfile.replace(
                            '.nii.gz', '_voxelmap.nii.gz'
                        )
                        # guard against a failed suffix replacement
                        assert mapfile != outfile
                        voxel_img.to_filename(mapfile)
                    # save team ID and files to a label file for provenance
                    labelfile = outfile.replace('.nii.gz', '.labels')
                    with open(labelfile, 'w') as f:
                        for i, team in enumerate(concat_teams):
                            f.write('%s\t%s%s' % (
                                team,
                                self.all_maps[imgtype][datatype][i],
                                os.linesep))
                else:
                    if self.verbose:
                        print('%s - hypo %d: using existing file' % (
                            imgtype, hyp))
        return(self.all_maps)
    def create_mean_thresholded_images(self, datatype='resampled',
                                       overwrite=None, thresh=1e-5):
        """
        Create overlap maps for thresholded images: for each hypothesis,
        binarize the concatenated thresholded data at `thresh` and take the
        mean across teams, giving the proportion of teams active per voxel.

        Parameters
        ----------
        datatype : str
            Which concatenated variant to read (e.g. 'resampled').
        overwrite : bool or None
            Falls back to self.overwrite when None.
        thresh : float
            Values above this are counted as active.
        """
        log_to_file(
            self.dirs.logfile,
            sys._getframe().f_code.co_name,
            headspace=2)
        # log the call arguments for provenance
        func_args = inspect.getargvalues(
            inspect.currentframe()).locals
        log_to_file(
            self.dirs.logfile,
            stringify_dict(func_args))
        imgtype = 'thresh'
        if overwrite is None:
            overwrite = self.overwrite
        output_dir = self.dirs.get_output_dir('overlap_binarized_thresh')
        concat_dir = self.dirs.get_output_dir(
            '%s_concat_%s' % (imgtype, datatype))
        for hyp in range(1, 10):
            outfile = os.path.join(
                output_dir,
                'hypo%d.nii.gz' % hyp)
            if not os.path.exists(outfile) or overwrite:
                if self.verbose:
                    print('%s - hypo %d: creating overlap file' % (
                        imgtype, hyp))
                concat_file = os.path.join(
                    concat_dir,
                    'hypo%d.nii.gz' % hyp)
                concat_img = nibabel.load(concat_file)
                concat_data = concat_img.get_data()
                # binarize, then average over the 4th (team) dimension
                concat_data = (concat_data > thresh).astype('float')
                concat_mean = numpy.mean(concat_data, 3)
                concat_mean_img = nibabel.Nifti1Image(concat_mean,
                                                      affine=concat_img.affine)
                concat_mean_img.to_filename(outfile)
            else:
                if self.verbose:
                    print('%s - hypo %d: using existing file' % (
                        imgtype, hyp))
    def create_rectified_images(self, map_metadata_file=None,
                                overwrite=None):
        """
        Create rectified images.

        Contrasts 5 and 6 were negative contrasts; some teams uploaded
        images where negative values provided evidence in favor of the
        contrast.  Using metadata provided by teams, we identify these
        images and flip their valence so that all maps present positive
        evidence for each contrast.  Non-flipped images are simply copied.

        Teams that were rectified are appended to self.rectified_list and
        written to metadata/rectified_images_list.txt for provenance.
        """
        log_to_file(
            self.dirs.logfile,
            sys._getframe().f_code.co_name,
            headspace=2)
        # log the call arguments for provenance
        func_args = inspect.getargvalues(
            inspect.currentframe()).locals
        log_to_file(
            self.dirs.logfile,
            stringify_dict(func_args))
        if overwrite is None:
            overwrite = self.overwrite
        for teamID in self.complete_image_sets['unthresh']:
            if not hasattr(self.teams[teamID], 'rectify'):
                print('no rectification data for %s, skipping' % teamID)
                continue
            for hyp in range(1, 10):
                if hyp not in self.teams[teamID].rectify:
                    print('no rectification data for %s hyp%d, skipping' % (
                        teamID, hyp))
                    continue
                # whether this team/hypothesis needs its sign flipped
                rectify = self.teams[teamID].rectify[hyp]
                # load data from unthresh map within
                # positive voxels of thresholded mask
                unthresh_file = self.teams[
                    teamID].images['unthresh']['resampled'][hyp]
                # record the output path for the rectified image
                self.teams[
                    teamID].images[
                        'unthresh']['rectified'][hyp] = os.path.join(
                            self.dirs.dirs['rectified'],
                            self.teams[teamID].datadir_label,
                            'hypo%d_unthresh.nii.gz' % hyp)
                if not os.path.exists(
                        os.path.dirname(
                            self.teams[
                                teamID].images['unthresh']['rectified'][hyp])):
                    os.mkdir(os.path.dirname(
                        self.teams[teamID].images[
                            'unthresh']['rectified'][hyp]))
                if overwrite or not os.path.exists(
                        self.teams[
                            teamID].images['unthresh']['rectified'][hyp]):
                    # if values were flipped for negative contrasts
                    if rectify:
                        print('rectifying hyp', hyp, 'for', teamID)
                        img = nibabel.load(unthresh_file)
                        # multiply the image by -1 to flip its valence
                        img_rectified = nilearn.image.math_img(
                            'img*-1', img=img)
                        img_rectified.to_filename(
                            self.teams[
                                teamID].images['unthresh']['rectified'][hyp])
                        self.rectified_list.append((teamID, hyp))
                    else:  # just copy original
                        shutil.copy(
                            unthresh_file,
                            self.teams[
                                teamID].images['unthresh']['rectified'][hyp])
        # write list of rectified teams to disk
        if len(self.rectified_list) > 0:
            with open(os.path.join(self.dirs.dirs['metadata'],
                                   'rectified_images_list.txt'), 'w') as f:
                for l in self.rectified_list:
                    f.write('%s\t%s%s' % (l[0], l[1], os.linesep))
    def compute_image_stats(self, datatype='zstat', overwrite=None):
        """
        Compute per-voxel standard deviation and range (max - min) across
        teams for each hypothesis's concatenated unthresholded image, and
        write them as separate NIfTI files.

        Parameters
        ----------
        datatype : str
            Which concatenated unthresh variant to read (e.g. 'zstat').
        overwrite : bool or None
            Falls back to self.overwrite when None.
        """
        log_to_file(
            self.dirs.logfile,
            sys._getframe().f_code.co_name,
            headspace=2)
        # log the call arguments for provenance
        func_args = inspect.getargvalues(
            inspect.currentframe()).locals
        log_to_file(
            self.dirs.logfile,
            stringify_dict(func_args))
        if overwrite is None:
            overwrite = self.overwrite
        # set up directories
        unthresh_concat_dir = self.dirs.get_output_dir(
            'unthresh_concat_%s' % datatype)
        unthresh_range_dir = self.dirs.get_output_dir(
            'unthresh_range_%s' % datatype)
        unthresh_std_dir = self.dirs.get_output_dir(
            'unthresh_std_%s' % datatype)
        for hyp in range(1, 10):
            unthresh_file = os.path.join(
                unthresh_concat_dir,
                'hypo%d.nii.gz' % hyp)
            range_outfile = os.path.join(
                unthresh_range_dir,
                'hypo%d.nii.gz' % hyp)
            std_outfile = os.path.join(
                unthresh_std_dir,
                'hypo%d.nii.gz' % hyp)
            # recompute if either output is missing or overwrite is set
            if not os.path.exists(range_outfile) \
                    or not os.path.exists(std_outfile) \
                    or overwrite:
                unthresh_img = nibabel.load(unthresh_file)
                unthresh_data = unthresh_img.get_data()
                # NaNs are replaced with zero before computing stats
                concat_data = numpy.nan_to_num(unthresh_data)
                # compute range across the 4th (team) dimension
                datarange = numpy.max(concat_data, axis=3) \
                    - numpy.min(concat_data, axis=3)
                range_img = nibabel.Nifti1Image(
                    datarange,
                    affine=unthresh_img.affine)
                range_img.to_filename(range_outfile)
                # compute standard deviation
                datastd = numpy.std(concat_data, axis=3)
                std_img = nibabel.Nifti1Image(
                    datastd,
                    affine=unthresh_img.affine)
                std_img.to_filename(std_outfile)
    def convert_to_zscores(self, map_metadata_file=None, overwrite=None):
        """
        Convert rectified images to z scores.

        - unthresholded images could be either t or z images
        - if they are already z then just copy
        - use metadata supplied by teams to determine image type
        - images whose type is neither t nor z are skipped

        Parameters
        ----------
        map_metadata_file : str or None
            CSV with per-team map metadata; defaults to the corrected
            NeuroVault responses file in the 'orig' directory.
        overwrite : bool or None
            Falls back to self.overwrite when None.
        """
        log_to_file(
            self.dirs.logfile,
            sys._getframe().f_code.co_name,
            headspace=2)
        # log the call arguments for provenance
        func_args = inspect.getargvalues(
            inspect.currentframe()).locals
        log_to_file(
            self.dirs.logfile,
            stringify_dict(func_args))
        if overwrite is None:
            overwrite = self.overwrite
        if map_metadata_file is None:
            map_metadata_file = os.path.join(
                self.dirs.dirs['orig'],
                'narps_neurovault_images_details_responses_corrected.csv')
        print('using map_metadata_file:', map_metadata_file)
        unthresh_stat_type = get_map_metadata(map_metadata_file)
        metadata = get_metadata(self.metadata_file)
        # attach participant counts (indexed by teamID) to the stat types
        n_participants = metadata[['n_participants', 'NV_collection_string']]
        n_participants.index = metadata.teamID
        unthresh_stat_type = unthresh_stat_type.merge(
            n_participants, left_index=True, right_index=True)
        for teamID in self.complete_image_sets['unthresh']:
            if teamID not in unthresh_stat_type.index:
                print('no map metadata for', teamID)
                continue
            # this is a bit of a kludge
            # since some contrasts include all subjects
            # but others only include some
            # we don't have the number of participants in each
            # group so we just use the entire number
            n = unthresh_stat_type.loc[teamID, 'n_participants']
            for hyp in range(1, 10):
                infile = self.teams[
                    teamID].images['unthresh']['rectified'][hyp]
                if not os.path.exists(infile):
                    print('skipping', infile)
                    continue
                # record the output path for the z-stat image
                self.teams[
                    teamID].images['unthresh']['zstat'][hyp] = os.path.join(
                        self.dirs.dirs['zstat'],
                        self.teams[teamID].datadir_label,
                        'hypo%d_unthresh.nii.gz' % hyp)
                # skip existing outputs unless overwriting
                if not overwrite and os.path.exists(
                        self.teams[teamID].images['unthresh']['zstat'][hyp]):
                    continue
                if unthresh_stat_type.loc[
                        teamID, 'unthresh_type'].lower() == 't':
                    if not os.path.exists(
                            os.path.dirname(
                                self.teams[
                                    teamID].images['unthresh']['zstat'][hyp])):
                        os.mkdir(os.path.dirname(
                            self.teams[
                                teamID].images['unthresh']['zstat'][hyp]))
                    print("converting %s (hyp %d) to z - %d participants" % (
                        teamID, hyp, n))
                    # n-1 is presumably the degrees of freedom for the
                    # t-to-z conversion -- TODO confirm against TtoZ
                    TtoZ(infile,
                         self.teams[teamID].images['unthresh']['zstat'][hyp],
                         n-1)
                elif unthresh_stat_type.loc[teamID, 'unthresh_type'] == 'z':
                    if not os.path.exists(os.path.dirname(
                            self.teams[
                                teamID].images['unthresh']['zstat'][hyp])):
                        os.mkdir(os.path.dirname(
                            self.teams[
                                teamID].images['unthresh']['zstat'][hyp]))
                    if not os.path.exists(
                            self.teams[
                                teamID].images['unthresh']['zstat'][hyp]):
                        print('copying', teamID)
                        shutil.copy(
                            infile,
                            os.path.dirname(
                                self.teams[
                                    teamID].images['unthresh']['zstat'][hyp]))
                else:
                    # if it's not T or Z then we skip it as it's not usable
                    print('skipping %s - other data type' % teamID)
    def estimate_smoothness(self, overwrite=None, imgtype='zstat'):
        """
        Estimate smoothness of Z maps using FSL's smoothness estimation
        (via nipype's SmoothEstimate interface).

        Results are cached to metadata/smoothness_est.csv and reused unless
        `overwrite` is truthy.  Missing images are recorded as NaN rows.

        Returns
        -------
        pandas.DataFrame
            Columns ['teamID', 'hyp', 'dhl', 'volume', 'resels'].
        """
        log_to_file(
            self.dirs.logfile,
            sys._getframe().f_code.co_name,
            headspace=2)
        # log the call arguments for provenance
        func_args = inspect.getargvalues(
            inspect.currentframe()).locals
        log_to_file(
            self.dirs.logfile,
            stringify_dict(func_args))
        if overwrite is None:
            overwrite = self.overwrite
        output_file = os.path.join(self.dirs.dirs['metadata'],
                                   'smoothness_est.csv')
        if os.path.exists(output_file) and not overwrite:
            if self.verbose:
                print('using existing smoothness file')
            smoothness_df = pandas.read_csv(output_file)
            return(smoothness_df)
        # use nipype's interface to the FSL smoothest command
        est = SmoothEstimate()
        smoothness = []
        for teamID in self.complete_image_sets['unthresh']:
            for hyp in range(1, 10):
                if hyp not in self.teams[teamID].images['unthresh'][imgtype]:
                    # fill missing data with nan
                    print('no zstat present for', teamID, hyp)
                    smoothness.append([teamID, hyp, numpy.nan,
                                       numpy.nan, numpy.nan])
                    continue
                infile = self.teams[teamID].images['unthresh'][imgtype][hyp]
                if not os.path.exists(infile):
                    print('no image present:', infile)
                    continue
                else:
                    if self.verbose:
                        print('estimating smoothness for hyp', hyp)
                    est.inputs.zstat_file = infile
                    est.inputs.mask_file = self.dirs.MNI_mask
                    est.terminal_output = 'file_split'
                    smoothest_output = est.run()
                    smoothness.append([teamID, hyp,
                                       smoothest_output.outputs.dlh,
                                       smoothest_output.outputs.volume,
                                       smoothest_output.outputs.resels])
                    # keep stdout/stderr from the FSL run for debugging
                    self.teams[teamID].logs['smoothest'] = (
                        smoothest_output.runtime.stdout,
                        smoothest_output.runtime.stderr)
        # NOTE(review): column 'dhl' looks like a typo for 'dlh' (the
        # SmoothEstimate output name) -- renaming would change the cached
        # CSV schema, so confirm downstream usage first.
        smoothness_df = pandas.DataFrame(
            smoothness,
            columns=['teamID', 'hyp', 'dhl', 'volume', 'resels'])
        smoothness_df.to_csv(output_file)
        return(smoothness_df)
def write_data(self, save_data=True, outfile=None):
"""
serialize important info and save to file
"""
info = {}
info['started_at'] = self.started_at
info['save_time'] = datetime.datetime.now()
info['dirs'] = self.dirs
info['teamlist'] = self.complete_image_sets
info['teams'] = {}
for teamID in self.complete_image_sets['thresh']:
info['teams'][teamID] = {
'images': self.teams[teamID].images,
'image_json': self.teams[teamID].image_json,
'input_dir': self.teams[teamID].input_dir,
'NV_collection_id': self.teams[teamID].NV_collection_id,
'jsonfile': self.teams[teamID].jsonfile}
if save_data:
if not os.path.exists(self.dirs.dirs['cached']):
os.mkdir(self.dirs.dirs['cached'])
if outfile is None:
outfile = os.path.join(self.dirs.dirs['cached'],
'narps_prepare_maps.pkl')
with open(outfile, 'wb') as f:
pickle.dump(info, f)
return(info)
def load_data(self, infile=None):
"""
load data from pickle
"""
if not infile:
infile = os.path.join(self.dirs.dirs['cached'],
'narps_prepare_maps.pkl')
assert os.path.exists(infile)
with open(infile, 'rb') as f:
info = pickle.load(f)
self.dirs = info['dirs']
self.complete_image_sets = info['teamlist']
for teamID in self.complete_image_sets['thresh']:
self.teams[teamID] = NarpsTeam(
teamID,
info['teams'][teamID]['NV_collection_id'],
info['dirs'],
verbose=self.verbose)
self.teams[teamID].jsonfile = info[
'teams'][teamID]['jsonfile']
self.teams[teamID].images = info[
'teams'][teamID]['images']
self.teams[teamID].image_json = info[
'teams'][teamID]['image_json']
self.teams[teamID].input_dir = info[
'teams'][teamID]['input_dir']
| [
"os.mkdir",
"os.remove",
"pickle.dump",
"numpy.sum",
"numpy.nan_to_num",
"numpy.abs",
"pandas.read_csv",
"numpy.ones",
"numpy.isnan",
"pickle.load",
"numpy.mean",
"shutil.rmtree",
"nipype.interfaces.fsl.model.SmoothEstimate",
"os.path.join",
"shutil.copy",
"pandas.DataFrame",
"utils.... | [((3282, 3333), 'os.path.join', 'os.path.join', (["os.environ['FSLDIR']", '"""data/standard"""'], {}), "(os.environ['FSLDIR'], 'data/standard')\n", (3294, 3333), False, 'import os\n'), ((3598, 3642), 'os.path.join', 'os.path.join', (["self.dirs['logs']", '"""narps.txt"""'], {}), "(self.dirs['logs'], 'narps.txt')\n", (3610, 3642), False, 'import os\n'), ((4230, 4263), 'os.path.exists', 'os.path.exists', (["self.dirs['orig']"], {}), "(self.dirs['orig'])\n", (4244, 4263), False, 'import os\n'), ((4400, 4475), 'os.path.join', 'os.path.join', (["self.dirs['fsl_templates']", '"""MNI152_T1_2mm_brain_mask.nii.gz"""'], {}), "(self.dirs['fsl_templates'], 'MNI152_T1_2mm_brain_mask.nii.gz')\n", (4412, 4475), False, 'import os\n'), ((4528, 4557), 'os.path.exists', 'os.path.exists', (['self.MNI_mask'], {}), '(self.MNI_mask)\n', (4542, 4557), False, 'import os\n'), ((4587, 4651), 'os.path.join', 'os.path.join', (["self.dirs['fsl_templates']", '"""MNI152_T1_2mm.nii.gz"""'], {}), "(self.dirs['fsl_templates'], 'MNI152_T1_2mm.nii.gz')\n", (4599, 4651), False, 'import os\n'), ((4708, 4741), 'os.path.exists', 'os.path.exists', (['self.MNI_template'], {}), '(self.MNI_template)\n', (4722, 4741), False, 'import os\n'), ((4772, 4836), 'os.path.join', 'os.path.join', (["self.dirs['templates']", '"""MNI152_all_voxels.nii.gz"""'], {}), "(self.dirs['templates'], 'MNI152_all_voxels.nii.gz')\n", (4784, 4836), False, 'import os\n'), ((5552, 5607), 'utils.log_to_file', 'log_to_file', (['self.logfile', '"""get_orig_data"""'], {'headspace': '(2)'}), "(self.logfile, 'get_orig_data', headspace=2)\n", (5563, 5607), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((5653, 5710), 'utils.log_to_file', 'log_to_file', (['self.logfile', "('DATA_URL: %s' % self.data_url)"], {}), "(self.logfile, 'DATA_URL: %s' % self.data_url)\n", (5664, 5710), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((6525, 
6585), 'utils.log_to_file', 'log_to_file', (['self.logfile', "('hash of tar file: %s' % filehash)"], {}), "(self.logfile, 'hash of tar file: %s' % filehash)\n", (6536, 6585), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((6608, 6630), 'tarfile.open', 'tarfile.open', (['filename'], {}), '(filename)\n', (6620, 6630), False, 'import tarfile\n'), ((6694, 6713), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (6703, 6713), False, 'import os\n'), ((7100, 7174), 'os.path.join', 'os.path.join', (["self.dirs.dirs['orig']", "('%s_%s' % (NV_collection_id, teamID))"], {}), "(self.dirs.dirs['orig'], '%s_%s' % (NV_collection_id, teamID))\n", (7112, 7174), False, 'import os\n'), ((8183, 8244), 'os.path.join', 'os.path.join', (["self.dirs.dirs['logs']", '"""image_diagnostics.log"""'], {}), "(self.dirs.dirs['logs'], 'image_diagnostics.log')\n", (8195, 8244), False, 'import os\n'), ((8519, 8607), 'os.path.join', 'os.path.join', (["self.dirs.dirs['image_diagnostics_orig']", "('%s.csv' % collection_string)"], {}), "(self.dirs.dirs['image_diagnostics_orig'], '%s.csv' %\n collection_string)\n", (8531, 8607), False, 'import os\n'), ((15496, 15519), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15517, 15519), False, 'import datetime\n'), ((15777, 15816), 'os.path.exists', 'os.path.exists', (['self.dirs.full_mask_img'], {}), '(self.dirs.full_mask_img)\n', (15791, 15816), False, 'import os\n'), ((16601, 16633), 'utils.get_metadata', 'get_metadata', (['self.metadata_file'], {}), '(self.metadata_file)\n', (16613, 16633), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((16670, 16731), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'columns': "['teamID', 'hyp', 'n_na', 'n_zero']"}), "(columns=['teamID', 'hyp', 'n_na', 'n_zero'])\n", (16686, 16731), False, 'import pandas\n'), ((17065, 17097), 'nibabel.load', 'nibabel.load', (['self.dirs.MNI_mask'], {}), 
'(self.dirs.MNI_mask)\n', (17077, 17097), False, 'import nibabel\n'), ((17110, 17130), 'numpy.ones', 'numpy.ones', (['mi.shape'], {}), '(mi.shape)\n', (17120, 17130), False, 'import numpy\n'), ((17151, 17191), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['d'], {'affine': 'mi.affine'}), '(d, affine=mi.affine)\n', (17170, 17191), False, 'import nibabel\n'), ((20202, 20267), 'os.path.join', 'os.path.join', (["self.dirs.dirs['metadata']", '"""image_metadata_df.csv"""'], {}), "(self.dirs.dirs['metadata'], 'image_metadata_df.csv')\n", (20214, 20267), False, 'import os\n'), ((21133, 21218), 'pandas.DataFrame', 'pandas.DataFrame', (['image_metadata'], {'columns': "['teamID', 'hyp', 'n_na', 'n_nonzero']"}), "(image_metadata, columns=['teamID', 'hyp', 'n_na', 'n_nonzero']\n )\n", (21149, 21218), False, 'import pandas\n'), ((33400, 33435), 'utils.get_map_metadata', 'get_map_metadata', (['map_metadata_file'], {}), '(map_metadata_file)\n', (33416, 33435), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((33455, 33487), 'utils.get_metadata', 'get_metadata', (['self.metadata_file'], {}), '(self.metadata_file)\n', (33467, 33487), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((37269, 37331), 'os.path.join', 'os.path.join', (["self.dirs.dirs['metadata']", '"""smoothness_est.csv"""'], {}), "(self.dirs.dirs['metadata'], 'smoothness_est.csv')\n", (37281, 37331), False, 'import os\n'), ((37678, 37694), 'nipype.interfaces.fsl.model.SmoothEstimate', 'SmoothEstimate', ([], {}), '()\n', (37692, 37694), False, 'from nipype.interfaces.fsl.model import SmoothEstimate\n'), ((39179, 39265), 'pandas.DataFrame', 'pandas.DataFrame', (['smoothness'], {'columns': "['teamID', 'hyp', 'dhl', 'volume', 'resels']"}), "(smoothness, columns=['teamID', 'hyp', 'dhl', 'volume',\n 'resels'])\n", (39195, 39265), False, 'import pandas\n'), ((39581, 39604), 'datetime.datetime.now', 'datetime.datetime.now', ([], 
{}), '()\n', (39602, 39604), False, 'import datetime\n'), ((40765, 40787), 'os.path.exists', 'os.path.exists', (['infile'], {}), '(infile)\n', (40779, 40787), False, 'import os\n'), ((2725, 2749), 'os.path.dirname', 'os.path.dirname', (['basedir'], {}), '(basedir)\n', (2740, 2749), False, 'import os\n'), ((2802, 2825), 'os.path.exists', 'os.path.exists', (['basedir'], {}), '(basedir)\n', (2816, 2825), False, 'import os\n'), ((2839, 2856), 'os.mkdir', 'os.mkdir', (['basedir'], {}), '(basedir)\n', (2847, 2856), False, 'import os\n'), ((3209, 3243), 'os.path.join', 'os.path.join', (["self.dirs['base']", 'd'], {}), "(self.dirs['base'], d)\n", (3221, 3243), False, 'import os\n'), ((3684, 3749), 'utils.log_to_file', 'log_to_file', (['self.logfile', '"""Running Narps main class"""'], {'flush': '(True)'}), "(self.logfile, 'Running Narps main class', flush=True)\n", (3695, 3749), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((4052, 4085), 'os.path.exists', 'os.path.exists', (["self.dirs['orig']"], {}), "(self.dirs['orig'])\n", (4066, 4085), False, 'import os\n'), ((4099, 4131), 'shutil.rmtree', 'shutil.rmtree', (["self.dirs['orig']"], {}), "(self.dirs['orig'])\n", (4112, 4131), False, 'import shutil\n'), ((4147, 4180), 'os.path.exists', 'os.path.exists', (["self.dirs['orig']"], {}), "(self.dirs['orig'])\n", (4161, 4180), False, 'import os\n'), ((5231, 5267), 'os.path.join', 'os.path.join', (['self.dirs[base]', 'dirID'], {}), '(self.dirs[base], dirID)\n', (5243, 5267), False, 'import os\n'), ((7228, 7258), 'os.path.exists', 'os.path.exists', (['self.input_dir'], {}), '(self.input_dir)\n', (7242, 7258), False, 'import os\n'), ((8360, 8416), 'os.path.exists', 'os.path.exists', (["self.dirs.dirs['image_diagnostics_orig']"], {}), "(self.dirs.dirs['image_diagnostics_orig'])\n", (8374, 8416), False, 'import os\n'), ((8430, 8480), 'os.mkdir', 'os.mkdir', (["self.dirs.dirs['image_diagnostics_orig']"], {}), 
"(self.dirs.dirs['image_diagnostics_orig'])\n", (8438, 8480), False, 'import os\n'), ((8653, 8696), 'os.path.exists', 'os.path.exists', (['self.image_diagnostics_file'], {}), '(self.image_diagnostics_file)\n', (8667, 8696), False, 'import os\n'), ((8735, 8799), 'ValueDiagnostics.compare_thresh_unthresh_values', 'compare_thresh_unthresh_values', (['dirs', 'collection_string', 'logfile'], {}), '(dirs, collection_string, logfile)\n', (8765, 8799), False, 'from ValueDiagnostics import compare_thresh_unthresh_values\n'), ((8939, 8983), 'pandas.read_csv', 'pandas.read_csv', (['self.image_diagnostics_file'], {}), '(self.image_diagnostics_file)\n', (8954, 8983), False, 'import pandas\n'), ((13249, 13362), 'os.path.join', 'os.path.join', (['self.dirs.dirs[data_dirname[imgtype]]', 'self.datadir_label', "('hypo%d_%s.nii.gz' % (hyp, imgtype))"], {}), "(self.dirs.dirs[data_dirname[imgtype]], self.datadir_label, \n 'hypo%d_%s.nii.gz' % (hyp, imgtype))\n", (13261, 13362), False, 'import os\n'), ((13429, 13517), 'os.path.join', 'os.path.join', (['resampled_dir', 'self.datadir_label', "('hypo%d_%s.nii.gz' % (hyp, imgtype))"], {}), "(resampled_dir, self.datadir_label, 'hypo%d_%s.nii.gz' % (hyp,\n imgtype))\n", (13441, 13517), False, 'import os\n'), ((15632, 15671), 'os.path.exists', 'os.path.exists', (['self.dirs.full_mask_img'], {}), '(self.dirs.full_mask_img)\n', (15646, 15671), False, 'import os\n'), ((16405, 16481), 'os.path.join', 'os.path.join', (["self.dirs.dirs['orig']", '"""analysis_pipelines_for_analysis.xlsx"""'], {}), "(self.dirs.dirs['orig'], 'analysis_pipelines_for_analysis.xlsx')\n", (16417, 16481), False, 'import os\n'), ((17471, 17528), 'os.path.join', 'os.path.join', (["dirs.dirs['orig']", '"""*/hypo1_*thresh.nii.gz"""'], {}), "(dirs.dirs['orig'], '*/hypo1_*thresh.nii.gz')\n", (17483, 17528), False, 'import os\n'), ((17552, 17570), 'os.path.dirname', 'os.path.dirname', (['i'], {}), '(i)\n', (17567, 17570), False, 'import os\n'), ((17825, 17844), 'os.path.basename', 
'os.path.basename', (['i'], {}), '(i)\n', (17841, 17844), False, 'import os\n'), ((20292, 20327), 'os.path.exists', 'os.path.exists', (['image_metadata_file'], {}), '(image_metadata_file)\n', (20306, 20327), False, 'import os\n'), ((20427, 20463), 'pandas.read_csv', 'pandas.read_csv', (['image_metadata_file'], {}), '(image_metadata_file)\n', (20442, 20463), False, 'import pandas\n'), ((22005, 22030), 'utils.stringify_dict', 'stringify_dict', (['func_args'], {}), '(func_args)\n', (22019, 22030), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((25359, 25384), 'utils.stringify_dict', 'stringify_dict', (['func_args'], {}), '(func_args)\n', (25373, 25384), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((25710, 25757), 'os.path.join', 'os.path.join', (['output_dir', "('hypo%d.nii.gz' % hyp)"], {}), "(output_dir, 'hypo%d.nii.gz' % hyp)\n", (25722, 25757), False, 'import os\n'), ((27462, 27487), 'utils.stringify_dict', 'stringify_dict', (['func_args'], {}), '(func_args)\n', (27476, 27487), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((30696, 30721), 'utils.stringify_dict', 'stringify_dict', (['func_args'], {}), '(func_args)\n', (30710, 30721), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((31181, 31237), 'os.path.join', 'os.path.join', (['unthresh_concat_dir', "('hypo%d.nii.gz' % hyp)"], {}), "(unthresh_concat_dir, 'hypo%d.nii.gz' % hyp)\n", (31193, 31237), False, 'import os\n'), ((31300, 31355), 'os.path.join', 'os.path.join', (['unthresh_range_dir', "('hypo%d.nii.gz' % hyp)"], {}), "(unthresh_range_dir, 'hypo%d.nii.gz' % hyp)\n", (31312, 31355), False, 'import os\n'), ((31416, 31469), 'os.path.join', 'os.path.join', (['unthresh_std_dir', "('hypo%d.nii.gz' % hyp)"], {}), "(unthresh_std_dir, 'hypo%d.nii.gz' % hyp)\n", (31428, 31469), False, 'import os\n'), ((33014, 
33039), 'utils.stringify_dict', 'stringify_dict', (['func_args'], {}), '(func_args)\n', (33028, 33039), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((33181, 33280), 'os.path.join', 'os.path.join', (["self.dirs.dirs['orig']", '"""narps_neurovault_images_details_responses_corrected.csv"""'], {}), "(self.dirs.dirs['orig'],\n 'narps_neurovault_images_details_responses_corrected.csv')\n", (33193, 33280), False, 'import os\n'), ((37150, 37175), 'utils.stringify_dict', 'stringify_dict', (['func_args'], {}), '(func_args)\n', (37164, 37175), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((37378, 37405), 'os.path.exists', 'os.path.exists', (['output_file'], {}), '(output_file)\n', (37392, 37405), False, 'import os\n'), ((37538, 37566), 'pandas.read_csv', 'pandas.read_csv', (['output_file'], {}), '(output_file)\n', (37553, 37566), False, 'import pandas\n'), ((40651, 40715), 'os.path.join', 'os.path.join', (["self.dirs.dirs['cached']", '"""narps_prepare_maps.pkl"""'], {}), "(self.dirs.dirs['cached'], 'narps_prepare_maps.pkl')\n", (40663, 40715), False, 'import os\n'), ((40846, 40860), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (40857, 40860), False, 'import pickle\n'), ((3551, 3573), 'os.mkdir', 'os.mkdir', (['self.dirs[d]'], {}), '(self.dirs[d])\n', (3559, 3573), False, 'import os\n'), ((5333, 5365), 'os.path.exists', 'os.path.exists', (['self.dirs[dirID]'], {}), '(self.dirs[dirID])\n', (5347, 5365), False, 'import os\n'), ((5383, 5409), 'os.mkdir', 'os.mkdir', (['self.dirs[dirID]'], {}), '(self.dirs[dirID])\n', (5391, 5409), False, 'import os\n'), ((6098, 6148), 'wget.download', 'wget.download', (['self.data_url'], {'out': 'output_directory'}), '(self.data_url, out=output_directory)\n', (6111, 6148), False, 'import wget\n'), ((9755, 9820), 'os.path.join', 'os.path.join', (['self.input_dir', "('hypo%d_%s.nii.gz' % (hyp, imgtype))"], {}), "(self.input_dir, 
'hypo%d_%s.nii.gz' % (hyp, imgtype))\n", (9767, 9820), False, 'import os\n'), ((9881, 9904), 'os.path.exists', 'os.path.exists', (['imgfile'], {}), '(imgfile)\n', (9895, 9904), False, 'import os\n'), ((10740, 10761), 'os.path.basename', 'os.path.basename', (['img'], {}), '(img)\n', (10756, 10761), False, 'import os\n'), ((11109, 11126), 'nibabel.load', 'nibabel.load', (['img'], {}), '(img)\n', (11121, 11126), False, 'import nibabel\n'), ((11438, 11467), 'numpy.zeros', 'numpy.zeros', (['threshdata.shape'], {}), '(threshdata.shape)\n', (11449, 11467), False, 'import numpy\n'), ((12119, 12179), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['threshdata_bin'], {'affine': 'threshimg.affine'}), '(threshdata_bin, affine=threshimg.affine)\n', (12138, 12179), False, 'import nibabel\n'), ((21910, 21932), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (21930, 21932), False, 'import inspect\n'), ((22378, 22425), 'os.path.join', 'os.path.join', (['concat_dir', "('hypo%d.nii.gz' % hyp)"], {}), "(concat_dir, 'hypo%d.nii.gz' % hyp)\n", (22390, 22425), False, 'import os\n'), ((25264, 25286), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (25284, 25286), False, 'import inspect\n'), ((26018, 26065), 'os.path.join', 'os.path.join', (['concat_dir', "('hypo%d.nii.gz' % hyp)"], {}), "(concat_dir, 'hypo%d.nii.gz' % hyp)\n", (26030, 26065), False, 'import os\n'), ((26136, 26161), 'nibabel.load', 'nibabel.load', (['concat_file'], {}), '(concat_file)\n', (26148, 26161), False, 'import nibabel\n'), ((26313, 26339), 'numpy.mean', 'numpy.mean', (['concat_data', '(3)'], {}), '(concat_data, 3)\n', (26323, 26339), False, 'import numpy\n'), ((26374, 26432), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['concat_mean'], {'affine': 'concat_img.affine'}), '(concat_mean, affine=concat_img.affine)\n', (26393, 26432), False, 'import nibabel\n'), ((27367, 27389), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (27387, 27389), False, 'import 
inspect\n'), ((28410, 28521), 'os.path.join', 'os.path.join', (["self.dirs.dirs['rectified']", 'self.teams[teamID].datadir_label', "('hypo%d_unthresh.nii.gz' % hyp)"], {}), "(self.dirs.dirs['rectified'], self.teams[teamID].datadir_label,\n 'hypo%d_unthresh.nii.gz' % hyp)\n", (28422, 28521), False, 'import os\n'), ((30601, 30623), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (30621, 30623), False, 'import inspect\n'), ((31677, 31704), 'nibabel.load', 'nibabel.load', (['unthresh_file'], {}), '(unthresh_file)\n', (31689, 31704), False, 'import nibabel\n'), ((31791, 31822), 'numpy.nan_to_num', 'numpy.nan_to_num', (['unthresh_data'], {}), '(unthresh_data)\n', (31807, 31822), False, 'import numpy\n'), ((31998, 32056), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['datarange'], {'affine': 'unthresh_img.affine'}), '(datarange, affine=unthresh_img.affine)\n', (32017, 32056), False, 'import nibabel\n'), ((32223, 32253), 'numpy.std', 'numpy.std', (['concat_data'], {'axis': '(3)'}), '(concat_data, axis=3)\n', (32232, 32253), False, 'import numpy\n'), ((32280, 32336), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['datastd'], {'affine': 'unthresh_img.affine'}), '(datastd, affine=unthresh_img.affine)\n', (32299, 32336), False, 'import nibabel\n'), ((32919, 32941), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (32939, 32941), False, 'import inspect\n'), ((34601, 34709), 'os.path.join', 'os.path.join', (["self.dirs.dirs['zstat']", 'self.teams[teamID].datadir_label', "('hypo%d_unthresh.nii.gz' % hyp)"], {}), "(self.dirs.dirs['zstat'], self.teams[teamID].datadir_label, \n 'hypo%d_unthresh.nii.gz' % hyp)\n", (34613, 34709), False, 'import os\n'), ((37055, 37077), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (37075, 37077), False, 'import inspect\n'), ((40158, 40198), 'os.path.exists', 'os.path.exists', (["self.dirs.dirs['cached']"], {}), "(self.dirs.dirs['cached'])\n", (40172, 40198), False, 'import os\n'), ((40216, 
40250), 'os.mkdir', 'os.mkdir', (["self.dirs.dirs['cached']"], {}), "(self.dirs.dirs['cached'])\n", (40224, 40250), False, 'import os\n'), ((40309, 40373), 'os.path.join', 'os.path.join', (["self.dirs.dirs['cached']", '"""narps_prepare_maps.pkl"""'], {}), "(self.dirs.dirs['cached'], 'narps_prepare_maps.pkl')\n", (40321, 40373), False, 'import os\n'), ((40472, 40492), 'pickle.dump', 'pickle.dump', (['info', 'f'], {}), '(info, f)\n', (40483, 40492), False, 'import pickle\n'), ((3505, 3533), 'os.path.exists', 'os.path.exists', (['self.dirs[d]'], {}), '(self.dirs[d])\n', (3519, 3533), False, 'import os\n'), ((6253, 6266), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (6263, 6266), False, 'import time\n'), ((10866, 10890), 'os.path.dirname', 'os.path.dirname', (['maskimg'], {}), '(maskimg)\n', (10881, 10890), False, 'import os\n'), ((10939, 10963), 'os.path.dirname', 'os.path.dirname', (['maskimg'], {}), '(maskimg)\n', (10954, 10963), False, 'import os\n'), ((10997, 11020), 'os.path.exists', 'os.path.exists', (['maskimg'], {}), '(maskimg)\n', (11011, 11020), False, 'import os\n'), ((11376, 11404), 'numpy.nan_to_num', 'numpy.nan_to_num', (['threshdata'], {}), '(threshdata)\n', (11392, 11404), False, 'import numpy\n'), ((12370, 12393), 'os.path.exists', 'os.path.exists', (['maskimg'], {}), '(maskimg)\n', (12384, 12393), False, 'import os\n'), ((12425, 12446), 'nibabel.load', 'nibabel.load', (['maskimg'], {}), '(maskimg)\n', (12437, 12446), False, 'import nibabel\n'), ((13658, 13682), 'os.path.dirname', 'os.path.dirname', (['outfile'], {}), '(outfile)\n', (13673, 13682), False, 'import os\n'), ((13710, 13734), 'os.path.dirname', 'os.path.dirname', (['outfile'], {}), '(outfile)\n', (13725, 13734), False, 'import os\n'), ((13755, 13778), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (13769, 13778), False, 'import os\n'), ((14369, 14394), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (14392, 14394), False, 'import 
warnings\n'), ((14416, 14447), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (14437, 14447), False, 'import warnings\n'), ((18106, 18136), 'os.path.join', 'os.path.join', (['i', '"""images.json"""'], {}), "(i, 'images.json')\n", (18118, 18136), False, 'import os\n'), ((18189, 18219), 'os.path.join', 'os.path.join', (['i', '"""images.json"""'], {}), "(i, 'images.json')\n", (18201, 18219), False, 'import os\n'), ((19180, 19195), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (19193, 19195), False, 'import sys\n'), ((19546, 19561), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (19559, 19561), False, 'import sys\n'), ((20046, 20061), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (20059, 20061), False, 'import sys\n'), ((21799, 21814), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (21812, 21814), False, 'import sys\n'), ((25153, 25168), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (25166, 25168), False, 'import sys\n'), ((25810, 25833), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (25824, 25833), False, 'import os\n'), ((27256, 27271), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (27269, 27271), False, 'import sys\n'), ((30050, 30119), 'os.path.join', 'os.path.join', (["self.dirs.dirs['metadata']", '"""rectified_images_list.txt"""'], {}), "(self.dirs.dirs['metadata'], 'rectified_images_list.txt')\n", (30062, 30119), False, 'import os\n'), ((30490, 30505), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (30503, 30505), False, 'import sys\n'), ((31523, 31552), 'os.path.exists', 'os.path.exists', (['range_outfile'], {}), '(range_outfile)\n', (31537, 31552), False, 'import os\n'), ((31582, 31609), 'os.path.exists', 'os.path.exists', (['std_outfile'], {}), '(std_outfile)\n', (31596, 31609), False, 'import os\n'), ((31884, 31914), 'numpy.max', 'numpy.max', (['concat_data'], {'axis': '(3)'}), '(concat_data, axis=3)\n', (31893, 31914), False, 'import numpy\n'), ((31939, 
31969), 'numpy.min', 'numpy.min', (['concat_data'], {'axis': '(3)'}), '(concat_data, axis=3)\n', (31948, 31969), False, 'import numpy\n'), ((32808, 32823), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (32821, 32823), False, 'import sys\n'), ((34411, 34433), 'os.path.exists', 'os.path.exists', (['infile'], {}), '(infile)\n', (34425, 34433), False, 'import os\n'), ((34815, 34882), 'os.path.exists', 'os.path.exists', (["self.teams[teamID].images['unthresh']['zstat'][hyp]"], {}), "(self.teams[teamID].images['unthresh']['zstat'][hyp])\n", (34829, 34882), False, 'import os\n'), ((35563, 35635), 'utils.TtoZ', 'TtoZ', (['infile', "self.teams[teamID].images['unthresh']['zstat'][hyp]", '(n - 1)'], {}), "(infile, self.teams[teamID].images['unthresh']['zstat'][hyp], n - 1)\n", (35567, 35635), False, 'from utils import get_metadata, TtoZ, get_map_metadata, log_to_file, stringify_dict\n'), ((36944, 36959), 'sys._getframe', 'sys._getframe', ([], {}), '()\n', (36957, 36959), False, 'import sys\n'), ((38260, 38282), 'os.path.exists', 'os.path.exists', (['infile'], {}), '(infile)\n', (38274, 38282), False, 'import os\n'), ((18366, 18378), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18375, 18378), False, 'import json\n'), ((21073, 21101), 'numpy.sum', 'numpy.sum', (['(threshdata == 0.0)'], {}), '(threshdata == 0.0)\n', (21082, 21101), False, 'import numpy\n'), ((22558, 22581), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (22572, 22581), False, 'import os\n'), ((23883, 23939), 'nibabel.Nifti1Image', 'nibabel.Nifti1Image', (['voxel_map'], {'affine': 'concat_img.affine'}), '(voxel_map, affine=concat_img.affine)\n', (23902, 23939), False, 'import nibabel\n'), ((28663, 28735), 'os.path.dirname', 'os.path.dirname', (["self.teams[teamID].images['unthresh']['rectified'][hyp]"], {}), "(self.teams[teamID].images['unthresh']['rectified'][hyp])\n", (28678, 28735), False, 'import os\n'), ((28821, 28893), 'os.path.dirname', 'os.path.dirname', 
(["self.teams[teamID].images['unthresh']['rectified'][hyp]"], {}), "(self.teams[teamID].images['unthresh']['rectified'][hyp])\n", (28836, 28893), False, 'import os\n'), ((28994, 29065), 'os.path.exists', 'os.path.exists', (["self.teams[teamID].images['unthresh']['rectified'][hyp]"], {}), "(self.teams[teamID].images['unthresh']['rectified'][hyp])\n", (29008, 29065), False, 'import os\n'), ((29319, 29346), 'nibabel.load', 'nibabel.load', (['unthresh_file'], {}), '(unthresh_file)\n', (29331, 29346), False, 'import nibabel\n'), ((29765, 29853), 'shutil.copy', 'shutil.copy', (['unthresh_file', "self.teams[teamID].images['unthresh']['rectified'][hyp]"], {}), "(unthresh_file, self.teams[teamID].images['unthresh'][\n 'rectified'][hyp])\n", (29776, 29853), False, 'import shutil\n'), ((21026, 21049), 'numpy.isnan', 'numpy.isnan', (['threshdata'], {}), '(threshdata)\n', (21037, 21049), False, 'import numpy\n'), ((22890, 22955), 'os.path.exists', 'os.path.exists', (['self.teams[teamID].images[imgtype][datatype][hyp]'], {}), '(self.teams[teamID].images[imgtype][datatype][hyp])\n', (22904, 22955), False, 'import os\n'), ((35118, 35186), 'os.path.dirname', 'os.path.dirname', (["self.teams[teamID].images['unthresh']['zstat'][hyp]"], {}), "(self.teams[teamID].images['unthresh']['zstat'][hyp])\n", (35133, 35186), False, 'import os\n'), ((35292, 35360), 'os.path.dirname', 'os.path.dirname', (["self.teams[teamID].images['unthresh']['zstat'][hyp]"], {}), "(self.teams[teamID].images['unthresh']['zstat'][hyp])\n", (35307, 35360), False, 'import os\n'), ((36128, 36195), 'os.path.exists', 'os.path.exists', (["self.teams[teamID].images['unthresh']['zstat'][hyp]"], {}), "(self.teams[teamID].images['unthresh']['zstat'][hyp])\n", (36142, 36195), False, 'import os\n'), ((23704, 23725), 'nibabel.load', 'nibabel.load', (['outfile'], {}), '(outfile)\n', (23716, 23725), False, 'import nibabel\n'), ((23813, 23835), 'numpy.abs', 'numpy.abs', (['concat_data'], {}), '(concat_data)\n', (23822, 23835), 
False, 'import numpy\n'), ((35803, 35871), 'os.path.dirname', 'os.path.dirname', (["self.teams[teamID].images['unthresh']['zstat'][hyp]"], {}), "(self.teams[teamID].images['unthresh']['zstat'][hyp])\n", (35818, 35871), False, 'import os\n'), ((35969, 36037), 'os.path.dirname', 'os.path.dirname', (["self.teams[teamID].images['unthresh']['zstat'][hyp]"], {}), "(self.teams[teamID].images['unthresh']['zstat'][hyp])\n", (35984, 36037), False, 'import os\n'), ((36409, 36477), 'os.path.dirname', 'os.path.dirname', (["self.teams[teamID].images['unthresh']['zstat'][hyp]"], {}), "(self.teams[teamID].images['unthresh']['zstat'][hyp])\n", (36424, 36477), False, 'import os\n')] |
import optmod
import unittest
import numpy as np
class TestAdd(unittest.TestCase):
    """Unit tests for optmod's `add` function and the overloaded `+` operator.

    Covers: direct construction, constant folding, scalar/matrix operand
    combinations, the `x + 0` identity shortcut, symbolic derivatives,
    affine-property analysis, and standard-form component extraction.
    """
    def test_contruction(self):
        # NOTE(review): method name has a typo ('contruction'); renaming would
        # change which tests the runner discovers, so it is documented instead.
        """add() wraps its arguments and requires at least two of them."""
        x = optmod.variable.VariableScalar(name='x')
        f = optmod.function.add([x, optmod.expression.make_Expression(1.)])
        self.assertEqual(f.name, 'add')
        self.assertEqual(len(f.arguments), 2)
        self.assertTrue(f.arguments[0] is x)
        self.assertTrue(isinstance(f.arguments[1], optmod.constant.Constant))
        self.assertEqual(f.arguments[1].get_value(), 1.)
        # fewer than two arguments is rejected
        self.assertRaises(AssertionError, optmod.function.add, [x])
        self.assertRaises(AssertionError, optmod.function.add, [])
    def test_constant_constant(self):
        """constant + constant folds to a single constant value."""
        a = optmod.constant.Constant(4.)
        b = optmod.constant.Constant(5.)
        f = a + b
        self.assertTrue(f.is_constant())
        self.assertEqual(f.get_value(), 9.)
    def test_scalar_scalar(self):
        """scalar variable + scalar (number or variable): structure, value, repr."""
        x = optmod.variable.VariableScalar(name='x', value=2.)
        y = optmod.variable.VariableScalar(name='y', value=3.)
        # variable + number: the variable stays first, number becomes a Constant
        f = x + 1.
        self.assertTrue(isinstance(f, optmod.function.add))
        self.assertTrue(f.arguments[0] is x)
        self.assertTrue(isinstance(f.arguments[1], optmod.constant.Constant))
        self.assertEqual(f.arguments[1].get_value(), 1.)
        self.assertEqual(f.get_value(), 3.)
        self.assertEqual(str(f), 'x + %s' %optmod.utils.repr_number(1.))
        # number + variable: same normalized structure (addition commutes)
        f = 1. + x
        self.assertTrue(isinstance(f, optmod.function.add))
        self.assertTrue(f.arguments[0] is x)
        self.assertTrue(isinstance(f.arguments[1], optmod.constant.Constant))
        self.assertEqual(f.arguments[1].get_value(), 1.)
        self.assertEqual(f.get_value(), 3.)
        self.assertEqual(str(f), 'x + %s' %optmod.utils.repr_number(1.))
        f = x + y
        self.assertTrue(isinstance(f, optmod.function.add))
        self.assertTrue(f.arguments[0] is x)
        self.assertTrue(f.arguments[1] is y)
        self.assertEqual(f.get_value(), 5.)
        self.assertEqual(str(f), 'x + y')
        # chained add flattens into a single add node with three arguments
        f = 4. + x + y
        self.assertTrue(isinstance(f, optmod.function.add))
        self.assertTrue(isinstance(f.arguments[1], optmod.constant.Constant))
        self.assertEqual(f.arguments[1].get_value(), 4.)
        self.assertTrue(f.arguments[0] is x)
        self.assertTrue(f.arguments[2] is y)
        self.assertEqual(f.get_value(), 9.)
        self.assertEqual(str(f), 'x + %s + y' %optmod.utils.repr_number(4.))
    def test_scalar_matrix(self):
        """scalar (variable or number) + matrix broadcasts elementwise."""
        rn = optmod.utils.repr_number
        value = [[1., 2., 3.], [4., 5., 6.]]
        x = optmod.variable.VariableScalar(name='x', value=2.)
        y = optmod.variable.VariableMatrix(name='y', value=value)
        r = np.random.random((2,3))
        # scalar variable + ndarray
        f = x + r
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertTrue(isinstance(fij, optmod.function.add))
                self.assertTrue(fij.arguments[0] is x)
                self.assertEqual(fij.arguments[1].get_value(), r[i,j])
        self.assertTrue(isinstance(f.get_value(), np.matrix))
        self.assertTrue(np.all(f.get_value() == 2. + r))
        self.assertEqual(str(f),
                         ('[[ x + %s, x + %s, x + %s ],\n' %(rn(r[0,0]), rn(r[0,1]), rn(r[0,2])) +
                          ' [ x + %s, x + %s, x + %s ]]\n' %(rn(r[1,0]), rn(r[1,1]), rn(r[1,2]))))
        # ndarray + scalar variable (commuted)
        f = r + x
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertTrue(isinstance(fij, optmod.function.add))
                self.assertTrue(fij.arguments[0] is x)
                self.assertEqual(fij.arguments[1].get_value(), r[i,j])
        self.assertTrue(isinstance(f.get_value(), np.matrix))
        self.assertTrue(np.all(f.get_value() == 2. + r))
        self.assertEqual(str(f),
                         ('[[ x + %s, x + %s, x + %s ],\n' %(rn(r[0,0]), rn(r[0,1]), rn(r[0,2])) +
                          ' [ x + %s, x + %s, x + %s ]]\n' %(rn(r[1,0]), rn(r[1,1]), rn(r[1,2]))))
        # np.matrix operands behave like ndarray operands
        f = x + np.matrix(r)
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTrue(np.all(f.get_value() == 2. + r))
        self.assertEqual(str(f),
                         ('[[ x + %s, x + %s, x + %s ],\n' %(rn(r[0,0]), rn(r[0,1]), rn(r[0,2])) +
                          ' [ x + %s, x + %s, x + %s ]]\n' %(rn(r[1,0]), rn(r[1,1]), rn(r[1,2]))))
        f = np.matrix(r) + x
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTrue(np.all(f.get_value() == 2. + r))
        self.assertEqual(str(f),
                         ('[[ x + %s, x + %s, x + %s ],\n' %(rn(r[0,0]), rn(r[0,1]), rn(r[0,2])) +
                          ' [ x + %s, x + %s, x + %s ]]\n' %(rn(r[1,0]), rn(r[1,1]), rn(r[1,2]))))
        # matrix variable + number
        f = y + 1
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertTrue(isinstance(fij, optmod.function.add))
                self.assertTrue(fij.arguments[0] is y[i,j])
                self.assertEqual(fij.arguments[1].get_value(), 1.)
        self.assertTrue(isinstance(f.get_value(), np.matrix))
        self.assertTrue(np.all(f.get_value() == np.array(value) + 1))
        self.assertEqual(str(f),
                         ('[[ y[0,0] + %s, y[0,1] + %s, y[0,2] + %s ],\n' %(rn(1), rn(1), rn(1)) +
                          ' [ y[1,0] + %s, y[1,1] + %s, y[1,2] + %s ]]\n' %(rn(1), rn(1), rn(1))))
        f = 1 + y
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTrue(np.all(f.get_value() == np.array(value) + 1))
        self.assertEqual(str(f),
                         ('[[ y[0,0] + %s, y[0,1] + %s, y[0,2] + %s ],\n' %(rn(1), rn(1), rn(1)) +
                          ' [ y[1,0] + %s, y[1,1] + %s, y[1,2] + %s ]]\n' %(rn(1), rn(1), rn(1))))
        # scalar variable + matrix variable: matrix entry comes first per cell
        f = x + y
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertTrue(isinstance(fij, optmod.function.add))
                self.assertTrue(fij.arguments[0] is y[i,j])
                self.assertTrue(fij.arguments[1] is x)
        self.assertTrue(isinstance(f.get_value(), np.matrix))
        self.assertTrue(np.all(f.get_value() == np.array(value) + 2.))
        self.assertEqual(str(f),
                         ('[[ y[0,0] + x, y[0,1] + x, y[0,2] + x ],\n' +
                          ' [ y[1,0] + x, y[1,1] + x, y[1,2] + x ]]\n'))
        f = y + x
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertTrue(isinstance(fij, optmod.function.add))
                self.assertTrue(fij.arguments[0] is y[i,j])
                self.assertTrue(fij.arguments[1] is x)
        self.assertTrue(isinstance(f.get_value(), np.matrix))
        self.assertTrue(np.all(f.get_value() == np.array(value) + 2.))
        self.assertEqual(str(f),
                         ('[[ y[0,0] + x, y[0,1] + x, y[0,2] + x ],\n' +
                          ' [ y[1,0] + x, y[1,1] + x, y[1,2] + x ]]\n'))
        # adding two add-expressions flattens into a four-argument add per cell
        f = (y + 1) + (3 + x)
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        for i in range(2):
            for j in range(3):
                self.assertEqual(str(f[i,j]), 'y[%d,%d] + %s + x + %s' %(i, j, rn(1), rn(3)))
                self.assertTrue(f[i,j].arguments[0] is y[i,j])
                self.assertTrue(f[i,j].arguments[1].is_constant())
                self.assertTrue(f[i,j].arguments[2] is x)
                self.assertTrue(f[i,j].arguments[3].is_constant())
        self.assertTrue(np.all(f.get_value() == np.array(value) + 1. + 3. + 2.))
    def test_matrix_matrix(self):
        """matrix + matrix adds elementwise with matching shapes."""
        rn = optmod.utils.repr_number
        value1 = [[1., 2., 3.], [4., 5., 6.]]
        value2 = np.random.random((2,3))
        x = optmod.variable.VariableMatrix(name='x', value=value1)
        y = optmod.variable.VariableMatrix(name='y', value=value2)
        # matrix variable + ndarray
        f = x + value2
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTupleEqual(f.shape, (2,3))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertEqual(str(fij), 'x[%d,%d] + %s' %(i, j, rn(value2[i,j])))
        self.assertTrue(np.all(f.get_value() == np.matrix(value1) + value2))
        f = value2 + x
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTupleEqual(f.shape, (2,3))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertEqual(str(fij), 'x[%d,%d] + %s' %(i, j, rn(value2[i,j])))
        self.assertTrue(np.all(f.get_value() == np.matrix(value1) + value2))
        # matrix variable + matrix variable: left operand appears first per cell
        f = x + y
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTupleEqual(f.shape, (2,3))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertEqual(str(fij), 'x[%d,%d] + y[%d,%d]' %(i, j, i, j))
        self.assertTrue(np.all(f.get_value() == np.matrix(value1) + value2))
        f = y + x
        self.assertTrue(isinstance(f, optmod.expression.ExpressionMatrix))
        self.assertTupleEqual(f.shape, (2,3))
        for i in range(2):
            for j in range(3):
                fij = f[i,j]
                self.assertEqual(str(fij), 'y[%d,%d] + x[%d,%d]' %(i, j, i, j))
        self.assertTrue(np.all(f.get_value() == np.matrix(value1) + value2))
    def test_zero(self):
        """x + 0 and 0 + x short-circuit to the variable itself (no add node)."""
        x = optmod.variable.VariableScalar(name='x', value=3.)
        f = x + 0
        self.assertTrue(f is x)
        f = 0 + x
        self.assertTrue(f is x)
    def test_derivative(self):
        """d/dx of a sum counts the occurrences of x; absent vars give 0."""
        x = optmod.variable.VariableScalar(name='x', value=3.)
        y = optmod.variable.VariableScalar(name='y', value=4.)
        f = x + 1
        fx = f.get_derivative(x)
        fy = f.get_derivative(y)
        self.assertTrue(isinstance(fx, optmod.constant.Constant))
        self.assertEqual(fx.get_value(), 1.)
        self.assertTrue(isinstance(fy, optmod.constant.Constant))
        self.assertEqual(fy.get_value(), 0.)
        f = x + y
        fx = f.get_derivative(x)
        fy = f.get_derivative(y)
        self.assertTrue(isinstance(fx, optmod.constant.Constant))
        self.assertEqual(fx.get_value(), 1.)
        self.assertTrue(isinstance(fy, optmod.constant.Constant))
        self.assertEqual(fy.get_value(), 1.)
        # x occurs three times below, so df/dx = 3
        f = (x + 1) + (x + 3) + (y + (x + 5.))
        fx = f.get_derivative(x)
        fy = f.get_derivative(y)
        self.assertTrue(isinstance(fx, optmod.constant.Constant))
        self.assertEqual(fx.get_value(), 3.)
        self.assertTrue(isinstance(fy, optmod.constant.Constant))
        self.assertEqual(fy.get_value(), 1.)
        f = x + x
        fx = f.get_derivative(x)
        self.assertTrue(fx.is_constant(2.))
        self.assertEqual(str(fx), optmod.utils.repr_number(2))
        # reusing a subexpression doubles every coefficient
        f1 = x + 1 + y
        f2 = f1 + f1
        f2x = f2.get_derivative(x)
        f2y = f2.get_derivative(y)
        self.assertEqual(f2.get_value(), 2.*(3.+1.+4.))
        self.assertEqual(f2x.get_value(), 2.)
        self.assertEqual(f2y.get_value(), 2.)
    def test_analyze(self):
        """__analyze__ reports sums as affine: b = constant term, a = coefficients."""
        x = optmod.variable.VariableScalar('x')
        y = optmod.variable.VariableScalar('y')
        f = x + 1
        prop = f.__analyze__()
        self.assertTrue(prop['affine'])
        self.assertEqual(prop['b'], 1.)
        self.assertEqual(len(prop['a']), 1)
        self.assertEqual(prop['a'][x], 1.)
        f = 2 + x
        prop = f.__analyze__()
        self.assertTrue(prop['affine'])
        self.assertEqual(prop['b'], 2.)
        self.assertEqual(len(prop['a']), 1)
        self.assertEqual(prop['a'][x], 1.)
        # repeated variables accumulate into a single coefficient
        f = x + y + x
        prop = f.__analyze__()
        self.assertTrue(prop['affine'])
        self.assertEqual(prop['b'], 0.)
        self.assertEqual(len(prop['a']), 2)
        self.assertEqual(prop['a'][x], 2.)
        self.assertEqual(prop['a'][y], 1.)
        f = x + y + 10. + x
        prop = f.__analyze__()
        self.assertTrue(prop['affine'])
        self.assertEqual(prop['b'], 10.)
        self.assertEqual(len(prop['a']), 2)
        self.assertEqual(prop['a'][x], 2.)
        self.assertEqual(prop['a'][y], 1.)
    def test_std_components(self):
        """Standard components: phi is the expression, gradient is constant, Hessian empty."""
        x = optmod.variable.VariableScalar('x')
        y = optmod.variable.VariableScalar('y')
        f = x + y + x
        comp = f.__get_std_components__()
        phi = comp['phi']
        gphi_list = comp['gphi_list']
        Hphi_list = comp['Hphi_list']
        self.assertTrue(phi is f)
        self.assertEqual(len(gphi_list), 2)
        v, exp = gphi_list[0]
        self.assertTrue(v is x)
        self.assertTrue(exp.is_constant())
        self.assertEqual(exp.get_value(), 2.)
        v, exp = gphi_list[1]
        self.assertTrue(v is y)
        self.assertTrue(exp.is_constant())
        self.assertEqual(exp.get_value(), 1.)
        # a pure sum is linear, so the Hessian has no entries
        self.assertEqual(len(Hphi_list), 0)
| [
"numpy.matrix",
"optmod.utils.repr_number",
"optmod.variable.VariableMatrix",
"optmod.variable.VariableScalar",
"optmod.constant.Constant",
"numpy.random.random",
"optmod.expression.make_Expression",
"numpy.array"
] | [((130, 170), 'optmod.variable.VariableScalar', 'optmod.variable.VariableScalar', ([], {'name': '"""x"""'}), "(name='x')\n", (160, 170), False, 'import optmod\n'), ((702, 731), 'optmod.constant.Constant', 'optmod.constant.Constant', (['(4.0)'], {}), '(4.0)\n', (726, 731), False, 'import optmod\n'), ((743, 772), 'optmod.constant.Constant', 'optmod.constant.Constant', (['(5.0)'], {}), '(5.0)\n', (767, 772), False, 'import optmod\n'), ((924, 975), 'optmod.variable.VariableScalar', 'optmod.variable.VariableScalar', ([], {'name': '"""x"""', 'value': '(2.0)'}), "(name='x', value=2.0)\n", (954, 975), False, 'import optmod\n'), ((987, 1038), 'optmod.variable.VariableScalar', 'optmod.variable.VariableScalar', ([], {'name': '"""y"""', 'value': '(3.0)'}), "(name='y', value=3.0)\n", (1017, 1038), False, 'import optmod\n'), ((2641, 2692), 'optmod.variable.VariableScalar', 'optmod.variable.VariableScalar', ([], {'name': '"""x"""', 'value': '(2.0)'}), "(name='x', value=2.0)\n", (2671, 2692), False, 'import optmod\n'), ((2704, 2757), 'optmod.variable.VariableMatrix', 'optmod.variable.VariableMatrix', ([], {'name': '"""y"""', 'value': 'value'}), "(name='y', value=value)\n", (2734, 2757), False, 'import optmod\n'), ((2770, 2794), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (2786, 2794), True, 'import numpy as np\n'), ((8274, 8298), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (8290, 8298), True, 'import numpy as np\n'), ((8310, 8364), 'optmod.variable.VariableMatrix', 'optmod.variable.VariableMatrix', ([], {'name': '"""x"""', 'value': 'value1'}), "(name='x', value=value1)\n", (8340, 8364), False, 'import optmod\n'), ((8377, 8431), 'optmod.variable.VariableMatrix', 'optmod.variable.VariableMatrix', ([], {'name': '"""y"""', 'value': 'value2'}), "(name='y', value=value2)\n", (8407, 8431), False, 'import optmod\n'), ((10043, 10094), 'optmod.variable.VariableScalar', 'optmod.variable.VariableScalar', ([], {'name': 
'"""x"""', 'value': '(3.0)'}), "(name='x', value=3.0)\n", (10073, 10094), False, 'import optmod\n'), ((10257, 10308), 'optmod.variable.VariableScalar', 'optmod.variable.VariableScalar', ([], {'name': '"""x"""', 'value': '(3.0)'}), "(name='x', value=3.0)\n", (10287, 10308), False, 'import optmod\n'), ((10320, 10371), 'optmod.variable.VariableScalar', 'optmod.variable.VariableScalar', ([], {'name': '"""y"""', 'value': '(4.0)'}), "(name='y', value=4.0)\n", (10350, 10371), False, 'import optmod\n'), ((11810, 11845), 'optmod.variable.VariableScalar', 'optmod.variable.VariableScalar', (['"""x"""'], {}), "('x')\n", (11840, 11845), False, 'import optmod\n'), ((11858, 11893), 'optmod.variable.VariableScalar', 'optmod.variable.VariableScalar', (['"""y"""'], {}), "('y')\n", (11888, 11893), False, 'import optmod\n'), ((12920, 12955), 'optmod.variable.VariableScalar', 'optmod.variable.VariableScalar', (['"""x"""'], {}), "('x')\n", (12950, 12955), False, 'import optmod\n'), ((12968, 13003), 'optmod.variable.VariableScalar', 'optmod.variable.VariableScalar', (['"""y"""'], {}), "('y')\n", (12998, 13003), False, 'import optmod\n'), ((4273, 4285), 'numpy.matrix', 'np.matrix', (['r'], {}), '(r)\n', (4282, 4285), True, 'import numpy as np\n'), ((4662, 4674), 'numpy.matrix', 'np.matrix', (['r'], {}), '(r)\n', (4671, 4674), True, 'import numpy as np\n'), ((11476, 11503), 'optmod.utils.repr_number', 'optmod.utils.repr_number', (['(2)'], {}), '(2)\n', (11500, 11503), False, 'import optmod\n'), ((208, 246), 'optmod.expression.make_Expression', 'optmod.expression.make_Expression', (['(1.0)'], {}), '(1.0)\n', (241, 246), False, 'import optmod\n'), ((1393, 1422), 'optmod.utils.repr_number', 'optmod.utils.repr_number', (['(1.0)'], {}), '(1.0)\n', (1417, 1422), False, 'import optmod\n'), ((1778, 1807), 'optmod.utils.repr_number', 'optmod.utils.repr_number', (['(1.0)'], {}), '(1.0)\n', (1802, 1807), False, 'import optmod\n'), ((2463, 2492), 'optmod.utils.repr_number', 'optmod.utils.repr_number', 
(['(4.0)'], {}), '(4.0)\n', (2487, 2492), False, 'import optmod\n'), ((5530, 5545), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (5538, 5545), True, 'import numpy as np\n'), ((5925, 5940), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (5933, 5940), True, 'import numpy as np\n'), ((6654, 6669), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (6662, 6669), True, 'import numpy as np\n'), ((7340, 7355), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (7348, 7355), True, 'import numpy as np\n'), ((8797, 8814), 'numpy.matrix', 'np.matrix', (['value1'], {}), '(value1)\n', (8806, 8814), True, 'import numpy as np\n'), ((9207, 9224), 'numpy.matrix', 'np.matrix', (['value1'], {}), '(value1)\n', (9216, 9224), True, 'import numpy as np\n'), ((9591, 9608), 'numpy.matrix', 'np.matrix', (['value1'], {}), '(value1)\n', (9600, 9608), True, 'import numpy as np\n'), ((9975, 9992), 'numpy.matrix', 'np.matrix', (['value1'], {}), '(value1)\n', (9984, 9992), True, 'import numpy as np\n'), ((8103, 8118), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (8111, 8118), True, 'import numpy as np\n')] |
__all__ = ['ANTsImage',
           'LabelImage',
           'copy_image_info',
           'set_origin',
           'get_origin',
           'set_direction',
           'get_direction',
           'set_spacing',
           'get_spacing',
           'image_physical_space_consistency',
           'image_type_cast',
           'allclose']

import os
import numpy as np
import pandas as pd

try:
    from functools import partialmethod
    HAS_PY3 = True
except ImportError:
    # BUGFIX: was a bare `except:` — only an import failure should flip this
    # flag; a bare except would also swallow KeyboardInterrupt/SystemExit.
    HAS_PY3 = False

import inspect

from .. import registration, segmentation, utils, viz
from . import ants_image_io as iio2

# ITK pixeltypes supported for cloning/casting, and the bidirectional
# mapping between ITK pixeltype names and numpy dtype names.
_supported_ptypes = {'unsigned char', 'unsigned int', 'float', 'double'}
_supported_dtypes = {'uint8', 'uint32', 'float32', 'float64'}

_itk_to_npy_map = {
    'unsigned char': 'uint8',
    'unsigned int': 'uint32',
    'float': 'float32',
    'double': 'float64'}

_npy_to_itk_map = {
    'uint8': 'unsigned char',
    'uint32':'unsigned int',
    'float32': 'float',
    'float64': 'double'}
class ANTsImage(object):
def __init__(self, pixeltype='float', dimension=3, components=1, pointer=None, is_rgb=False, label_image=None):
"""
Initialize an ANTsImage
Arguments
---------
pixeltype : string
ITK pixeltype of image
dimension : integer
number of image dimension. Does NOT include components dimension
components : integer
number of pixel components in the image
pointer : py::capsule (optional)
pybind11 capsule holding the pointer to the underlying ITK image object
label_image : LabelImage
a discrete label image for mapping locations to atlas regions
"""
## Attributes which cant change without creating a new ANTsImage object
self.pointer = pointer
self.pixeltype = pixeltype
self.dimension = dimension
self.components = components
self.has_components = self.components > 1
self.dtype = _itk_to_npy_map[self.pixeltype]
self.is_rgb = is_rgb
self._pixelclass = 'vector' if self.has_components else 'scalar'
self._shortpclass = 'V' if self._pixelclass == 'vector' else ''
if is_rgb:
self._pixelclass = 'rgb'
self._shortpclass = 'RGB'
self._libsuffix = '%s%s%i' % (self._shortpclass, utils.short_ptype(self.pixeltype), self.dimension)
self.shape = utils.get_lib_fn('getShape%s'%self._libsuffix)(self.pointer)
self.physical_shape = tuple([round(sh*sp,3) for sh,sp in zip(self.shape, self.spacing)])
if label_image is not None:
if not isinstance(label_image, LabelImage):
raise ValueError('label_image argument must be a LabelImage type')
self.label_image = label_image
self._array = None
@property
def spacing(self):
"""
Get image spacing
Returns
-------
tuple
"""
libfn = utils.get_lib_fn('getSpacing%s'%self._libsuffix)
return libfn(self.pointer)
def set_spacing(self, new_spacing):
"""
Set image spacing
Arguments
---------
new_spacing : tuple or list
updated spacing for the image.
should have one value for each dimension
Returns
-------
None
"""
if not isinstance(new_spacing, (tuple, list)):
raise ValueError('arg must be tuple or list')
if len(new_spacing) != self.dimension:
raise ValueError('must give a spacing value for each dimension (%i)' % self.dimension)
libfn = utils.get_lib_fn('setSpacing%s'%self._libsuffix)
libfn(self.pointer, new_spacing)
@property
def origin(self):
"""
Get image origin
Returns
-------
tuple
"""
libfn = utils.get_lib_fn('getOrigin%s'%self._libsuffix)
return libfn(self.pointer)
def set_origin(self, new_origin):
"""
Set image origin
Arguments
---------
new_origin : tuple or list
updated origin for the image.
should have one value for each dimension
Returns
-------
None
"""
if not isinstance(new_origin, (tuple, list)):
raise ValueError('arg must be tuple or list')
if len(new_origin) != self.dimension:
raise ValueError('must give a origin value for each dimension (%i)' % self.dimension)
libfn = utils.get_lib_fn('setOrigin%s'%self._libsuffix)
libfn(self.pointer, new_origin)
@property
def direction(self):
"""
Get image direction
Returns
-------
tuple
"""
libfn = utils.get_lib_fn('getDirection%s'%self._libsuffix)
return libfn(self.pointer)
def set_direction(self, new_direction):
"""
Set image direction
Arguments
---------
new_direction : numpy.ndarray or tuple or list
updated direction for the image.
should have one value for each dimension
Returns
-------
None
"""
if isinstance(new_direction, (tuple,list)):
new_direction = np.asarray(new_direction)
if not isinstance(new_direction, np.ndarray):
raise ValueError('arg must be np.ndarray or tuple or list')
if len(new_direction) != self.dimension:
raise ValueError('must give a origin value for each dimension (%i)' % self.dimension)
libfn = utils.get_lib_fn('setDirection%s'%self._libsuffix)
libfn(self.pointer, new_direction)
@property
def orientation(self):
if self.dimension == 3:
return self.get_orientation()
else:
return None
    def view(self, single_components=False):
        """
        Get a numpy array providing direct, shared access to the image data.
        IMPORTANT: If you alter the view, then the underlying image data
        will also be altered.

        Arguments
        ---------
        single_components : boolean (default is False)
            if True, keep the extra component dimension in returned array even
            if image only has one component (i.e. self.has_components == False)

        Returns
        -------
        ndarray
        """
        # RGB images cannot be viewed directly; convert to a vector image first
        if self.is_rgb:
            img = self.rgb_to_vector()
        else:
            img = self
        dtype = img.dtype
        # the native buffer stores axes in reverse order, hence the reversed
        # shape here and the final transpose below — presumably ITK's
        # row-major layout; confirm against the C++ binding
        shape = img.shape[::-1]
        if img.has_components or (single_components == True):
            shape = list(shape) + [img.components]
        libfn = utils.get_lib_fn('toNumpy%s'%img._libsuffix)
        memview = libfn(img.pointer)
        # reinterpret the raw memoryview with the right dtype/shape, then
        # transpose back to the conventional axis order (no copy is made)
        return np.asarray(memview).view(dtype = dtype).reshape(shape).view(np.ndarray).T
def numpy(self, single_components=False):
"""
Get a numpy array copy representing the underlying image data. Altering
this ndarray will have NO effect on the underlying image data.
Arguments
---------
single_components : boolean (default is False)
if True, keep the extra component dimension in returned array even
if image only has one component (i.e. self.has_components == False)
Returns
-------
ndarray
"""
array = np.array(self.view(single_components=single_components), copy=True, dtype=self.dtype)
if self.has_components or (single_components == True):
array = np.rollaxis(array, 0, self.dimension+1)
return array
def clone(self, pixeltype=None):
"""
Create a copy of the given ANTsImage with the same data and info, possibly with
a different data type for the image data. Only supports casting to
uint8 (unsigned char), uint32 (unsigned int), float32 (float), and float64 (double)
Arguments
---------
dtype: string (optional)
if None, the dtype will be the same as the cloned ANTsImage. Otherwise,
the data will be cast to this type. This can be a numpy type or an ITK
type.
Options:
'unsigned char' or 'uint8',
'unsigned int' or 'uint32',
'float' or 'float32',
'double' or 'float64'
Returns
-------
ANTsImage
"""
if pixeltype is None:
pixeltype = self.pixeltype
if pixeltype not in _supported_ptypes:
raise ValueError('Pixeltype %s not supported. Supported types are %s' % (pixeltype, _supported_ptypes))
if self.has_components and (not self.is_rgb):
comp_imgs = utils.split_channels(self)
comp_imgs_cloned = [comp_img.clone(pixeltype) for comp_img in comp_imgs]
return utils.merge_channels(comp_imgs_cloned)
else:
p1_short = utils.short_ptype(self.pixeltype)
p2_short = utils.short_ptype(pixeltype)
ndim = self.dimension
fn_suffix = '%s%i%s%i' % (p1_short,ndim,p2_short,ndim)
libfn = utils.get_lib_fn('antsImageClone%s'%fn_suffix)
pointer_cloned = libfn(self.pointer)
return ANTsImage(pixeltype=pixeltype,
dimension=self.dimension,
components=self.components,
is_rgb=self.is_rgb,
pointer=pointer_cloned)
# pythonic alias for `clone` is `copy`
copy = clone
def astype(self, dtype):
"""
Cast & clone an ANTsImage to a given numpy datatype.
Map:
uint8 : unsigned char
uint32 : unsigned int
float32 : float
float64 : double
"""
if dtype not in _supported_dtypes:
raise ValueError('Datatype %s not supported. Supported types are %s' % (dtype, _supported_dtypes))
pixeltype = _npy_to_itk_map[dtype]
return self.clone(pixeltype)
def new_image_like(self, data):
"""
Create a new ANTsImage with the same header information, but with
a new image array.
Arguments
---------
data : ndarray or py::capsule
New array or pointer for the image.
It must have the same shape as the current
image data.
Returns
-------
ANTsImage
"""
if not isinstance(data, np.ndarray):
raise ValueError('data must be a numpy array')
if not self.has_components:
if data.shape != self.shape:
raise ValueError('given array shape (%s) and image array shape (%s) do not match' % (data.shape, self.shape))
else:
if (data.shape[-1] != self.components) or (data.shape[:-1] != self.shape):
raise ValueError('given array shape (%s) and image array shape (%s) do not match' % (data.shape[1:], self.shape))
return iio2.from_numpy(data, origin=self.origin,
spacing=self.spacing, direction=self.direction,
has_components=self.has_components)
def to_file(self, filename):
"""
Write the ANTsImage to file
Args
----
filename : string
filepath to which the image will be written
"""
filename = os.path.expanduser(filename)
libfn = utils.get_lib_fn('toFile%s'%self._libsuffix)
libfn(self.pointer, filename)
to_filename = to_file
def apply(self, fn):
"""
Apply an arbitrary function to ANTsImage.
Args
----
fn : python function or lambda
function to apply to ENTIRE image at once
Returns
-------
ANTsImage
image with function applied to it
"""
this_array = self.numpy()
new_array = fn(this_array)
return self.new_image_like(new_array)
def as_label_image(self, label_info=None):
return LabelImage(image=self, label_info=label_info)
## NUMPY FUNCTIONS ##
def abs(self, axis=None):
    """ Return the elementwise absolute value of the image array """
    return np.absolute(self.numpy())
def mean(self, axis=None):
    """ Return the mean of the image array along the specified axis """
    return np.mean(self.numpy(), axis=axis)
def median(self, axis=None):
    """ Return the median of the image array along the specified axis """
    arr = self.numpy()
    return np.median(arr, axis=axis)
def std(self, axis=None):
    """ Return the standard deviation along the specified axis """
    return np.std(self.numpy(), axis=axis)
def sum(self, axis=None, keepdims=False):
    """ Return the sum along the specified axis """
    return np.sum(self.numpy(), axis=axis, keepdims=keepdims)
def min(self, axis=None):
    """ Return the minimum along the specified axis """
    return np.min(self.numpy(), axis=axis)
def max(self, axis=None):
    """ Return the maximum along the specified axis """
    return np.max(self.numpy(), axis=axis)
def range(self, axis=None):
    """ Return a (min, max) tuple along the specified axis """
    lo = self.min(axis=axis)
    hi = self.max(axis=axis)
    return (lo, hi)
def argmin(self, axis=None):
    """ Return the index of the minimum along the specified axis """
    return np.argmin(self.numpy(), axis=axis)
def argmax(self, axis=None):
    """ Return the index of the maximum along the specified axis """
    return np.argmax(self.numpy(), axis=axis)
def argrange(self, axis=None):
    """
    Return (argmin, argmax); when an axis is given the two index arrays
    are stacked and transposed so each row pairs one min/max index.
    """
    lo = self.argmin(axis=axis)
    hi = self.argmax(axis=axis)
    if axis is None:
        return (lo, hi)
    return np.stack([lo, hi]).T
def flatten(self):
    """ Return the image data as a flat, copied 1-D array """
    arr = self.numpy()
    return arr.reshape(-1).copy()
def nonzero(self):
    """ Return the indices of the non-zero image elements """
    return np.nonzero(self.numpy())
def unique(self, sort=False):
    """ Return the unique set of values in the image, optionally sorted """
    vals = np.unique(self.numpy())
    return np.sort(vals) if sort else vals
## OVERLOADED OPERATORS ##
def __add__(self, other):
    """ Elementwise addition with a scalar, ndarray, or aligned ANTsImage """
    this_array = self.numpy()
    if isinstance(other, ANTsImage):
        # image operands must share the same physical space
        if not image_physical_space_consistency(self, other):
            raise ValueError('images do not occupy same physical space')
        other = other.numpy()
    new_array = this_array + other
    return self.new_image_like(new_array)
def __sub__(self, other):
    """ Elementwise subtraction with a scalar, ndarray, or aligned ANTsImage """
    this_array = self.numpy()
    if isinstance(other, ANTsImage):
        # image operands must share the same physical space
        if not image_physical_space_consistency(self, other):
            raise ValueError('images do not occupy same physical space')
        other = other.numpy()
    new_array = this_array - other
    return self.new_image_like(new_array)
def __mul__(self, other):
    """ Elementwise multiplication with a scalar, ndarray, or aligned ANTsImage """
    this_array = self.numpy()
    if isinstance(other, ANTsImage):
        # image operands must share the same physical space
        if not image_physical_space_consistency(self, other):
            raise ValueError('images do not occupy same physical space')
        other = other.numpy()
    new_array = this_array * other
    return self.new_image_like(new_array)
def __truediv__(self, other):
    """ Elementwise true division with a scalar, ndarray, or aligned ANTsImage """
    this_array = self.numpy()
    if isinstance(other, ANTsImage):
        # image operands must share the same physical space
        if not image_physical_space_consistency(self, other):
            raise ValueError('images do not occupy same physical space')
        other = other.numpy()
    new_array = this_array / other
    return self.new_image_like(new_array)
def __pow__(self, other):
    """ Elementwise power with a scalar, ndarray, or aligned ANTsImage exponent """
    this_array = self.numpy()
    if isinstance(other, ANTsImage):
        # image operands must share the same physical space
        if not image_physical_space_consistency(self, other):
            raise ValueError('images do not occupy same physical space')
        other = other.numpy()
    new_array = this_array ** other
    return self.new_image_like(new_array)
def __gt__(self, other):
    """ Elementwise `>` comparison; returns a uint8 (0/1) mask image """
    this_array = self.numpy()
    if isinstance(other, ANTsImage):
        # image operands must share the same physical space
        if not image_physical_space_consistency(self, other):
            raise ValueError('images do not occupy same physical space')
        other = other.numpy()
    new_array = this_array > other
    return self.new_image_like(new_array.astype('uint8'))
def __ge__(self, other):
    """ Elementwise `>=` comparison; returns a uint8 (0/1) mask image """
    this_array = self.numpy()
    if isinstance(other, ANTsImage):
        # image operands must share the same physical space
        if not image_physical_space_consistency(self, other):
            raise ValueError('images do not occupy same physical space')
        other = other.numpy()
    new_array = this_array >= other
    return self.new_image_like(new_array.astype('uint8'))
def __lt__(self, other):
    """ Elementwise `<` comparison; returns a uint8 (0/1) mask image """
    this_array = self.numpy()
    if isinstance(other, ANTsImage):
        # image operands must share the same physical space
        if not image_physical_space_consistency(self, other):
            raise ValueError('images do not occupy same physical space')
        other = other.numpy()
    new_array = this_array < other
    return self.new_image_like(new_array.astype('uint8'))
def __le__(self, other):
    """ Elementwise `<=` comparison; returns a uint8 (0/1) mask image """
    this_array = self.numpy()
    if isinstance(other, ANTsImage):
        # image operands must share the same physical space
        if not image_physical_space_consistency(self, other):
            raise ValueError('images do not occupy same physical space')
        other = other.numpy()
    new_array = this_array <= other
    return self.new_image_like(new_array.astype('uint8'))
def __eq__(self, other):
    """ Elementwise `==` comparison; returns a uint8 (0/1) mask image.
    NOTE(review): overriding __eq__ without __hash__ makes instances
    unhashable-by-default semantics worth confirming for this class. """
    this_array = self.numpy()
    if isinstance(other, ANTsImage):
        # image operands must share the same physical space
        if not image_physical_space_consistency(self, other):
            raise ValueError('images do not occupy same physical space')
        other = other.numpy()
    new_array = this_array == other
    return self.new_image_like(new_array.astype('uint8'))
def __ne__(self, other):
    """ Elementwise `!=` comparison; returns a uint8 (0/1) mask image """
    this_array = self.numpy()
    if isinstance(other, ANTsImage):
        # image operands must share the same physical space
        if not image_physical_space_consistency(self, other):
            raise ValueError('images do not occupy same physical space')
        other = other.numpy()
    new_array = this_array != other
    return self.new_image_like(new_array.astype('uint8'))
def __getitem__(self, idx):
    """ Index the image array; an aligned ANTsImage index acts as a boolean mask """
    if self._array is None:
        # lazily cache the numpy array for repeated indexing
        self._array = self.numpy()
    if isinstance(idx, ANTsImage):
        if not image_physical_space_consistency(self, idx):
            raise ValueError('images do not occupy same physical space')
        return self._array.__getitem__(idx.numpy().astype('bool'))
    else:
        return self._array.__getitem__(idx)
def __setitem__(self, idx, value):
    """ Assign into the underlying image data; an aligned ANTsImage index acts as a boolean mask """
    # write through the live view so the assignment mutates the image itself
    arr = self.view()
    if isinstance(idx, ANTsImage):
        if not image_physical_space_consistency(self, idx):
            raise ValueError('images do not occupy same physical space')
        arr.__setitem__(idx.numpy().astype('bool'), value)
    else:
        arr.__setitem__(idx, value)
def __repr__(self):
    """ Human-readable summary of the image header (pixeltype, shape, spacing, origin, direction) """
    if self.dimension == 3:
        # 3D images also report their anatomical orientation
        s = 'ANTsImage ({})\n'.format(self.orientation)
    else:
        s = 'ANTsImage\n'
    s = s +\
        '\t {:<10} : {} ({})\n'.format('Pixel Type', self.pixeltype, self.dtype)+\
        '\t {:<10} : {}{}\n'.format('Components', self.components, ' (RGB)' if 'RGB' in self._libsuffix else '')+\
        '\t {:<10} : {}\n'.format('Dimensions', self.shape)+\
        '\t {:<10} : {}\n'.format('Spacing', tuple([round(s,4) for s in self.spacing]))+\
        '\t {:<10} : {}\n'.format('Origin', tuple([round(o,4) for o in self.origin]))+\
        '\t {:<10} : {}\n'.format('Direction', np.round(self.direction.flatten(),4))
    return s
if HAS_PY3:
    # Attach every function from these modules whose first argument is an
    # image ('img' or 'image') as a partial method on ANTsImage.
    # FIX: inspect.getargspec was deprecated and removed in Python 3.11;
    # getfullargspec is the supported equivalent (and also handles
    # keyword-only/annotated signatures that getargspec rejected).
    # The four previously duplicated loops are collapsed into one.
    for _module in (utils, registration, segmentation, viz):
        for k, v in _module.__dict__.items():
            if callable(v):
                args = inspect.getfullargspec(v).args
                if (len(args) > 0) and (args[0] in {'img', 'image'}):
                    setattr(ANTsImage, k, partialmethod(v))
class Dictlist(dict):
    """A dict whose values are lists: assignment appends instead of replacing."""

    def __setitem__(self, key, value):
        # dict.setdefault is the C-level base implementation, so it does not
        # re-enter this overridden __setitem__; it creates the list on first use.
        dict.setdefault(self, key, []).append(value)
class LabelImage(ANTsImage):
    """
    A LabelImage is a special class of ANTsImage which has discrete values
    and string labels or other metadata (e.g. another string label such as the
    "lobe" of the region) associated with each of the discrete values.
    A canonical example of a LabelImage is a brain label_image or parcellation.

    This class provides convenient functionality for manipulating and visualizing
    images where you have real values associated with aggregated image regions (e.g.
    if you have cortical thickness values associated with brain regions).

    Commonly-used functionality for LabelImage types:
        - create publication-quality figures of an label_image

    Nomenclature
    ------------
    - key : a string representing the name of the associated index in the atlas image
        - e.g. if the index is 1001 and the key may be InferiorTemporalGyrus`
    - value : an integer value in the atlas image
    - metakey : a string representing one of the possible sets of label key
        - e.g. 'Lobes' or 'Regions'

    Notes
    -----
    - indexing works by creating a separate dict for each metakey, where
    """
    def __init__(self, label_image, label_info=None, template=None):
        """
        Initialize a LabelImage

        ANTsR function: N/A

        Arguments
        ---------
        label_image : ANTsImage
            discrete (integer) image as label_image
        label_info : dict or pandas.DataFrame
            mapping between discrete values in `image` and string names
            - if dict, the keys should be the discrete integer label values
              and the values should be the label names or another dict with
              any metadata
            - if pd.DataFrame, the index (df.index) should be the discrete integer
              label values and the other column(s) should be the label names and
              any metadata
        template : ANTsImage
            default real-valued image to use for plotting or creating new images.
            This image should be in the same space as the `label_image` image and the
            two should be aligned.

        Example
        -------
        >>> import ants
        >>> square = np.zeros((20,20))
        >>> square[:10,:10] = 0
        >>> square[:10,10:] = 1
        >>> square[10:,:10] = 2
        >>> square[10:,10:] = 3
        >>> img = ants.from_numpy(square).astype('uint8')
        >>> label_image = ants.LabelImage(label_image=img, label_info=label_dict)
        """
        if label_image.pixeltype not in {'unsigned char', 'unsigned int'}:
            raise ValueError('Label images must have discrete pixeltype - got %s' % label_image.pixeltype)
        if label_image.components > 1:
            raise ValueError('Label images must have only one component - got %i' % label_image.components)

        if label_info is None:
            # NOTE(review): keys here are positional 0..n-1, not the actual
            # unique label values -- verify this matches caller expectations
            label_info = {k:'Label%i'%k for k in range(len(label_image.unique()))}

        if isinstance(label_info, pd.DataFrame):
            pass
        elif isinstance(label_info, dict):
            # FIX: the original appended `.to_dict()` to both branches, which
            # turned label_info back into a plain dict; generate_data() reads
            # `self.label_info.columns` / `.index`, so it must stay a DataFrame.
            if isinstance(label_info[list(label_info.keys())[0]], dict):
                label_info = pd.DataFrame(label_info).T
            else:
                label_info = pd.DataFrame(label_info, index=np.arange(len(label_info))).T
        else:
            raise ValueError('label_label_info argument must be pd.DataFrame')

        self.label_info = label_info
        self.label_image = label_image
        self.template = template
        self.generate_data()

        super(LabelImage, self).__init__(pixeltype=label_image.pixeltype, dimension=label_image.dimension,
                                         components=label_image.components, pointer=label_image.pointer)

    def generate_data(self):
        """Build the key/value lookup tables from the label_info DataFrame."""
        self._metakeys = list(self.label_info.columns)
        self._uniquekeys = {mk:list(np.unique(self.label_info[mk])) for mk in self._metakeys}
        self._keys = {mk:list(self.label_info[mk]) for mk in self._metakeys}
        self._values = list(self.label_info.index)
        self._n_values = len(self._values)

        items = {}
        for mk in self._metakeys:
            items[mk] = {}
            for k, v in zip(self.keys(mk), self.values()):
                if k in items[mk]:
                    # a key can map to several label values; collect them in a list
                    if isinstance(items[mk][k], list):
                        items[mk][k].append(v)
                    else:
                        items[mk][k] = [items[mk][k]] + [v]
                else:
                    items[mk][k] = v
        self._items = items

    def uniquekeys(self, metakey=None):
        """
        Get unique keys for a given metakey (or the full mapping if None).
        """
        if metakey is None:
            return self._uniquekeys
        else:
            if metakey not in self.metakeys():
                raise ValueError('metakey %s does not exist' % metakey)
            return self._uniquekeys[metakey]

    def keys(self, metakey=None):
        """Get the key list for a metakey, or the whole keys dict if None."""
        if metakey is None:
            return self._keys
        else:
            if metakey not in self.metakeys():
                raise ValueError('metakey %s does not exist' % metakey)
            return self._keys[metakey]

    def metakeys(self):
        """Return the list of metakeys (label_info columns)."""
        return self._metakeys

    def parentkey(self, key):
        """Return the metakey under which `key` appears; raise if none."""
        parent = None
        for mk in self.metakeys():
            if key in self.keys(mk):
                parent = mk
        if parent is None:
            raise ValueError('key does not have a parent')
        return parent

    def values(self):
        """Return the list of discrete label values (label_info index)."""
        return self._values

    def items(self, metakey):
        """Return the key->value(s) dict for a metakey."""
        if metakey not in self.metakeys():
            raise ValueError('metakey %s does not exist' % metakey)
        return self._items[metakey]

    def n_values(self):
        """Return the number of discrete label values."""
        return self._n_values

    def __getitem__(self, key):
        # get metakey of key
        metakey = self.parentkey(key)
        # get key,value pairs for metakey
        items = self.items(metakey)
        # return value at the given key
        return items[key]

    def __setitem__(self, key, value):
        """Assign `value` into the label image wherever `key`'s label value(s) occur."""
        label_value = self.__getitem__(key)
        if isinstance(label_value, list):
            if isinstance(value, list):
                if len(value) != len(label_value):
                    raise ValueError('must give either single value or one value '+\
                                     'for each index (got %i, expected %i)' % (len(value), len(label_value)))
                for old_val, new_val in zip(label_value, value):
                    self.label_image[self.label_image==old_val] = new_val
            else:
                for lv in label_value:
                    self.label_image[self.label_image==lv] = value
        else:
            self.label_image[self.label_image==label_value] = value

    def __repr__(self):
        s = 'LabelImage\n' +\
            '\t {:<10} : {} ({})\n'.format('Pixel Type', self.pixeltype, self.dtype)+\
            '\t {:<10} : {}\n'.format('Components', self.components)+\
            '\t {:<10} : {}\n'.format('Dimensions', self.shape)+\
            '\t {:<10} : {}\n'.format('Spacing', self.spacing)+\
            '\t {:<10} : {}\n'.format('Origin', self.origin)+\
            '\t {:<10} : {}\n'.format('Direction', self.direction.flatten())+\
            '\t {:<10} : {}\n'.format('Num Values', self.n_values())
        return s
def copy_image_info(reference, target):
    """
    Copy origin, direction, and spacing from one ANTsImage to another.

    ANTsR function: `antsCopyImageInfo`

    Arguments
    ---------
    reference : ANTsImage
        Image to get values from.
    target : ANTsImage
        Image to copy values to (mutated in place).

    Returns
    -------
    ANTsImage
        `target`, now carrying `reference`'s header information
    """
    for attr in ('origin', 'direction', 'spacing'):
        setter = getattr(target, 'set_' + attr)
        setter(getattr(reference, attr))
    return target
def set_origin(image, origin):
    """
    Set origin of ANTsImage (delegates to the image's own setter).

    ANTsR function: `antsSetOrigin`
    """
    image.set_origin(origin)
def get_origin(image):
    """
    Get origin of ANTsImage.

    ANTsR function: `antsGetOrigin`
    """
    return image.origin
def set_direction(image, direction):
    """
    Set direction of ANTsImage (delegates to the image's own setter).

    ANTsR function: `antsSetDirection`
    """
    image.set_direction(direction)
def get_direction(image):
    """
    Get direction of ANTsImage.

    ANTsR function: `antsGetDirection`
    """
    return image.direction
def set_spacing(image, spacing):
    """
    Set spacing of ANTsImage (delegates to the image's own setter).

    ANTsR function: `antsSetSpacing`
    """
    image.set_spacing(spacing)
def get_spacing(image):
    """
    Get spacing of ANTsImage.

    ANTsR function: `antsGetSpacing`
    """
    return image.spacing
def image_physical_space_consistency(image1, image2, tolerance=1e-2, datatype=False):
    """
    Check if two ANTsImage objects occupy the same physical space.

    ANTsR function: `antsImagePhysicalSpaceConsistency`

    Arguments
    ---------
    image1 : ANTsImage
    image2 : ANTsImage
        images to compare (docstring previously advertised `*images`, but
        the signature takes exactly two)
    tolerance : float
        tolerance when checking origin and spacing
    datatype : boolean
        if True, also check that pixeltype and number of components match

    Returns
    -------
    boolean
        True if the images share the same physical space, False otherwise

    Raises
    ------
    ValueError
        if either argument is not an ANTsImage
    """
    images = [image1, image2]
    img1 = images[0]
    for img2 in images[1:]:
        if (not isinstance(img1, ANTsImage)) or (not isinstance(img2, ANTsImage)):
            raise ValueError('Both images must be of class `AntsImage`')

        # image dimension check
        if img1.dimension != img2.dimension:
            return False

        # image spacing check
        space_diffs = sum([abs(s1 - s2) > tolerance for s1, s2 in zip(img1.spacing, img2.spacing)])
        if space_diffs > 0:
            return False

        # image origin check
        origin_diffs = sum([abs(o1 - o2) > tolerance for o1, o2 in zip(img1.origin, img2.origin)])
        if origin_diffs > 0:
            return False

        # image direction check (FIX: was misleadingly named `origin_diff`)
        direction_close = np.allclose(img1.direction, img2.direction, atol=tolerance)
        if not direction_close:
            return False

        # optional data-type check (FIX: truthiness instead of `== True`)
        if datatype:
            if img1.pixeltype != img2.pixeltype:
                return False
            if img1.components != img2.components:
                return False

    return True
def image_type_cast(image_list, pixeltype=None):
    """
    Cast a list of images to the highest pixeltype present in the list
    or all to a specified type.

    ANTsR function: `antsImageTypeCast`

    Arguments
    ---------
    image_list : list/tuple
        images to cast
    pixeltype : string (optional)
        pixeltype to cast to. If None, images will be cast to the highest
        precision pixeltype found in image_list

    Returns
    -------
    list of ANTsImages
        given images casted to new type (images already at the target
        pixeltype are returned unchanged)
    """
    if not isinstance(image_list, (list, tuple)):
        raise ValueError('image_list must be list of ANTsImage types')

    if pixeltype is None:
        # promote to the most precise pixeltype present; unknown types rank 0
        precedence = {'unsigned char': 0, 'unsigned int': 1, 'float': 2, 'double': 3}
        pixeltype = 'unsigned char'
        for img in image_list:
            p = img.pixeltype
            if precedence.get(p, 0) > precedence[pixeltype]:
                pixeltype = p

    return [img if img.pixeltype == pixeltype else img.clone(pixeltype)
            for img in image_list]
def allclose(image1, image2):
    """
    Check whether two images have numerically identical array values.
    """
    arr1 = image1.numpy()
    arr2 = image2.numpy()
    return np.allclose(arr1, arr2)
| [
"numpy.stack",
"pandas.DataFrame",
"functools.partialmethod",
"numpy.asarray",
"numpy.allclose",
"numpy.sort",
"numpy.rollaxis",
"os.path.expanduser",
"numpy.unique"
] | [((11412, 11440), 'os.path.expanduser', 'os.path.expanduser', (['filename'], {}), '(filename)\n', (11430, 11440), False, 'import os\n'), ((31292, 31351), 'numpy.allclose', 'np.allclose', (['img1.direction', 'img2.direction'], {'atol': 'tolerance'}), '(img1.direction, img2.direction, atol=tolerance)\n', (31303, 31351), True, 'import numpy as np\n'), ((5286, 5311), 'numpy.asarray', 'np.asarray', (['new_direction'], {}), '(new_direction)\n', (5296, 5311), True, 'import numpy as np\n'), ((7567, 7608), 'numpy.rollaxis', 'np.rollaxis', (['array', '(0)', '(self.dimension + 1)'], {}), '(array, 0, self.dimension + 1)\n', (7578, 7608), True, 'import numpy as np\n'), ((14091, 14111), 'numpy.sort', 'np.sort', (['unique_vals'], {}), '(unique_vals)\n', (14098, 14111), True, 'import numpy as np\n'), ((13684, 13706), 'numpy.stack', 'np.stack', (['[amin, amax]'], {}), '([amin, amax])\n', (13692, 13706), True, 'import numpy as np\n'), ((25066, 25096), 'numpy.unique', 'np.unique', (['self.label_info[mk]'], {}), '(self.label_info[mk])\n', (25075, 25096), True, 'import numpy as np\n'), ((20200, 20216), 'functools.partialmethod', 'partialmethod', (['v'], {}), '(v)\n', (20213, 20216), False, 'from functools import partialmethod\n'), ((20461, 20477), 'functools.partialmethod', 'partialmethod', (['v'], {}), '(v)\n', (20474, 20477), False, 'from functools import partialmethod\n'), ((20722, 20738), 'functools.partialmethod', 'partialmethod', (['v'], {}), '(v)\n', (20735, 20738), False, 'from functools import partialmethod\n'), ((20965, 20981), 'functools.partialmethod', 'partialmethod', (['v'], {}), '(v)\n', (20978, 20981), False, 'from functools import partialmethod\n'), ((24374, 24398), 'pandas.DataFrame', 'pd.DataFrame', (['label_info'], {}), '(label_info)\n', (24386, 24398), True, 'import pandas as pd\n'), ((6786, 6805), 'numpy.asarray', 'np.asarray', (['memview'], {}), '(memview)\n', (6796, 6805), True, 'import numpy as np\n')] |
"""
Tests for all functions in cost_function.py
"""
import numpy as np
from pyquil.quil import (QubitPlaceholder,
get_default_qubit_mapping)
from pyquil.api import WavefunctionSimulator
from pyquil import get_qc, Program
from pyquil.gates import RX, RY, X
from pyquil.paulis import PauliSum, PauliTerm
from entropica_qaoa.vqe.cost_function import (PrepareAndMeasureOnWFSim,
PrepareAndMeasureOnQVM)
def test_PrepareAndMeasureOnWFSim():
    """Exact wavefunction simulation: cost function returns exact energy and zero variance."""
    p = Program()
    params = p.declare("params", memory_type="REAL", memory_size=2)
    p.inst(RX(params[0], 0))
    p.inst(RX(params[1], 1))

    def make_memory_map(params):
        return {"params": params}

    # ham = PauliSum.from_compact_str("1.0*Z0 + 1.0*Z1")
    term1 = PauliTerm("Z", 0)
    term2 = PauliTerm("Z", 1)
    ham = PauliSum([term1, term2])
    sim = WavefunctionSimulator()
    cost_fn = PrepareAndMeasureOnWFSim(p,
                                      make_memory_map,
                                      ham,
                                      sim,
                                      scalar_cost_function=False,
                                      enable_logging=True)
    out = cost_fn([np.pi, np.pi / 2])
    print(cost_fn.log[0].fun)
    # RX(pi) flips qubit 0 (<Z0> = -1); RX(pi/2) leaves <Z1> = 0,
    # so the exact (energy, std) pair is (-1, 0)
    assert np.allclose(cost_fn.log[0].fun, (-1.0, 0.0))
    assert np.allclose(out, (-1, 0.0))
def test_PrepareAndMeasureOnWFSim_QubitPlaceholders():
    """Same circuit as the plain WFSim test, but with QubitPlaceholders resolved via an explicit qubit mapping."""
    q1, q2 = QubitPlaceholder(), QubitPlaceholder()
    p = Program()
    params = p.declare("params", memory_type="REAL", memory_size=2)
    p.inst(RX(params[0], q1))
    p.inst(RX(params[1], q2))

    def make_memory_map(params):
        return {"params": params}

    ham = PauliSum([PauliTerm("Z", q1), PauliTerm("Z", q2)])
    qubit_mapping = get_default_qubit_mapping(p)
    sim = WavefunctionSimulator()
    cost_fn = PrepareAndMeasureOnWFSim(p, make_memory_map, ham, sim,
                                      enable_logging=True,
                                      qubit_mapping=qubit_mapping,
                                      scalar_cost_function=False,
                                      )
    out = cost_fn([np.pi, np.pi / 2])
    # exact simulation: (energy, std) == (-1, 0)
    assert np.allclose(cost_fn.log[0].fun, (-1.0, 0.0))
    assert np.allclose(out, (-1, 0.0))
def test_PrepareAndMeasureOnQVM():
    """Sampling-based QVM evaluation of the same 2-qubit cost function."""
    prepare_ansatz = Program()
    param_register = prepare_ansatz.declare(
        "params", memory_type="REAL", memory_size=2)
    prepare_ansatz.inst(RX(param_register[0], 0))
    prepare_ansatz.inst(RX(param_register[1], 1))

    def make_memory_map(params):
        return {"params": params}

    # ham = PauliSum.from_compact_str("1.0*Z0 + 1.0*Z1")
    term1 = PauliTerm("Z", 0)
    term2 = PauliTerm("Z", 1)
    ham = PauliSum([term1, term2])
    qvm = get_qc("2q-qvm")
    cost_fn = PrepareAndMeasureOnQVM(prepare_ansatz, make_memory_map, qvm=qvm,
                                    hamiltonian=ham, enable_logging=True,
                                    scalar_cost_function=True,
                                    base_numshots=10,
                                    nshots=10)
    out = cost_fn([np.pi, np.pi / 2])
    # loose rtol because the QVM estimate carries sampling noise
    assert np.allclose(cost_fn.log[0].fun, (-1.0, 0.1), rtol=1.1)
    assert np.allclose(out, -1, rtol=1.1)
def test_PrepareAndMeasureOnQVM_QubitPlaceholders():
    """QVM evaluation with QubitPlaceholders resolved via an explicit qubit mapping."""
    q1, q2 = QubitPlaceholder(), QubitPlaceholder()
    prepare_ansatz = Program()
    param_register = prepare_ansatz.declare(
        "params", memory_type="REAL", memory_size=2)
    prepare_ansatz.inst(RX(param_register[0], q1))
    prepare_ansatz.inst(RX(param_register[1], q2))

    def make_memory_map(params):
        return {"params": params}

    ham = PauliSum([PauliTerm("Z", q1), PauliTerm("Z",q2)])
    qubit_mapping = get_default_qubit_mapping(prepare_ansatz)
    qvm = get_qc("2q-qvm")
    cost_fn = PrepareAndMeasureOnQVM(prepare_ansatz, make_memory_map,
                                    qvm=qvm,
                                    hamiltonian=ham, enable_logging=True,
                                    scalar_cost_function=False,
                                    base_numshots=10,
                                    qubit_mapping=qubit_mapping)
    out = cost_fn([np.pi, np.pi / 2], nshots=10)
    # loose rtol because the QVM estimate carries sampling noise
    assert np.allclose(cost_fn.log[0].fun, (-1.0, 0.1), rtol=1.1)
    assert np.allclose(out, (-1, 0.1), rtol=1.1)
def test_PrepareAndMeasureOnQVM_QubitPlaceholders_nondiag_hamiltonian():
    """QVM evaluation of a non-diagonal (X/Y-containing) hamiltonian, which requires basis-change measurement circuits."""
    q1, q2, q3 = QubitPlaceholder(), QubitPlaceholder(), QubitPlaceholder()
    ham = PauliTerm("Y", q1)*PauliTerm("Z",q3)
    ham += PauliTerm("Y", q1)*PauliTerm("Z",q2,-0.3)
    ham += PauliTerm("Y", q1)*PauliTerm("X",q3, 2.0)
    params = [3.0,0.4,4.5]

    prepare_ansatz = Program()
    param_register = prepare_ansatz.declare(
        "params", memory_type="REAL", memory_size=3)
    prepare_ansatz.inst(RX(param_register[0], q1))
    prepare_ansatz.inst(RY(param_register[1], q2))
    prepare_ansatz.inst(RY(param_register[2], q3))

    def make_memory_map(params):
        return {"params": params}

    qubit_mapping = get_default_qubit_mapping(prepare_ansatz)
    qvm = get_qc("3q-qvm")
    cost_fn = PrepareAndMeasureOnQVM(prepare_ansatz, make_memory_map,
                                    qvm=qvm,
                                    hamiltonian=ham,
                                    scalar_cost_function=False,
                                    base_numshots=100,
                                    qubit_mapping=qubit_mapping)
    out = cost_fn(params, nshots=10)
    # loose rtol: sampled estimate of the analytically computed (0.346, 0.07)
    assert np.allclose(out, (0.346, 0.07), rtol=1.1)
"pyquil.quil.QubitPlaceholder",
"entropica_qaoa.vqe.cost_function.PrepareAndMeasureOnQVM",
"pyquil.paulis.PauliTerm",
"numpy.allclose",
"pyquil.get_qc",
"entropica_qaoa.vqe.cost_function.PrepareAndMeasureOnWFSim",
"pyquil.api.WavefunctionSimulator",
"pyquil.gates.RY",
"pyquil.gates.RX",
"pyquil.Pr... | [((518, 527), 'pyquil.Program', 'Program', ([], {}), '()\n', (525, 527), False, 'from pyquil import get_qc, Program\n'), ((791, 808), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', '(0)'], {}), "('Z', 0)\n", (800, 808), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((821, 838), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', '(1)'], {}), "('Z', 1)\n", (830, 838), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((849, 873), 'pyquil.paulis.PauliSum', 'PauliSum', (['[term1, term2]'], {}), '([term1, term2])\n', (857, 873), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((884, 907), 'pyquil.api.WavefunctionSimulator', 'WavefunctionSimulator', ([], {}), '()\n', (905, 907), False, 'from pyquil.api import WavefunctionSimulator\n'), ((922, 1030), 'entropica_qaoa.vqe.cost_function.PrepareAndMeasureOnWFSim', 'PrepareAndMeasureOnWFSim', (['p', 'make_memory_map', 'ham', 'sim'], {'scalar_cost_function': '(False)', 'enable_logging': '(True)'}), '(p, make_memory_map, ham, sim, scalar_cost_function\n =False, enable_logging=True)\n', (946, 1030), False, 'from entropica_qaoa.vqe.cost_function import PrepareAndMeasureOnWFSim, PrepareAndMeasureOnQVM\n'), ((1300, 1344), 'numpy.allclose', 'np.allclose', (['cost_fn.log[0].fun', '(-1.0, 0.0)'], {}), '(cost_fn.log[0].fun, (-1.0, 0.0))\n', (1311, 1344), True, 'import numpy as np\n'), ((1356, 1383), 'numpy.allclose', 'np.allclose', (['out', '(-1, 0.0)'], {}), '(out, (-1, 0.0))\n', (1367, 1383), True, 'import numpy as np\n'), ((1501, 1510), 'pyquil.Program', 'Program', ([], {}), '()\n', (1508, 1510), False, 'from pyquil import get_qc, Program\n'), ((1789, 1817), 'pyquil.quil.get_default_qubit_mapping', 'get_default_qubit_mapping', (['p'], {}), '(p)\n', (1814, 1817), False, 'from pyquil.quil import QubitPlaceholder, get_default_qubit_mapping\n'), ((1828, 1851), 'pyquil.api.WavefunctionSimulator', 'WavefunctionSimulator', ([], {}), '()\n', (1849, 1851), False, 'from pyquil.api import 
WavefunctionSimulator\n'), ((1866, 2002), 'entropica_qaoa.vqe.cost_function.PrepareAndMeasureOnWFSim', 'PrepareAndMeasureOnWFSim', (['p', 'make_memory_map', 'ham', 'sim'], {'enable_logging': '(True)', 'qubit_mapping': 'qubit_mapping', 'scalar_cost_function': '(False)'}), '(p, make_memory_map, ham, sim, enable_logging=True,\n qubit_mapping=qubit_mapping, scalar_cost_function=False)\n', (1890, 2002), False, 'from entropica_qaoa.vqe.cost_function import PrepareAndMeasureOnWFSim, PrepareAndMeasureOnQVM\n'), ((2206, 2250), 'numpy.allclose', 'np.allclose', (['cost_fn.log[0].fun', '(-1.0, 0.0)'], {}), '(cost_fn.log[0].fun, (-1.0, 0.0))\n', (2217, 2250), True, 'import numpy as np\n'), ((2262, 2289), 'numpy.allclose', 'np.allclose', (['out', '(-1, 0.0)'], {}), '(out, (-1, 0.0))\n', (2273, 2289), True, 'import numpy as np\n'), ((2348, 2357), 'pyquil.Program', 'Program', ([], {}), '()\n', (2355, 2357), False, 'from pyquil import get_qc, Program\n'), ((2693, 2710), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', '(0)'], {}), "('Z', 0)\n", (2702, 2710), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((2723, 2740), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', '(1)'], {}), "('Z', 1)\n", (2732, 2740), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((2751, 2775), 'pyquil.paulis.PauliSum', 'PauliSum', (['[term1, term2]'], {}), '([term1, term2])\n', (2759, 2775), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((2786, 2802), 'pyquil.get_qc', 'get_qc', (['"""2q-qvm"""'], {}), "('2q-qvm')\n", (2792, 2802), False, 'from pyquil import get_qc, Program\n'), ((2817, 2983), 'entropica_qaoa.vqe.cost_function.PrepareAndMeasureOnQVM', 'PrepareAndMeasureOnQVM', (['prepare_ansatz', 'make_memory_map'], {'qvm': 'qvm', 'hamiltonian': 'ham', 'enable_logging': '(True)', 'scalar_cost_function': '(True)', 'base_numshots': '(10)', 'nshots': '(10)'}), '(prepare_ansatz, make_memory_map, qvm=qvm,\n hamiltonian=ham, enable_logging=True, 
scalar_cost_function=True,\n base_numshots=10, nshots=10)\n', (2839, 2983), False, 'from entropica_qaoa.vqe.cost_function import PrepareAndMeasureOnWFSim, PrepareAndMeasureOnQVM\n'), ((3173, 3227), 'numpy.allclose', 'np.allclose', (['cost_fn.log[0].fun', '(-1.0, 0.1)'], {'rtol': '(1.1)'}), '(cost_fn.log[0].fun, (-1.0, 0.1), rtol=1.1)\n', (3184, 3227), True, 'import numpy as np\n'), ((3239, 3269), 'numpy.allclose', 'np.allclose', (['out', '(-1)'], {'rtol': '(1.1)'}), '(out, -1, rtol=1.1)\n', (3250, 3269), True, 'import numpy as np\n'), ((3398, 3407), 'pyquil.Program', 'Program', ([], {}), '()\n', (3405, 3407), False, 'from pyquil import get_qc, Program\n'), ((3757, 3798), 'pyquil.quil.get_default_qubit_mapping', 'get_default_qubit_mapping', (['prepare_ansatz'], {}), '(prepare_ansatz)\n', (3782, 3798), False, 'from pyquil.quil import QubitPlaceholder, get_default_qubit_mapping\n'), ((3809, 3825), 'pyquil.get_qc', 'get_qc', (['"""2q-qvm"""'], {}), "('2q-qvm')\n", (3815, 3825), False, 'from pyquil import get_qc, Program\n'), ((3840, 4025), 'entropica_qaoa.vqe.cost_function.PrepareAndMeasureOnQVM', 'PrepareAndMeasureOnQVM', (['prepare_ansatz', 'make_memory_map'], {'qvm': 'qvm', 'hamiltonian': 'ham', 'enable_logging': '(True)', 'scalar_cost_function': '(False)', 'base_numshots': '(10)', 'qubit_mapping': 'qubit_mapping'}), '(prepare_ansatz, make_memory_map, qvm=qvm,\n hamiltonian=ham, enable_logging=True, scalar_cost_function=False,\n base_numshots=10, qubit_mapping=qubit_mapping)\n', (3862, 4025), False, 'from entropica_qaoa.vqe.cost_function import PrepareAndMeasureOnWFSim, PrepareAndMeasureOnQVM\n'), ((4263, 4317), 'numpy.allclose', 'np.allclose', (['cost_fn.log[0].fun', '(-1.0, 0.1)'], {'rtol': '(1.1)'}), '(cost_fn.log[0].fun, (-1.0, 0.1), rtol=1.1)\n', (4274, 4317), True, 'import numpy as np\n'), ((4329, 4366), 'numpy.allclose', 'np.allclose', (['out', '(-1, 0.1)'], {'rtol': '(1.1)'}), '(out, (-1, 0.1), rtol=1.1)\n', (4340, 4366), True, 'import numpy as np\n'), 
((4720, 4729), 'pyquil.Program', 'Program', ([], {}), '()\n', (4727, 4729), False, 'from pyquil import get_qc, Program\n'), ((5070, 5111), 'pyquil.quil.get_default_qubit_mapping', 'get_default_qubit_mapping', (['prepare_ansatz'], {}), '(prepare_ansatz)\n', (5095, 5111), False, 'from pyquil.quil import QubitPlaceholder, get_default_qubit_mapping\n'), ((5122, 5138), 'pyquil.get_qc', 'get_qc', (['"""3q-qvm"""'], {}), "('3q-qvm')\n", (5128, 5138), False, 'from pyquil import get_qc, Program\n'), ((5153, 5318), 'entropica_qaoa.vqe.cost_function.PrepareAndMeasureOnQVM', 'PrepareAndMeasureOnQVM', (['prepare_ansatz', 'make_memory_map'], {'qvm': 'qvm', 'hamiltonian': 'ham', 'scalar_cost_function': '(False)', 'base_numshots': '(100)', 'qubit_mapping': 'qubit_mapping'}), '(prepare_ansatz, make_memory_map, qvm=qvm,\n hamiltonian=ham, scalar_cost_function=False, base_numshots=100,\n qubit_mapping=qubit_mapping)\n', (5175, 5318), False, 'from entropica_qaoa.vqe.cost_function import PrepareAndMeasureOnWFSim, PrepareAndMeasureOnQVM\n'), ((5544, 5585), 'numpy.allclose', 'np.allclose', (['out', '(0.346, 0.07)'], {'rtol': '(1.1)'}), '(out, (0.346, 0.07), rtol=1.1)\n', (5555, 5585), True, 'import numpy as np\n'), ((607, 623), 'pyquil.gates.RX', 'RX', (['params[0]', '(0)'], {}), '(params[0], 0)\n', (609, 623), False, 'from pyquil.gates import RX, RY, X\n'), ((636, 652), 'pyquil.gates.RX', 'RX', (['params[1]', '(1)'], {}), '(params[1], 1)\n', (638, 652), False, 'from pyquil.gates import RX, RY, X\n'), ((1454, 1472), 'pyquil.quil.QubitPlaceholder', 'QubitPlaceholder', ([], {}), '()\n', (1470, 1472), False, 'from pyquil.quil import QubitPlaceholder, get_default_qubit_mapping\n'), ((1474, 1492), 'pyquil.quil.QubitPlaceholder', 'QubitPlaceholder', ([], {}), '()\n', (1490, 1492), False, 'from pyquil.quil import QubitPlaceholder, get_default_qubit_mapping\n'), ((1590, 1607), 'pyquil.gates.RX', 'RX', (['params[0]', 'q1'], {}), '(params[0], q1)\n', (1592, 1607), False, 'from pyquil.gates import 
RX, RY, X\n'), ((1620, 1637), 'pyquil.gates.RX', 'RX', (['params[1]', 'q2'], {}), '(params[1], q2)\n', (1622, 1637), False, 'from pyquil.gates import RX, RY, X\n'), ((2480, 2504), 'pyquil.gates.RX', 'RX', (['param_register[0]', '(0)'], {}), '(param_register[0], 0)\n', (2482, 2504), False, 'from pyquil.gates import RX, RY, X\n'), ((2530, 2554), 'pyquil.gates.RX', 'RX', (['param_register[1]', '(1)'], {}), '(param_register[1], 1)\n', (2532, 2554), False, 'from pyquil.gates import RX, RY, X\n'), ((3338, 3356), 'pyquil.quil.QubitPlaceholder', 'QubitPlaceholder', ([], {}), '()\n', (3354, 3356), False, 'from pyquil.quil import QubitPlaceholder, get_default_qubit_mapping\n'), ((3358, 3376), 'pyquil.quil.QubitPlaceholder', 'QubitPlaceholder', ([], {}), '()\n', (3374, 3376), False, 'from pyquil.quil import QubitPlaceholder, get_default_qubit_mapping\n'), ((3530, 3555), 'pyquil.gates.RX', 'RX', (['param_register[0]', 'q1'], {}), '(param_register[0], q1)\n', (3532, 3555), False, 'from pyquil.gates import RX, RY, X\n'), ((3581, 3606), 'pyquil.gates.RX', 'RX', (['param_register[1]', 'q2'], {}), '(param_register[1], q2)\n', (3583, 3606), False, 'from pyquil.gates import RX, RY, X\n'), ((4459, 4477), 'pyquil.quil.QubitPlaceholder', 'QubitPlaceholder', ([], {}), '()\n', (4475, 4477), False, 'from pyquil.quil import QubitPlaceholder, get_default_qubit_mapping\n'), ((4479, 4497), 'pyquil.quil.QubitPlaceholder', 'QubitPlaceholder', ([], {}), '()\n', (4495, 4497), False, 'from pyquil.quil import QubitPlaceholder, get_default_qubit_mapping\n'), ((4499, 4517), 'pyquil.quil.QubitPlaceholder', 'QubitPlaceholder', ([], {}), '()\n', (4515, 4517), False, 'from pyquil.quil import QubitPlaceholder, get_default_qubit_mapping\n'), ((4528, 4546), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Y"""', 'q1'], {}), "('Y', q1)\n", (4537, 4546), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((4547, 4565), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', 'q3'], {}), "('Z', q3)\n", (4556, 
4565), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((4576, 4594), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Y"""', 'q1'], {}), "('Y', q1)\n", (4585, 4594), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((4595, 4619), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', 'q2', '(-0.3)'], {}), "('Z', q2, -0.3)\n", (4604, 4619), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((4629, 4647), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Y"""', 'q1'], {}), "('Y', q1)\n", (4638, 4647), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((4648, 4671), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""X"""', 'q3', '(2.0)'], {}), "('X', q3, 2.0)\n", (4657, 4671), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((4852, 4877), 'pyquil.gates.RX', 'RX', (['param_register[0]', 'q1'], {}), '(param_register[0], q1)\n', (4854, 4877), False, 'from pyquil.gates import RX, RY, X\n'), ((4903, 4928), 'pyquil.gates.RY', 'RY', (['param_register[1]', 'q2'], {}), '(param_register[1], q2)\n', (4905, 4928), False, 'from pyquil.gates import RX, RY, X\n'), ((4954, 4979), 'pyquil.gates.RY', 'RY', (['param_register[2]', 'q3'], {}), '(param_register[2], q3)\n', (4956, 4979), False, 'from pyquil.gates import RX, RY, X\n'), ((1728, 1746), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', 'q1'], {}), "('Z', q1)\n", (1737, 1746), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((1748, 1766), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', 'q2'], {}), "('Z', q2)\n", (1757, 1766), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((3697, 3715), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', 'q1'], {}), "('Z', q1)\n", (3706, 3715), False, 'from pyquil.paulis import PauliSum, PauliTerm\n'), ((3717, 3735), 'pyquil.paulis.PauliTerm', 'PauliTerm', (['"""Z"""', 'q2'], {}), "('Z', q2)\n", (3726, 3735), False, 'from pyquil.paulis import PauliSum, PauliTerm\n')] |
import sys
import numpy as np
from pprint import pprint
import time
import os
import argparse as argparse
import json
import queue
OBSTACLE = 100     # map cell value marking an impassable tile
INIT_VAL = 10000   # "not yet visited" sentinel written into traversedMap
DESTINATION = -1   # marker stored in traversedMap at the destination tile
def loadProject(projectFile):
    """Read a project description from a JSON file and return it as a dict."""
    with open(projectFile) as handle:
        return json.load(handle)
def searchPath(curLoc, weight=0):
    """Recursive flood fill from curLoc, recording in traversedMap the
    cheapest step count found so far for every reachable free tile.

    Relies on module globals: rawMap (the obstacle map), traversedMap
    (cost map, pre-filled with INIT_VAL), destLoc, numRows, numCols and
    pathExists (set True once the destination is reached).

    :param curLoc: (row, col) tile to expand
    :param weight: number of steps taken to reach curLoc
    """
    global project
    global rawMap
    global traversedMap
    global destLoc
    global visitedNodes
    global numRows
    global numCols
    global pathExists
    row = curLoc[0]
    col = curLoc[1]
    # Reached the destination: record the cost and remember success.
    if row == destLoc[0] and col == destLoc[1]:
        traversedMap[destLoc[0]][destLoc[1]] = weight
        pathExists = True
        return
    # Prune: a cheaper (or equal-cost) route to this tile is already known.
    if traversedMap[row][col] <= weight:
        return
    traversedMap[row][col] = weight
    # recurse east
    if col+1 < numCols and rawMap[row][col+1] != OBSTACLE:
        searchPath((row, col+1), weight+1)
    # recurse west (was `col-1 > 0`, which wrongly excluded column 0)
    if col-1 >= 0 and rawMap[row][col-1] != OBSTACLE:
        searchPath((row, col-1), weight+1)
    # recurse north (was `row-1 > 0`, which wrongly excluded row 0)
    if row-1 >= 0 and rawMap[row-1][col] != OBSTACLE:
        searchPath((row-1, col), weight+1)
    # recurse south
    if row+1 < numRows and rawMap[row+1][col] != OBSTACLE:
        searchPath((row+1, col), weight+1)
def extractPath(curloc, weight):
    """Walk back from curloc toward originLoc along strictly decreasing
    weights in traversedMap, appending each visited tile to pathTiles.

    Returns True once the origin is reached; returns False when no
    neighbour has a smaller weight (dead end), so the caller can
    backtrack by popping the tile it just appended.

    :param curloc: (row, col) tile currently being traced
    :param weight: traversedMap weight of the tile we came from
    """
    global pathTiles
    row = curloc[0]
    col = curloc[1]
    # Managed to reach the origin: path is complete.
    if row == originLoc[0] and col == originLoc[1]:
        return True
    # check east
    if col+1 < numCols and traversedMap[row][col+1] < weight:
        pathTiles.append(curloc)
        if extractPath((row, col+1), traversedMap[row][col]) == False:
            pathTiles.pop()
        else:
            return True
    # check west (was `col-1 > 0`: column 0 was skipped and index -1 would
    # silently wrap to the last column of the numpy array)
    if col-1 >= 0 and traversedMap[row][col-1] < weight:
        pathTiles.append(curloc)
        if extractPath((row, col-1), traversedMap[row][col]) == False:
            pathTiles.pop()
        else:
            return True
    # check north (was `row+1 > 0`, which never guarded the row-1 access)
    if row-1 >= 0 and traversedMap[row-1][col] < weight:
        pathTiles.append(curloc)
        if extractPath((row-1, col), traversedMap[row][col]) == False:
            pathTiles.pop()
        else:
            return True
    # check south (was `row-1 < numRows`, which never guarded row+1 and
    # could raise IndexError on the last row)
    if row+1 < numRows and traversedMap[row+1][col] < weight:
        pathTiles.append(curloc)
        if extractPath((row+1, col), traversedMap[row][col]) == False:
            pathTiles.pop()
        else:
            return True
    return False
def findPath(projectFile, fromRow, fromCol, toRow, toCol):
    """Load a project map, flood-fill it from the source tile and, if the
    destination is reachable, extract and print the path.

    Also dumps the cost map to 'traversed.txt' for debugging.

    :param projectFile: path to the JSON project file (numRows/numCols/map)
    :param fromRow, fromCol: source tile coordinates
    :param toRow, toCol: destination tile coordinates
    """
    global project
    global rawMap
    global traversedMap
    global destLoc
    global originLoc
    global visitedNodes
    global numRows
    global numCols
    project = loadProject(projectFile)
    destLoc = (toRow, toCol)
    originLoc = (fromRow, fromCol)
    numRows = int(project['numRows'])
    numCols = int(project['numCols'])
    rawMap = np.array(json.loads(project['map']))
    rawMap = np.reshape(rawMap, (numRows, numCols))
    traversedMap = np.full(rawMap.shape, INIT_VAL, dtype='int16')
    # Sanity check: refuse to navigate from or to an obstacle tile.
    # (Was: compared against the literal 100 instead of OBSTACLE, printed a
    # warning but carried on searching anyway.)
    if rawMap[fromRow, fromCol] == OBSTACLE:
        print("Source is Obstacle, cannot navigate.")
        return
    if rawMap[toRow, toCol] == OBSTACLE:
        print("Destination is Obstacle, cannot navigate.")
        return
    traversedMap[destLoc[0]][destLoc[1]] = DESTINATION
    searchPath((fromRow, fromCol), 0)
    np.savetxt('traversed.txt', traversedMap, '%5d')
    if pathExists:
        extractPath(destLoc, traversedMap[destLoc[0]][destLoc[1]])
        pprint(pathTiles)
# --- Module-level state shared (via `global`) by the path functions ---
project = None       # parsed project JSON (dict)
rawMap = None        # 2-D numpy map; cells equal to OBSTACLE are walls
traversedMap = None  # cheapest step count found so far for each tile
destLoc = None       # (row, col) of the destination tile
originLoc = None     # (row, col) of the starting tile
visitedNodes = 0
numRows = 0
numCols = 0
pathExists = False   # set True by searchPath once the destination is reached
pathTiles = []       # tiles of the extracted path, filled by extractPath
if __name__ == "__main__":
    # Command-line entry point: project file plus source/target coordinates.
    ap = argparse.ArgumentParser()
    ap.add_argument("-pp", "--project.path", required=True,
            help="Path to Find My Car Project file.")
    ap.add_argument("-fr", "--from.row", required=True,
            help="From Row.")
    ap.add_argument("-fc", "--from.col", required=True,
            help="From Column.")
    ap.add_argument("-tr", "--to.row", required=True,
            help="To Row.")
    ap.add_argument("-tc", "--to.col", required=True,
            help="To Column.")
    args = vars(ap.parse_args())
    findPath(args['project.path'],int(args['from.row']),int(args['from.col']),int(args['to.row']),int(args['to.col']))
    print("\n\nDone...")
| [
"numpy.full",
"json.load",
"argparse.ArgumentParser",
"json.loads",
"numpy.savetxt",
"numpy.reshape",
"pprint.pprint"
] | [((3556, 3594), 'numpy.reshape', 'np.reshape', (['rawMap', '(numRows, numCols)'], {}), '(rawMap, (numRows, numCols))\n', (3566, 3594), True, 'import numpy as np\n'), ((3614, 3660), 'numpy.full', 'np.full', (['rawMap.shape', 'INIT_VAL'], {'dtype': '"""int16"""'}), "(rawMap.shape, INIT_VAL, dtype='int16')\n", (3621, 3660), True, 'import numpy as np\n'), ((3953, 4001), 'numpy.savetxt', 'np.savetxt', (['"""traversed.txt"""', 'traversedMap', '"""%5d"""'], {}), "('traversed.txt', traversedMap, '%5d')\n", (3963, 4001), True, 'import numpy as np\n'), ((4309, 4334), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4332, 4334), True, 'import argparse as argparse\n'), ((261, 273), 'json.load', 'json.load', (['f'], {}), '(f)\n', (270, 273), False, 'import json\n'), ((3515, 3541), 'json.loads', 'json.loads', (["project['map']"], {}), "(project['map'])\n", (3525, 3541), False, 'import json\n'), ((4097, 4114), 'pprint.pprint', 'pprint', (['pathTiles'], {}), '(pathTiles)\n', (4103, 4114), False, 'from pprint import pprint\n')] |
# Build one HDF5 table joining the DMO<->hydro halo matching catalogue with
# merger-tree properties (formation redshift, merger count) for a TNG box.
import pandas as pd
import h5py
import numpy as np
# Which IllustrisTNG box to process; the box-size tag differs per run.
tng = 300
if tng==100:
    extension = 'L75n1820'
elif tng == 300:
    extension = 'L205n2500'
else:
    extension = 'NotFound'
data_path = '/cosma5/data/dp004/hvrn44/HOD/'
# The TNG100 matching file carries an extra 'TNG' suffix; TNG300 does not.
if tng==100:
    matching_file = f'MatchedHaloes_{extension}TNG.dat'
else:
    matching_file = f'MatchedHaloes_{extension}.dat'
mergertree_file = f'MergerTree_{extension}TNG_DM_ext_New.hdf5'
output_path = '/cosma7/data/dp004/dc-cues1/tng_dataframes/'
output_file = f'TNG{tng}Dark_Hydro_MergerTree.hdf5'
# ------------------ Halo matching between dmo and hydro simulations
matching_df = pd.read_csv(data_path + matching_file,
        delimiter = ' ', skiprows = 1,
        names = ['ID_DMO', 'ID_HYDRO', 'M200_DMO', 'M200_HYDRO'])
# ----------- Read in properties from the merger trees
with h5py.File(data_path + mergertree_file, 'r') as hf:
    formation_time = hf['Haloes']['z0p50'][:]
    # Merger counts are only available in the TNG300 tree file.
    if tng == 300:
        n_mergers = hf['Haloes']['NMerg'][:]
    #mass_peak = hf['Haloes']['Mpeak'][:]
    #vpeak = hf['Haloes']['Vpeak'][:]
    mergertree_ids = hf['Haloes']['Index'][:]
# Stack per-halo columns into a (n_haloes, n_features) array.
if tng==300:
    mergertree_data = np.vstack([mergertree_ids, formation_time, n_mergers,]).T
    #mass_peak, vpeak]).T
else:
    mergertree_data = np.vstack([mergertree_ids, formation_time]).T
if tng==300:
    mergertree_df = pd.DataFrame(data = mergertree_data,
            columns = ['ID_DMO', 'Formation Time', 'Nmergers',])#'MassPeak', 'vpeak'])
else:
    mergertree_df = pd.DataFrame(data = mergertree_data,
            columns = ['ID_DMO', 'Formation Time' ])
# Inner join: keep only haloes present in both catalogues.
mergertree_df = pd.merge(matching_df, mergertree_df, on = ['ID_DMO'], how = 'inner')
print(mergertree_df.head(3))
mergertree_df.to_hdf(output_path+ output_file, key = 'df', mode = 'w')
| [
"pandas.DataFrame",
"h5py.File",
"pandas.read_csv",
"pandas.merge",
"numpy.vstack"
] | [((616, 742), 'pandas.read_csv', 'pd.read_csv', (['(data_path + matching_file)'], {'delimiter': '""" """', 'skiprows': '(1)', 'names': "['ID_DMO', 'ID_HYDRO', 'M200_DMO', 'M200_HYDRO']"}), "(data_path + matching_file, delimiter=' ', skiprows=1, names=[\n 'ID_DMO', 'ID_HYDRO', 'M200_DMO', 'M200_HYDRO'])\n", (627, 742), True, 'import pandas as pd\n'), ((1664, 1728), 'pandas.merge', 'pd.merge', (['matching_df', 'mergertree_df'], {'on': "['ID_DMO']", 'how': '"""inner"""'}), "(matching_df, mergertree_df, on=['ID_DMO'], how='inner')\n", (1672, 1728), True, 'import pandas as pd\n'), ((848, 891), 'h5py.File', 'h5py.File', (['(data_path + mergertree_file)', '"""r"""'], {}), "(data_path + mergertree_file, 'r')\n", (857, 891), False, 'import h5py\n'), ((1396, 1484), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'mergertree_data', 'columns': "['ID_DMO', 'Formation Time', 'Nmergers']"}), "(data=mergertree_data, columns=['ID_DMO', 'Formation Time',\n 'Nmergers'])\n", (1408, 1484), True, 'import pandas as pd\n'), ((1551, 1623), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'mergertree_data', 'columns': "['ID_DMO', 'Formation Time']"}), "(data=mergertree_data, columns=['ID_DMO', 'Formation Time'])\n", (1563, 1623), True, 'import pandas as pd\n'), ((1179, 1233), 'numpy.vstack', 'np.vstack', (['[mergertree_ids, formation_time, n_mergers]'], {}), '([mergertree_ids, formation_time, n_mergers])\n', (1188, 1233), True, 'import numpy as np\n'), ((1315, 1358), 'numpy.vstack', 'np.vstack', (['[mergertree_ids, formation_time]'], {}), '([mergertree_ids, formation_time])\n', (1324, 1358), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import pytest
import numpy as np
from . import (
get_standard_values_images_box,
get_tensor_decomposition_images_box,
assert_output_properties_box,
assert_output_properties_box_linear,
)
import tensorflow.python.keras.backend as K
from tensorflow.keras.layers import Reshape, Permute
from decomon.layers.decomon_layers import to_monotonic
from decomon.layers.decomon_reshape import DecomonPermute, DecomonReshape
@pytest.mark.parametrize(
    "odd, m_0, m_1, mode, floatx",
    [
        (0, 0, 1, "hybrid", 32),
        (0, 0, 1, "forward", 32),
        (0, 0, 1, "ibp", 32),
        (0, 0, 1, "hybrid", 64),
        (0, 0, 1, "forward", 64),
        (0, 0, 1, "ibp", 64),
        (0, 0, 1, "hybrid", 16),
        (0, 0, 1, "forward", 16),
        (0, 0, 1, "ibp", 16),
    ],
)
def test_Decomon_reshape_box(odd, m_0, m_1, mode, floatx):
    """Flatten an image box through DecomonReshape (dc_decomp=True) and
    validate the outputs with assert_output_properties_box for each
    propagation mode (ibp / forward / hybrid) and float precision."""
    K.set_floatx("float{}".format(floatx))
    # Remember the current epsilon so it can be restored at the end.
    eps = K.epsilon()
    decimal = 5
    if floatx == 16:
        # float16 needs a looser epsilon and a much coarser comparison.
        K.set_epsilon(1e-2)
        decimal = 1
    # monotonic_layer = DecomonConv2D(10, kernel_size=(3, 3), activation="relu", dc_decomp=True, mode=mode,
    #                                 data_format=data_format)
    inputs = get_tensor_decomposition_images_box("channels_last", odd)
    inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1)
    x, y, z, u_c, W_u, b_u, l_c, W_l, b_l, h, g = inputs
    x_ = inputs_[0]
    z_ = inputs_[2]
    # Flatten everything after the batch axis.
    target_shape = (np.prod(y.shape[1:]),)
    y_ = np.reshape(inputs_[1], (-1, target_shape[0]))
    monotonic_layer = DecomonReshape((target_shape), dc_decomp=True, mode=mode)
    # Each mode consumes a different subset of the decomposition tensors.
    if mode == "hybrid":
        output = monotonic_layer(inputs[2:])
    if mode == "forward":
        output = monotonic_layer([z, W_u, b_u, W_l, b_l, h, g])
    if mode == "ibp":
        output = monotonic_layer([u_c, l_c, h, g])
    f_reshape = K.function(inputs[2:], output)
    # The number of outputs to unpack also depends on the mode.
    if mode == "hybrid":
        z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_ = f_reshape(inputs_[2:])
    if mode == "forward":
        z_, w_u_, b_u_, w_l_, b_l_, h_, g_ = f_reshape(inputs_[2:])
        u_c_ = None
        l_c_ = None
    if mode == "ibp":
        u_c_, l_c_, h_, g_ = f_reshape(inputs_[2:])
        w_u_, b_u_, w_l_, b_l_ = [None] * 4
    assert_output_properties_box(
        x_,
        y_,
        h_,
        g_,
        z_[:, 0],
        z_[:, 1],
        u_c_,
        w_u_,
        b_u_,
        l_c_,
        w_l_,
        b_l_,
        "reshape_{}_{}_{}".format(odd, m_0, m_1),
        decimal=decimal,
    )
    # Restore the global Keras float settings for subsequent tests.
    K.set_floatx("float{}".format(32))
    K.set_epsilon(eps)
@pytest.mark.parametrize(
    "odd, m_0, m_1, mode, floatx",
    [
        (0, 0, 1, "hybrid", 32),
        (0, 0, 1, "forward", 32),
        (0, 0, 1, "ibp", 32),
        (0, 0, 1, "hybrid", 64),
        (0, 0, 1, "forward", 64),
        (0, 0, 1, "ibp", 64),
        (0, 0, 1, "hybrid", 16),
        (0, 0, 1, "forward", 16),
        (0, 0, 1, "ibp", 16),
    ],
)
def test_Decomon_reshape_box_nodc(odd, m_0, m_1, mode, floatx):
    """Same as test_Decomon_reshape_box but with dc_decomp=False, so the
    h/g decomposition tensors are absent throughout."""
    K.set_floatx("float{}".format(floatx))
    # Remember the current epsilon so it can be restored at the end.
    eps = K.epsilon()
    decimal = 5
    if floatx == 16:
        # float16 needs a looser epsilon and a much coarser comparison.
        K.set_epsilon(1e-2)
        decimal = 1
    # monotonic_layer = DecomonConv2D(10, kernel_size=(3, 3), activation="relu", dc_decomp=True, mode=mode,
    #                                 data_format=data_format)
    inputs = get_tensor_decomposition_images_box("channels_last", odd, dc_decomp=False)
    inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1, dc_decomp=False)
    x, y, z, u_c, W_u, b_u, l_c, W_l, b_l = inputs
    x_ = inputs_[0]
    z_ = inputs_[2]
    # Flatten everything after the batch axis.
    target_shape = (np.prod(y.shape[1:]),)
    y_ = np.reshape(inputs_[1], (-1, target_shape[0]))
    monotonic_layer = DecomonReshape((target_shape), dc_decomp=False, mode=mode)
    # Each mode consumes a different subset of the decomposition tensors.
    if mode == "hybrid":
        output = monotonic_layer(inputs[2:])
    if mode == "forward":
        output = monotonic_layer([z, W_u, b_u, W_l, b_l])
    if mode == "ibp":
        output = monotonic_layer([u_c, l_c])
    f_reshape = K.function(inputs[2:], output)
    # The number of outputs to unpack also depends on the mode.
    if mode == "hybrid":
        z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_ = f_reshape(inputs_[2:])
    if mode == "forward":
        z_, w_u_, b_u_, w_l_, b_l_ = f_reshape(inputs_[2:])
        u_c_ = None
        l_c_ = None
    if mode == "ibp":
        u_c_, l_c_ = f_reshape(inputs_[2:])
        w_u_, b_u_, w_l_, b_l_ = [None] * 4
    assert_output_properties_box(
        x_,
        y_,
        None,
        None,
        z_[:, 0],
        z_[:, 1],
        u_c_,
        w_u_,
        b_u_,
        l_c_,
        w_l_,
        b_l_,
        "reshape_{}_{}_{}".format(odd, m_0, m_1),
        decimal=decimal,
    )
    # Restore the global Keras float settings for subsequent tests.
    K.set_floatx("float{}".format(32))
    K.set_epsilon(eps)
@pytest.mark.parametrize(
    "odd, m_0, m_1, shared, floatx",
    [
        (0, 0, 1, False, 32),
        (0, 0, 1, True, 32),
        (0, 0, 1, False, 64),
        (0, 0, 1, True, 64),
        (0, 0, 1, False, 16),
        (0, 0, 1, True, 16),
    ],
)
def test_Decomon_reshape_to_monotonic_box(odd, m_0, m_1, shared, floatx):
    """Convert a plain Keras Reshape layer via to_monotonic and compare
    its decomposed outputs against the reference layer's output."""
    K.set_floatx("float{}".format(floatx))
    # Remember the current epsilon so it can be restored at the end.
    eps = K.epsilon()
    decimal = 4
    if floatx == 16:
        # float16 needs a looser epsilon and a much coarser comparison.
        K.set_epsilon(1e-2)
        decimal = 1
    inputs = get_tensor_decomposition_images_box("channels_last", odd)
    inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1)
    x, y, z, u_c, W_u, b_u, l_c, W_l, b_l, h, g = inputs
    x_ = inputs_[0]
    z_ = inputs_[2]
    # Flatten everything after the batch axis.
    target_shape = (np.prod(y.shape[1:]),)
    reshape_ref = Reshape(target_shape)
    output_ref = reshape_ref(inputs[1])
    input_dim = x_.shape[-1]
    monotonic_layer = to_monotonic(reshape_ref, input_dim, dc_decomp=True, shared=shared)
    # to_monotonic may return one or two layers; chain them if needed.
    output = monotonic_layer[0](inputs[2:])
    if len(monotonic_layer) > 1:
        output = monotonic_layer[1](output)
    f_ref = K.function(inputs, output_ref)
    f_reshape = K.function(inputs[2:], output)
    y_ref = f_ref(inputs_)
    z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_ = f_reshape(inputs_[2:])
    assert_output_properties_box(
        x_,
        y_ref,
        h_,
        g_,
        z_[:, 0],
        z_[:, 1],
        u_c_,
        w_u_,
        b_u_,
        l_c_,
        w_l_,
        b_l_,
        "reshape_{}_{}_{}".format(odd, m_0, m_1),
        decimal=decimal,
    )
    # Restore the global Keras float settings for subsequent tests.
    K.set_floatx("float{}".format(32))
    K.set_epsilon(eps)
# permute
@pytest.mark.parametrize(
    "odd, m_0, m_1, mode, floatx",
    [
        (0, 0, 1, "hybrid", 32),
        (0, 0, 1, "forward", 32),
        (0, 0, 1, "ibp", 32),
        (0, 0, 1, "hybrid", 64),
        (0, 0, 1, "forward", 64),
        (0, 0, 1, "ibp", 64),
        (0, 0, 1, "hybrid", 16),
        (0, 0, 1, "forward", 16),
        (0, 0, 1, "ibp", 16),
    ],
)
def test_Decomon_permute_box(odd, m_0, m_1, mode, floatx):
    """Apply DecomonPermute (dc_decomp=True) with a random axis
    permutation and validate the outputs for each mode and precision."""
    K.set_floatx("float{}".format(floatx))
    # Remember the current epsilon so it can be restored at the end.
    eps = K.epsilon()
    decimal = 5
    if floatx == 16:
        # float16 needs a looser epsilon and a much coarser comparison.
        K.set_epsilon(1e-2)
        decimal = 1
    inputs = get_tensor_decomposition_images_box("channels_last", odd)
    inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1)
    x, y, z, u_c, W_u, b_u, l_c, W_l, b_l, h, g = inputs
    x_ = inputs_[0]
    z_ = inputs_[2]
    # Random permutation of the non-batch axes (Permute axes are 1-based).
    n_dim = len(y.shape) - 1
    target_shape = np.random.permutation(n_dim) + 1
    # Prepend axis 0 (the batch axis) for np.transpose on the raw values.
    target_shape_ = tuple([0] + list(target_shape))
    y_ = np.transpose(inputs_[1], target_shape_)
    monotonic_layer = DecomonPermute(target_shape, dc_decomp=True, mode=mode)
    # Each mode consumes a different subset of the decomposition tensors.
    if mode == "hybrid":
        output = monotonic_layer(inputs[2:])
    if mode == "forward":
        output = monotonic_layer([z, W_u, b_u, W_l, b_l, h, g])
    if mode == "ibp":
        output = monotonic_layer([u_c, l_c, h, g])
    f_permute = K.function(inputs[2:], output)
    # The number of outputs to unpack also depends on the mode.
    if mode == "hybrid":
        z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_ = f_permute(inputs_[2:])
    if mode == "forward":
        z_, w_u_, b_u_, w_l_, b_l_, h_, g_ = f_permute(inputs_[2:])
        u_c_ = None
        l_c_ = None
    if mode == "ibp":
        u_c_, l_c_, h_, g_ = f_permute(inputs_[2:])
        w_u_, b_u_, w_l_, b_l_ = [None] * 4
    assert_output_properties_box(
        x_,
        y_,
        h_,
        g_,
        z_[:, 0],
        z_[:, 1],
        u_c_,
        w_u_,
        b_u_,
        l_c_,
        w_l_,
        b_l_,
        "reshape_{}_{}_{}".format(odd, m_0, m_1),
        decimal=decimal,
    )
    # Restore the global Keras float settings for subsequent tests.
    K.set_floatx("float{}".format(32))
    K.set_epsilon(eps)
@pytest.mark.parametrize(
    "odd, m_0, m_1, mode, floatx",
    [
        (0, 0, 1, "hybrid", 32),
        (0, 0, 1, "forward", 32),
        (0, 0, 1, "ibp", 32),
        (0, 0, 1, "hybrid", 64),
        (0, 0, 1, "forward", 64),
        (0, 0, 1, "ibp", 64),
        (0, 0, 1, "hybrid", 16),
        (0, 0, 1, "forward", 16),
        (0, 0, 1, "ibp", 16),
    ],
)
def test_Decomon_permute_box_nodc(odd, m_0, m_1, mode, floatx):
    """Same as test_Decomon_permute_box but with dc_decomp=False, so the
    h/g decomposition tensors are absent throughout."""
    K.set_floatx("float{}".format(floatx))
    # Remember the current epsilon so it can be restored at the end.
    eps = K.epsilon()
    decimal = 5
    if floatx == 16:
        # float16 needs a looser epsilon and a much coarser comparison.
        K.set_epsilon(1e-2)
        decimal = 1
    inputs = get_tensor_decomposition_images_box("channels_last", odd, dc_decomp=False)
    inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1, dc_decomp=False)
    x, y, z, u_c, W_u, b_u, l_c, W_l, b_l = inputs
    x_ = inputs_[0]
    z_ = inputs_[2]
    # Random permutation of the non-batch axes (Permute axes are 1-based).
    n_dim = len(y.shape) - 1
    target_shape = np.random.permutation(n_dim) + 1
    # Prepend axis 0 (the batch axis) for np.transpose on the raw values.
    target_shape_ = tuple([0] + list(target_shape))
    y_ = np.transpose(inputs_[1], target_shape_)
    monotonic_layer = DecomonPermute(target_shape, dc_decomp=False, mode=mode)
    # Each mode consumes a different subset of the decomposition tensors.
    if mode == "hybrid":
        output = monotonic_layer(inputs[2:])
    if mode == "forward":
        output = monotonic_layer([z, W_u, b_u, W_l, b_l])
    if mode == "ibp":
        output = monotonic_layer([u_c, l_c])
    f_permute = K.function(inputs[2:], output)
    # The number of outputs to unpack also depends on the mode.
    if mode == "hybrid":
        z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_ = f_permute(inputs_[2:])
    if mode == "forward":
        z_, w_u_, b_u_, w_l_, b_l_ = f_permute(inputs_[2:])
        u_c_ = None
        l_c_ = None
    if mode == "ibp":
        u_c_, l_c_ = f_permute(inputs_[2:])
        w_u_, b_u_, w_l_, b_l_ = [None] * 4
    assert_output_properties_box(
        x_,
        y_,
        None,
        None,
        z_[:, 0],
        z_[:, 1],
        u_c_,
        w_u_,
        b_u_,
        l_c_,
        w_l_,
        b_l_,
        "reshape_{}_{}_{}".format(odd, m_0, m_1),
        decimal=decimal,
    )
    # Restore the global Keras float settings for subsequent tests.
    K.set_floatx("float{}".format(32))
    K.set_epsilon(eps)
@pytest.mark.parametrize(
    "odd, m_0, m_1, shared, floatx",
    [
        (0, 0, 1, False, 32),
        (0, 0, 1, True, 32),
        (0, 0, 1, False, 64),
        (0, 0, 1, True, 64),
        (0, 0, 1, False, 16),
        (0, 0, 1, True, 16),
    ],
)
def test_Decomon_permute_to_monotonic_box(odd, m_0, m_1, shared, floatx):
    """Convert a plain Keras Permute layer via to_monotonic and compare
    its decomposed outputs against the reference layer's output."""
    K.set_floatx("float{}".format(floatx))
    # Remember the current epsilon so it can be restored at the end.
    eps = K.epsilon()
    decimal = 4
    if floatx == 16:
        # float16 needs a looser epsilon and a much coarser comparison.
        K.set_epsilon(1e-2)
        decimal = 1
    inputs = get_tensor_decomposition_images_box("channels_last", odd)
    inputs_ = get_standard_values_images_box("channels_last", odd, m0=m_0, m1=m_1)
    x, y, z, u_c, W_u, b_u, l_c, W_l, b_l, h, g = inputs
    x_ = inputs_[0]
    z_ = inputs_[2]
    # Random permutation of the non-batch axes (Permute axes are 1-based).
    n_dim = len(y.shape) - 1
    target_shape = np.random.permutation(n_dim) + 1
    permute_ref = Permute(target_shape)
    output_ref = permute_ref(inputs[1])
    input_dim = x_.shape[-1]
    monotonic_layer = to_monotonic(permute_ref, input_dim, dc_decomp=True, shared=shared)
    # to_monotonic may return one or two layers; chain them if needed.
    output = monotonic_layer[0](inputs[2:])
    if len(monotonic_layer) > 1:
        output = monotonic_layer[1](output)
    f_ref = K.function(inputs, output_ref)
    f_permute = K.function(inputs[2:], output)
    y_ref = f_ref(inputs_)
    z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_ = f_permute(inputs_[2:])
    assert_output_properties_box(
        x_,
        y_ref,
        h_,
        g_,
        z_[:, 0],
        z_[:, 1],
        u_c_,
        w_u_,
        b_u_,
        l_c_,
        w_l_,
        b_l_,
        "reshape_{}_{}_{}".format(odd, m_0, m_1),
        decimal=decimal,
    )
    # Restore the global Keras float settings for subsequent tests.
    K.set_floatx("float{}".format(32))
    K.set_epsilon(eps)
| [
"tensorflow.keras.layers.Reshape",
"decomon.layers.decomon_layers.to_monotonic",
"decomon.layers.decomon_reshape.DecomonReshape",
"numpy.transpose",
"tensorflow.keras.layers.Permute",
"tensorflow.python.keras.backend.epsilon",
"tensorflow.python.keras.backend.function",
"numpy.reshape",
"decomon.lay... | [((471, 758), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""odd, m_0, m_1, mode, floatx"""', "[(0, 0, 1, 'hybrid', 32), (0, 0, 1, 'forward', 32), (0, 0, 1, 'ibp', 32), (\n 0, 0, 1, 'hybrid', 64), (0, 0, 1, 'forward', 64), (0, 0, 1, 'ibp', 64),\n (0, 0, 1, 'hybrid', 16), (0, 0, 1, 'forward', 16), (0, 0, 1, 'ibp', 16)]"], {}), "('odd, m_0, m_1, mode, floatx', [(0, 0, 1, 'hybrid',\n 32), (0, 0, 1, 'forward', 32), (0, 0, 1, 'ibp', 32), (0, 0, 1, 'hybrid',\n 64), (0, 0, 1, 'forward', 64), (0, 0, 1, 'ibp', 64), (0, 0, 1, 'hybrid',\n 16), (0, 0, 1, 'forward', 16), (0, 0, 1, 'ibp', 16)])\n", (494, 758), False, 'import pytest\n'), ((2638, 2925), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""odd, m_0, m_1, mode, floatx"""', "[(0, 0, 1, 'hybrid', 32), (0, 0, 1, 'forward', 32), (0, 0, 1, 'ibp', 32), (\n 0, 0, 1, 'hybrid', 64), (0, 0, 1, 'forward', 64), (0, 0, 1, 'ibp', 64),\n (0, 0, 1, 'hybrid', 16), (0, 0, 1, 'forward', 16), (0, 0, 1, 'ibp', 16)]"], {}), "('odd, m_0, m_1, mode, floatx', [(0, 0, 1, 'hybrid',\n 32), (0, 0, 1, 'forward', 32), (0, 0, 1, 'ibp', 32), (0, 0, 1, 'hybrid',\n 64), (0, 0, 1, 'forward', 64), (0, 0, 1, 'ibp', 64), (0, 0, 1, 'hybrid',\n 16), (0, 0, 1, 'forward', 16), (0, 0, 1, 'ibp', 16)])\n", (2661, 2925), False, 'import pytest\n'), ((4807, 5003), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""odd, m_0, m_1, shared, floatx"""', '[(0, 0, 1, False, 32), (0, 0, 1, True, 32), (0, 0, 1, False, 64), (0, 0, 1,\n True, 64), (0, 0, 1, False, 16), (0, 0, 1, True, 16)]'], {}), "('odd, m_0, m_1, shared, floatx', [(0, 0, 1, False, \n 32), (0, 0, 1, True, 32), (0, 0, 1, False, 64), (0, 0, 1, True, 64), (0,\n 0, 1, False, 16), (0, 0, 1, True, 16)])\n", (4830, 5003), False, 'import pytest\n'), ((6465, 6752), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""odd, m_0, m_1, mode, floatx"""', "[(0, 0, 1, 'hybrid', 32), (0, 0, 1, 'forward', 32), (0, 0, 1, 'ibp', 32), (\n 0, 0, 1, 'hybrid', 64), (0, 0, 1, 
'forward', 64), (0, 0, 1, 'ibp', 64),\n (0, 0, 1, 'hybrid', 16), (0, 0, 1, 'forward', 16), (0, 0, 1, 'ibp', 16)]"], {}), "('odd, m_0, m_1, mode, floatx', [(0, 0, 1, 'hybrid',\n 32), (0, 0, 1, 'forward', 32), (0, 0, 1, 'ibp', 32), (0, 0, 1, 'hybrid',\n 64), (0, 0, 1, 'forward', 64), (0, 0, 1, 'ibp', 64), (0, 0, 1, 'hybrid',\n 16), (0, 0, 1, 'forward', 16), (0, 0, 1, 'ibp', 16)])\n", (6488, 6752), False, 'import pytest\n'), ((8544, 8831), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""odd, m_0, m_1, mode, floatx"""', "[(0, 0, 1, 'hybrid', 32), (0, 0, 1, 'forward', 32), (0, 0, 1, 'ibp', 32), (\n 0, 0, 1, 'hybrid', 64), (0, 0, 1, 'forward', 64), (0, 0, 1, 'ibp', 64),\n (0, 0, 1, 'hybrid', 16), (0, 0, 1, 'forward', 16), (0, 0, 1, 'ibp', 16)]"], {}), "('odd, m_0, m_1, mode, floatx', [(0, 0, 1, 'hybrid',\n 32), (0, 0, 1, 'forward', 32), (0, 0, 1, 'ibp', 32), (0, 0, 1, 'hybrid',\n 64), (0, 0, 1, 'forward', 64), (0, 0, 1, 'ibp', 64), (0, 0, 1, 'hybrid',\n 16), (0, 0, 1, 'forward', 16), (0, 0, 1, 'ibp', 16)])\n", (8567, 8831), False, 'import pytest\n'), ((10625, 10821), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""odd, m_0, m_1, shared, floatx"""', '[(0, 0, 1, False, 32), (0, 0, 1, True, 32), (0, 0, 1, False, 64), (0, 0, 1,\n True, 64), (0, 0, 1, False, 16), (0, 0, 1, True, 16)]'], {}), "('odd, m_0, m_1, shared, floatx', [(0, 0, 1, False, \n 32), (0, 0, 1, True, 32), (0, 0, 1, False, 64), (0, 0, 1, True, 64), (0,\n 0, 1, False, 16), (0, 0, 1, True, 16)])\n", (10648, 10821), False, 'import pytest\n'), ((950, 961), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (959, 961), True, 'import tensorflow.python.keras.backend as K\n'), ((1522, 1567), 'numpy.reshape', 'np.reshape', (['inputs_[1]', '(-1, target_shape[0])'], {}), '(inputs_[1], (-1, target_shape[0]))\n', (1532, 1567), True, 'import numpy as np\n'), ((1591, 1646), 'decomon.layers.decomon_reshape.DecomonReshape', 'DecomonReshape', (['target_shape'], {'dc_decomp': 
'(True)', 'mode': 'mode'}), '(target_shape, dc_decomp=True, mode=mode)\n', (1605, 1646), False, 'from decomon.layers.decomon_reshape import DecomonPermute, DecomonReshape\n'), ((1900, 1930), 'tensorflow.python.keras.backend.function', 'K.function', (['inputs[2:]', 'output'], {}), '(inputs[2:], output)\n', (1910, 1930), True, 'import tensorflow.python.keras.backend as K\n'), ((2616, 2634), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['eps'], {}), '(eps)\n', (2629, 2634), True, 'import tensorflow.python.keras.backend as K\n'), ((3122, 3133), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (3131, 3133), True, 'import tensorflow.python.keras.backend as K\n'), ((3722, 3767), 'numpy.reshape', 'np.reshape', (['inputs_[1]', '(-1, target_shape[0])'], {}), '(inputs_[1], (-1, target_shape[0]))\n', (3732, 3767), True, 'import numpy as np\n'), ((3791, 3847), 'decomon.layers.decomon_reshape.DecomonReshape', 'DecomonReshape', (['target_shape'], {'dc_decomp': '(False)', 'mode': 'mode'}), '(target_shape, dc_decomp=False, mode=mode)\n', (3805, 3847), False, 'from decomon.layers.decomon_reshape import DecomonPermute, DecomonReshape\n'), ((4089, 4119), 'tensorflow.python.keras.backend.function', 'K.function', (['inputs[2:]', 'output'], {}), '(inputs[2:], output)\n', (4099, 4119), True, 'import tensorflow.python.keras.backend as K\n'), ((4785, 4803), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['eps'], {}), '(eps)\n', (4798, 4803), True, 'import tensorflow.python.keras.backend as K\n'), ((5189, 5200), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (5198, 5200), True, 'import tensorflow.python.keras.backend as K\n'), ((5600, 5621), 'tensorflow.keras.layers.Reshape', 'Reshape', (['target_shape'], {}), '(target_shape)\n', (5607, 5621), False, 'from tensorflow.keras.layers import Reshape, Permute\n'), ((5714, 5781), 'decomon.layers.decomon_layers.to_monotonic', 'to_monotonic', (['reshape_ref', 
'input_dim'], {'dc_decomp': '(True)', 'shared': 'shared'}), '(reshape_ref, input_dim, dc_decomp=True, shared=shared)\n', (5726, 5781), False, 'from decomon.layers.decomon_layers import to_monotonic\n'), ((5917, 5947), 'tensorflow.python.keras.backend.function', 'K.function', (['inputs', 'output_ref'], {}), '(inputs, output_ref)\n', (5927, 5947), True, 'import tensorflow.python.keras.backend as K\n'), ((5965, 5995), 'tensorflow.python.keras.backend.function', 'K.function', (['inputs[2:]', 'output'], {}), '(inputs[2:], output)\n', (5975, 5995), True, 'import tensorflow.python.keras.backend as K\n'), ((6431, 6449), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['eps'], {}), '(eps)\n', (6444, 6449), True, 'import tensorflow.python.keras.backend as K\n'), ((6944, 6955), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6953, 6955), True, 'import tensorflow.python.keras.backend as K\n'), ((7436, 7475), 'numpy.transpose', 'np.transpose', (['inputs_[1]', 'target_shape_'], {}), '(inputs_[1], target_shape_)\n', (7448, 7475), True, 'import numpy as np\n'), ((7499, 7554), 'decomon.layers.decomon_reshape.DecomonPermute', 'DecomonPermute', (['target_shape'], {'dc_decomp': '(True)', 'mode': 'mode'}), '(target_shape, dc_decomp=True, mode=mode)\n', (7513, 7554), False, 'from decomon.layers.decomon_reshape import DecomonPermute, DecomonReshape\n'), ((7806, 7836), 'tensorflow.python.keras.backend.function', 'K.function', (['inputs[2:]', 'output'], {}), '(inputs[2:], output)\n', (7816, 7836), True, 'import tensorflow.python.keras.backend as K\n'), ((8522, 8540), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['eps'], {}), '(eps)\n', (8535, 8540), True, 'import tensorflow.python.keras.backend as K\n'), ((9028, 9039), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (9037, 9039), True, 'import tensorflow.python.keras.backend as K\n'), ((9548, 9587), 'numpy.transpose', 'np.transpose', (['inputs_[1]', 
'target_shape_'], {}), '(inputs_[1], target_shape_)\n', (9560, 9587), True, 'import numpy as np\n'), ((9611, 9667), 'decomon.layers.decomon_reshape.DecomonPermute', 'DecomonPermute', (['target_shape'], {'dc_decomp': '(False)', 'mode': 'mode'}), '(target_shape, dc_decomp=False, mode=mode)\n', (9625, 9667), False, 'from decomon.layers.decomon_reshape import DecomonPermute, DecomonReshape\n'), ((9907, 9937), 'tensorflow.python.keras.backend.function', 'K.function', (['inputs[2:]', 'output'], {}), '(inputs[2:], output)\n', (9917, 9937), True, 'import tensorflow.python.keras.backend as K\n'), ((10603, 10621), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['eps'], {}), '(eps)\n', (10616, 10621), True, 'import tensorflow.python.keras.backend as K\n'), ((11007, 11018), 'tensorflow.python.keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (11016, 11018), True, 'import tensorflow.python.keras.backend as K\n'), ((11456, 11477), 'tensorflow.keras.layers.Permute', 'Permute', (['target_shape'], {}), '(target_shape)\n', (11463, 11477), False, 'from tensorflow.keras.layers import Reshape, Permute\n'), ((11570, 11637), 'decomon.layers.decomon_layers.to_monotonic', 'to_monotonic', (['permute_ref', 'input_dim'], {'dc_decomp': '(True)', 'shared': 'shared'}), '(permute_ref, input_dim, dc_decomp=True, shared=shared)\n', (11582, 11637), False, 'from decomon.layers.decomon_layers import to_monotonic\n'), ((11773, 11803), 'tensorflow.python.keras.backend.function', 'K.function', (['inputs', 'output_ref'], {}), '(inputs, output_ref)\n', (11783, 11803), True, 'import tensorflow.python.keras.backend as K\n'), ((11821, 11851), 'tensorflow.python.keras.backend.function', 'K.function', (['inputs[2:]', 'output'], {}), '(inputs[2:], output)\n', (11831, 11851), True, 'import tensorflow.python.keras.backend as K\n'), ((12287, 12305), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['eps'], {}), '(eps)\n', (12300, 12305), True, 'import 
tensorflow.python.keras.backend as K\n'), ((1007, 1026), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['(0.01)'], {}), '(0.01)\n', (1020, 1026), True, 'import tensorflow.python.keras.backend as K\n'), ((1490, 1510), 'numpy.prod', 'np.prod', (['y.shape[1:]'], {}), '(y.shape[1:])\n', (1497, 1510), True, 'import numpy as np\n'), ((3179, 3198), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['(0.01)'], {}), '(0.01)\n', (3192, 3198), True, 'import tensorflow.python.keras.backend as K\n'), ((3690, 3710), 'numpy.prod', 'np.prod', (['y.shape[1:]'], {}), '(y.shape[1:])\n', (3697, 3710), True, 'import numpy as np\n'), ((5246, 5265), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['(0.01)'], {}), '(0.01)\n', (5259, 5265), True, 'import tensorflow.python.keras.backend as K\n'), ((5558, 5578), 'numpy.prod', 'np.prod', (['y.shape[1:]'], {}), '(y.shape[1:])\n', (5565, 5578), True, 'import numpy as np\n'), ((7001, 7020), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['(0.01)'], {}), '(0.01)\n', (7014, 7020), True, 'import tensorflow.python.keras.backend as K\n'), ((7341, 7369), 'numpy.random.permutation', 'np.random.permutation', (['n_dim'], {}), '(n_dim)\n', (7362, 7369), True, 'import numpy as np\n'), ((9085, 9104), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['(0.01)'], {}), '(0.01)\n', (9098, 9104), True, 'import tensorflow.python.keras.backend as K\n'), ((9453, 9481), 'numpy.random.permutation', 'np.random.permutation', (['n_dim'], {}), '(n_dim)\n', (9474, 9481), True, 'import numpy as np\n'), ((11064, 11083), 'tensorflow.python.keras.backend.set_epsilon', 'K.set_epsilon', (['(0.01)'], {}), '(0.01)\n', (11077, 11083), True, 'import tensorflow.python.keras.backend as K\n'), ((11404, 11432), 'numpy.random.permutation', 'np.random.permutation', (['n_dim'], {}), '(n_dim)\n', (11425, 11432), True, 'import numpy as np\n')] |
import functions as fun
import exceptions as exc
import matplotlib.pyplot as plt
import numpy as np
"""This file tests multiple functions used in the algorithm."""
def testStdevAndMeanOfWholeImage(image, area):
    """Print the standard deviation and mean of an image for every sampling
    window size from 0 up to and including `area`.

    Manual check: the printed values should be identical whatever the size
    of the sampling window is. For an all-black image both should be 0 for
    every window; for an all-white image the standard deviation should be 0
    and the mean 255.
    """
    loaded = fun.image(image)
    window = 0
    while window <= area:
        stdev, mean = fun.stdevAndMeanWholeImage(image=loaded, samplingWindow=window)
        print(window)
        print("STDEV : {}".format(stdev))
        print("MEAN : {}".format(mean))
        window += 1
    return
def testAbsAndSum(shape1, shape2):
    """Check ``fun.absSumAllPixels`` against a manual numpy computation.

    Builds a (shape1, shape2) image filled with -1 (so the expected
    absolute sum is shape1 * shape2), then compares the manually computed
    sum with the function under test via ``exc.areValuesEqual``.

    :param shape1: number of rows of the test image
    :param shape2: number of columns of the test image
    """
    # Step 1: produce an image with values of -1 everywhere. np.full
    # replaces the original O(rows * cols) pair of Python while-loops
    # with a single vectorized call (same float64 dtype as np.zeros).
    testImage = np.full((shape1, shape2), -1.0)
    # Step 2: make a manual sum and use the function. Compare the two values.
    manualSum = np.sum(np.absolute(testImage))
    functionSum = fun.absSumAllPixels(testImage)
    exc.areValuesEqual(value1=manualSum, value2=functionSum)
if __name__ == "__main__":
    # Smoke-test the absolute-sum helper on a small 10x10 image.
    #testStdevAndMeanOfWholeImage(image="/Users/valeriepineaunoel/Documents/HiLo-Python/Data/testWhiteImage.tif", area=7)
    testAbsAndSum(10,10)
| [
"numpy.absolute",
"functions.stdevAndMeanWholeImage",
"exceptions.areValuesEqual",
"numpy.zeros",
"functions.absSumAllPixels",
"functions.image"
] | [((740, 756), 'functions.image', 'fun.image', (['image'], {}), '(image)\n', (749, 756), True, 'import functions as fun\n'), ((1061, 1093), 'numpy.zeros', 'np.zeros', ([], {'shape': '(shape1, shape2)'}), '(shape=(shape1, shape2))\n', (1069, 1093), True, 'import numpy as np\n'), ((1423, 1453), 'functions.absSumAllPixels', 'fun.absSumAllPixels', (['testImage'], {}), '(testImage)\n', (1442, 1453), True, 'import functions as fun\n'), ((1455, 1511), 'exceptions.areValuesEqual', 'exc.areValuesEqual', ([], {'value1': 'manualSum', 'value2': 'functionSum'}), '(value1=manualSum, value2=functionSum)\n', (1473, 1511), True, 'import exceptions as exc\n'), ((798, 859), 'functions.stdevAndMeanWholeImage', 'fun.stdevAndMeanWholeImage', ([], {'image': 'testImage', 'samplingWindow': 'i'}), '(image=testImage, samplingWindow=i)\n', (824, 859), True, 'import functions as fun\n'), ((1384, 1406), 'numpy.absolute', 'np.absolute', (['testImage'], {}), '(testImage)\n', (1395, 1406), True, 'import numpy as np\n')] |
import numpy
# Read a number and print its factorial, showing each factor along the way,
# e.g. "Calculando 5! = 5 x 4 x 3 x 2 x 1 =  120".
num = int(input('Digite um número para calcular seu fatorial: '))
print('Calculando {}! = {}'.format(num, num), end = ' x ')
# Collect the descending factors; 'fatores' avoids shadowing the builtin 'list'.
fatores = [num, ]
# 'num > 1' (instead of 'num != 1') prevents an infinite loop when the
# input is 0 or negative.
while num > 1:
    num = num - 1
    fatores.append(num)
    print(num, end=' ')
    print(' x' if num > 1 else ' = ', end = ' ')
resultado = numpy.prod(fatores)
print(resultado)
| [
"numpy.prod"
] | [((302, 318), 'numpy.prod', 'numpy.prod', (['list'], {}), '(list)\n', (312, 318), False, 'import numpy\n')] |
# Despy: A discrete event simulation framework for Python
# Version 0.1
# Released under the MIT License (MIT)
# Copyright (c) 2015, <NAME>
"""
*********************
despy.model.simulation
*********************
.. autosummary::
Simulation
FutureEvent
NoEventsRemainingError
.. todo
Modify run and resume methods to accept triggers as parameters.
Have _set_triggers compare current time to until parameter.
Update documentation to state that seed can raise a TypeError.
Revise internal function names -- get rid of underscore prefix and
replace with "dp_" prefix to indicate an internal framework
method.
Write test for resume_on_next_rep parameter.
Change name of despy.stats.random to avoid clash with library module.
Move statistic module to stats package?
Add Simulation log that records time of initialization, setup,
errors, etc.
"""
from heapq import heappush, heappop
from itertools import count
import datetime, random
from collections import namedtuple, OrderedDict
import numpy as np
from despy.session import Session
from despy.output.results import Results
# from despy.output.report import Datatype
from despy.fel.event import Priority
from despy.model.trigger import AbstractTrigger, TimeTrigger
from despy.output.counter import Counter
import despy.output.console as console
class NoEventsRemainingError(Exception):
    """Signals that the future event list (FEL) is empty.

    Raised by despy.simulation._step() when there is no event left to
    pop from the FEL.
    """
class FutureEvent(namedtuple('FutureEventTuple',
                             ['time', 'event', 'priority'])):
    """A single entry on the future event list (FEL).

    Every item placed on the FEL is a FutureEvent. Being a namedtuple,
    it sorts first by ``time``, which is what the heap-based FEL relies
    on.

    **Fields**

        * :attr:`time`: Scheduled (internal) execution time; a
          non-negative integer.
        * :attr:`event`: An instance of
          :class:`despy.model.event.Event`.
        * :attr:`priority`: A priority constant from
          :mod:`despy.base.named_object2`, or an integer between -4
          and +4, inclusive.
    """
#
#
# class Dispatcher():
# def __init__(self):
# self._session = Session()
# self._con = Console()
#
# def send_data(self, key, data, label = None):
# processed_label = self._session.results.set_value(key, data, label)
# self._con.display(processed_label, data)
#
# def announce_phase(self, phase):
# self._con.display_header(phase)
#
# def announce(self, message):
# self._con.display_message(message)
class Simulation():
    """Schedule events and manage the future event list (FEL).

    Every Despy simulation must have one instance of the ``Simulation``
    class. The ``Simulation`` class initializes the top-level model and
    its components, manages the simulation clock and FEL, and executes
    events.

    **Properties**

    .. autosummary::

        session
        config
        model
        rep
        now
        event
        pri
        triggers

    **Public Methods**

    .. autosummary::

        reset
        add_trigger
        initialize
        finalize
        peek
        schedule
        run
        irun
        irunf
        runf
        add_message

    **Private Methods**

    .. autosummary::

        _setup
        _teardown
        _set_triggers
        _step
        _check_triggers
    """
    def __init__(self, model = None, config = None):
        """Creates a Simulation object.

        *Arguments*
            ``model:`` (:class:`despy.model.component`)
                Optional. Assigns a model to the simulation. If omitted,
                designer must assign the model using the
                'Simulation.model' property before initializing the
                simulation.
            ``config:`` (:class:`despy.session.config`)
                Optional. Config object contains numerous simulation
                parameters. If omitted, a config object is created
                automatically with default settings.
        """
        # Session is a shared singleton-like object linking sim, model,
        # and config; register this simulation with it.
        self._session = Session()
        self._session.sim = self
        if model is not None:
            self._session.model = model
        if config is not None:
            self._session.config = config
        self._rep = 0
        # self.dispatcher = Dispatcher()
        self.reset()

    def reset(self):
        """Resets the simulation to its initial state.

        Clears triggers, sets current rep to 0, sets time to the
        initial time, and clears the FEL and traces. Note that reset
        does not reset the model to its initial condition.
        """
        self._triggers = OrderedDict()
        self._rep = 0
        self._setups = 0
        self._evt = None
        # Internal clock runs at 10x resolution so priorities (-4..+4)
        # can be folded into event times -- see the 'now' property.
        self._now = self._session.config.initial_time * 10
        self._pri = 0
        self._futureEventList = []
        self._counter = count()
        self.results = Results(self)
        self.results.stats["event_counter"] = Counter("event_counter")

    @property
    def session(self):
        """Returns the current Session object. Read-only.

        *Type:* :class:`despy.session.Session`
        """
        return self._session

    @property
    def config(self):
        """The assigned :class:`despy.session.Config` object."""
        return self._session.config

    @config.setter
    def config(self, config):
        self._session.config = config

    @property
    def model(self):
        """The model that is assigned to the Simulation object.

        *Type:* :class:`despy.model.component.Component`
        """
        return self._session.model

    @model.setter
    def model(self, model):
        self._session.model = model

    @property
    def rep(self):
        """Integer representing the current replication. Read-only.

        Starts at zero, i.e., rep = 0 for the first replication.
        """
        return self._rep

    @property
    def now(self):
        """The current time for the current replication.

        *Type:* Integer

        Internally, Despy multiplies the time by a factor of ten so
        that event priorities can be encoded into the schedule time
        (e.g. a Priority.EARLY event at time 4 is stored at internal
        time 39, DEFAULT at 40, LATE at 41). This property converts
        back to the external, user-facing time scale.
        """
        return int(self._now / 10)

    @now.setter
    def now(self, time):
        self._now = time * 10

    @property
    def event(self):
        """The event that is currently being executed by the simulation.

        *Type:* :class:`despy.event.Event`

        Equals 'None' except while an event's do_event() method is
        executing (see Simulation._step()).
        """
        return self._evt

    @property
    def pri(self):
        """The priority of the current or most recently completed event.

        *Type:* Integer
        """
        return self._pri

    @property
    def triggers(self):
        """Ordered Dictionary containing simulation triggers.

        *Type:* {:class:`despy.model.trigger.AbstractTrigger`}

        After every event the simulation checks each trigger's check()
        condition and, when it is met, executes the trigger's pull().
        """
        return self._triggers

    # def display(self, data):
    #     self._con.display(data)

    def add_trigger(self, key, trigger):
        """Registers a trigger under ``key``.

        Raises TypeError if ``trigger`` is not an AbstractTrigger
        subclass (or registered as one via Trigger.register()).
        """
        err_msg = ("{0} object provided to Simulation.add_trigger() "
                   "method must be a subclass of "
                   "despy.model.trigger.Trigger or registered as a "
                   "subclass using the Trigger.register() method")
        if issubclass(trigger.__class__, AbstractTrigger):
            self.triggers[key] = trigger
        else:
            raise TypeError(err_msg.format(repr(trigger)))

    def initialize(self):
        """Initializes all model components and seeds random number
        generators.

        Called once, prior to running the simulation, regardless of the
        number of replications. Per-replication setup belongs in the
        components' setup() methods, which run() calls automatically.
        May be invoked implicitly via irun() or irunf().
        """
        console.display_header("Initializing")
        # Seed both numpy and stdlib RNGs so the whole run is
        # reproducible from config.seed.
        np.random.seed(self._session.config.seed)
        random.seed(self._session.config.seed)
        self.results.set_value('seed', self.config.seed)
        self._now = self._session.config.initial_time * 10
        self.results.set_value('initial_time', self.now)
        self.model.dp_initialize()
        console.display_message("All Components Initialized.")

    def _setup(self):
        """Resets simulation for the next rep and calls model setup()
        methods.

        Called automatically by run() at the beginning of each
        replication: clears the FEL, resets the clock and counters
        (for reps after the first, which initialize() already set up),
        and calls every model component's setup() method.
        """
        console.display_header("Setup Rep #{} ".format(self.rep))
        if self.rep > 0:
            self._now = self._session.config.initial_time * 10
            self._pri = 0
            self._futureEventList = []
            self._counter = count()
        self._session.model.dp_setup()
        for _, stat in self.results.stats.items():
            stat.setup()

    def _teardown(self):
        """Calls all Component.teardown() methods at the end of each rep.
        """
        console.display_header("Teardown Rep #{} ".format(self.rep))
        self._session.model.dp_teardown(self.now)
        for _, stat in self.results.stats.items():
            stat.teardown()

    def finalize(self):
        """Calls Component.finalize() methods, returns a results object.

        *Returns:* :class:`despy.output.results`

        Call explicitly after run(), or implicitly via irunf()/runf().
        """
        console.display_header("Finalizing")
        self._session.model.dp_finalize()
        for _, stat in self.results.stats.items():
            stat.finalize()
        self.results.set_full_path()
        self._session.results = self.results
        return self.results

    def peek(self, prioritized=True):
        """Return the time of the next scheduled event.

        *Arguments*
            prioritized (Boolean):
                If ``True`` (default), the returned time reflects the
                event's priority offset; if ``False``, the nominal
                scheduled time is returned.

        *Returns*
            An integer or float value if the FEL contains events.
            Infinity if there are no remaining events.
        """
        try:
            if prioritized:
                return int((self._futureEventList[0].time - \
                        self._futureEventList[0].priority) / 10)
            else:
                return self._futureEventList[0].time / 10
        except IndexError:
            # Empty FEL -- report "never".
            return float('Infinity')

    def schedule(self, event, delay=0, priority=Priority.STANDARD):
        """ Add an event to the FEL.

        *Arguments*
            event (:class:`despy.event.Event`):
                An instance or subclass of the ``Event`` class.
            delay (integer):
                A non-negative integer that defaults to zero. If zero,
                the event will be scheduled to occur immediately.
            priority (integer)
                An attribute of :class:`despy.event.Priority`, or an
                integer ranging from -5 to +5. Defaults to
                ``Priority.STANDARD`` (zero).
        """
        # Ensures delay value is always an integer.
        delay = round(delay)
        # Places a tuple onto the FEL, consisting of the event time, ID,
        # and event object. Internal time = now + delay*10 + priority,
        # so the heap orders events by time then priority.
        scheduleTime = self._now + (delay * 10) + priority
        heappush(self._futureEventList,
                 FutureEvent(time=scheduleTime, event=event,
                             priority=priority))

    def run(self, until=None, resume_on_next_rep = False):
        """ Execute events on the FEL until reaching a stop condition.

        Advances simulation time and executes each replication until
        the FEL is empty or until the time given by ``until``.

        *Arguments*
            until (integer):
                A non-negative integer specifying the simulation time
                at which the simulation will stop. Defaults to 'None',
                meaning the simulation runs until the FEL is empty.
                Events scheduled exactly at ``until`` are executed.
            resume_on_next_rep (Boolean):
                Default is False. When resuming a simulation, if True
                the simulation skips any remaining events in the
                current rep and starts at the next rep.
        """
        console.display_header("Running")
        self._set_triggers(until)
        run_start_time = datetime.datetime.today()
        self.results.set_value('run_start_time', run_start_time,
                               overwrite = True)
        if resume_on_next_rep:
            self._rep += 1
        start_rep = self._rep
        for rep in range(start_rep, self._session.config.reps):
            self._rep = rep
            # Only set up a rep once, even across resumed runs.
            if self._setups <= self._rep:
                self._setup()
                self._setups += 1
            # Step through events on FEL and check triggers.
            continue_rep = True
            while continue_rep:
                try:
                    self._step()
                except NoEventsRemainingError:
                    break
                continue_rep = self._check_triggers()
            # Finalize model and setup for next replication
            self._teardown()
        console.display_header("Simulation Completed")
        run_stop_time = datetime.datetime.today()
        self.results.set_value('run_stop_time', run_stop_time,
                               overwrite = True)
        self.results.set_value('elapsed_time', run_stop_time - run_start_time,
                               overwrite = True)

    def _set_triggers(self, until):
        """Sets a TimeTrigger that ends the simulation at time = until.

        *Arguments*
            until (integer):
                A non-negative integer specifying the simulation time
                at which the simulation will stop. If 'None', deletes
                any existing TimeTriggers.

        *Raises*
            AttributeError if ``until`` is neither None nor > 0.
        """
        if until is None:
            try:
                del self.triggers["dp_untilTrigger"]
            except KeyError:
                # No until-trigger was set; nothing to remove.
                pass
        elif (until > 0):
            self.add_trigger("dp_untilTrigger", TimeTrigger(until))
        else:
            raise AttributeError("Simulation.run() until argument "
                                 "should be None or integer > 0. {} "
                                 "passed instead".format(until))

    def _step(self):
        """Advance simulation time and execute the next event.

        *Raises*
            NoEventsRemainingError:
                Occurs if no more events are scheduled on the FEL.

        *Returns*
            :class:`despy.simulation.FutureEvent`
        """
        # Get next event from FEL and advance current simulation time.
        try:
            fel_item = heappop(self._futureEventList)
        except IndexError:
            raise NoEventsRemainingError
        else:
            # Strip the priority offset to recover the external time.
            self.now = int((fel_item.time - \
                    fel_item.priority) / 10)
            self._pri = fel_item.priority

        # Run event; self._evt is exposed via the 'event' property
        # only for the duration of the call.
        self._evt = fel_item.event
        fel_item.event.dp_do_event()
        self._evt = None
        self.results.stats["event_counter"].increment()
        return fel_item

    def _check_triggers(self):
        """Checks all simulation triggers, returning False ends rep.

        *Returns*
            Boolean. True if replication should continue, False
            otherwise.
        """
        continue_rep = True
        for _, trigger in self.triggers.items():
            if trigger.check():
                if not trigger.pull():
                    continue_rep = False
                    break
        return continue_rep

    def irun(self, until = None):
        """Initializes and runs the simulation, but does not finalize.

        *Arguments*
            until (integer):
                Optional stop time; see Simulation.run().
        """
        self.initialize()
        self.run(until = until)

    def irunf(self, until = None):
        """Initializes, runs, and finalizes the simulation.

        *Arguments*
            until (integer):
                Optional stop time; see Simulation.run().

        *Returns*
            :class:`despy.output.results`
        """
        self.initialize()
        self.run(until = until)
        return self.finalize()

    def runf(self, until = None, resume_on_next_rep = False):
        """Runs and finalizes the simulation.

        *Arguments*
            until (integer):
                Optional stop time; see Simulation.run().
            resume_on_next_rep (Boolean):
                See Simulation.run().

        *Returns*
            :class:`despy.output.results`
        """
        self.run(until = until, resume_on_next_rep = resume_on_next_rep)
        return self.finalize()

    def add_message(self, message, fields):
        """Add a message to the trace report.

        *Arguments:*
            `message` (String)
                A short message that will be saved to the Trace.
            `fields` (Python dictionary)
                Custom fields that will be added to the TraceRecord.
                Optional. Defaults to None.
        """
        # While an event is executing, attach the message to that
        # event; otherwise write it directly to the trace.
        if self.event is None:
            self.results.trace.add_message(message, fields)
        else:
            self.event.add_message(message, fields)

    # def get_data(self):
    #     """ Get a Python list with simulation parameters and results.
    #
    #     The despy.output.results.write_files() method calls the get_data
    #     method of the simulation and model objects and places the data
    #     in the simulation report. The method will also call the user-
    #     defined get_data methods from all of the model components.
    #
    #     *Returns*
    #         A Python list of tuples. The first item of each tuple is
    #         a member of the :class:`despy.output.datatype.Datatype`
    #         enumeration, which describes the structure of the data
    #         item (e.g., paragraph, list, etc.).
    #
    #     """
    #     output = [(Datatype.title, "Simulation"),
    #               (Datatype.param_list,
    #                 [('Generator Folder',
    #                   self._session.config.folder_basename),
    #                  ('Seed', self.results.seed),
    #                  ('Start Time', self.results.run_start_time),
    #                  ('Stop Time', self.results.run_stop_time),
    #                  ('Elapsed Time', self.results.elapsed_time)])
    #               ]
    #     return output
"numpy.random.seed",
"datetime.datetime.today",
"despy.model.trigger.TimeTrigger",
"itertools.count",
"heapq.heappop",
"despy.output.results.Results",
"despy.output.console.display_header",
"collections.namedtuple",
"random.seed",
"despy.output.console.display_message",
"collections.OrderedDict"... | [((1539, 1600), 'collections.namedtuple', 'namedtuple', (['"""FutureEventTuple"""', "['time', 'event', 'priority']"], {}), "('FutureEventTuple', ['time', 'event', 'priority'])\n", (1549, 1600), False, 'from collections import namedtuple, OrderedDict\n'), ((4493, 4502), 'despy.session.Session', 'Session', ([], {}), '()\n', (4500, 4502), False, 'from despy.session import Session\n'), ((5098, 5111), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5109, 5111), False, 'from collections import namedtuple, OrderedDict\n'), ((5324, 5331), 'itertools.count', 'count', ([], {}), '()\n', (5329, 5331), False, 'from itertools import count\n'), ((5355, 5368), 'despy.output.results.Results', 'Results', (['self'], {}), '(self)\n', (5362, 5368), False, 'from despy.output.results import Results\n'), ((5415, 5439), 'despy.output.counter.Counter', 'Counter', (['"""event_counter"""'], {}), "('event_counter')\n", (5422, 5439), False, 'from despy.output.counter import Counter\n'), ((9891, 9929), 'despy.output.console.display_header', 'console.display_header', (['"""Initializing"""'], {}), "('Initializing')\n", (9913, 9929), True, 'import despy.output.console as console\n'), ((9955, 9996), 'numpy.random.seed', 'np.random.seed', (['self._session.config.seed'], {}), '(self._session.config.seed)\n', (9969, 9996), True, 'import numpy as np\n'), ((10005, 10043), 'random.seed', 'random.seed', (['self._session.config.seed'], {}), '(self._session.config.seed)\n', (10016, 10043), False, 'import datetime, random\n'), ((10290, 10344), 'despy.output.console.display_message', 'console.display_message', (['"""All Components Initialized."""'], {}), "('All Components Initialized.')\n", (10313, 10344), True, 'import despy.output.console as console\n'), ((11777, 11813), 'despy.output.console.display_header', 'console.display_header', (['"""Finalizing"""'], {}), "('Finalizing')\n", (11799, 11813), True, 'import despy.output.console as console\n'), ((15402, 15435), 
'despy.output.console.display_header', 'console.display_header', (['"""Running"""'], {}), "('Running')\n", (15424, 15435), True, 'import despy.output.console as console\n'), ((15495, 15520), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (15518, 15520), False, 'import datetime, random\n'), ((16356, 16402), 'despy.output.console.display_header', 'console.display_header', (['"""Simulation Completed"""'], {}), "('Simulation Completed')\n", (16378, 16402), True, 'import despy.output.console as console\n'), ((16427, 16452), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (16450, 16452), False, 'import datetime, random\n'), ((10963, 10970), 'itertools.count', 'count', ([], {}), '()\n', (10968, 10970), False, 'from itertools import count\n'), ((17931, 17961), 'heapq.heappop', 'heappop', (['self._futureEventList'], {}), '(self._futureEventList)\n', (17938, 17961), False, 'from heapq import heappush, heappop\n'), ((17287, 17305), 'despy.model.trigger.TimeTrigger', 'TimeTrigger', (['until'], {}), '(until)\n', (17298, 17305), False, 'from despy.model.trigger import AbstractTrigger, TimeTrigger\n')] |
# coding: utf-8
# # Sampling High-Dimensional Vectors
# <NAME> (January 15, 2016)
# In[ ]:
import numpy as np
import pylab
try:
    import seaborn as sns  # optional; prettier graphs
except ImportError:
    sns = None  # plotting code falls back to plain matplotlib histograms
import nengo
from nengolib.compat import get_activities
from nengolib.stats import ScatteredHypersphere, Sobol
# The four sampling schemes compared throughout this notebook:
# Monte Carlo (uniform) vs. number-theoretic (scattered), each on the
# D-ball (interior) and the D-sphere (surface).
uniform_ball = nengo.dists.UniformHypersphere(surface=False)
uniform_sphere = nengo.dists.UniformHypersphere(surface=True)
scatter_ball = ScatteredHypersphere(surface=False)
scatter_sphere = ScatteredHypersphere(surface=True)
# ## Abstract
#
# The **Monte Carlo (MC)** method of sampling is notoriously bad at reproducing the same statistics as the distribution being sampled.
# In[ ]:
def plot_dist(dist, title, num_samples=500):
    """Scatter-plot ``num_samples`` 2-D points drawn from ``dist``.

    Axes are fixed to [-1, 1] so different distributions are directly
    comparable.
    """
    xs, ys = dist.sample(num_samples, 2).T
    pylab.figure(figsize=(4, 4))
    pylab.title(title)
    pylab.scatter(xs, ys, s=10, alpha=0.7)
    pylab.xlim(-1, 1)
    pylab.ylim(-1, 1)
    pylab.show()
# Monte Carlo sampling: note the visible gaps and clumps.
plot_dist(uniform_ball, 'Uniform 2-Ball')
# Intuitively, MC sampling gives lots of "gaps" and "clumps", while instead what we want is more of a "**scattering**" of points uniformly about the sphere.
# In[ ]:
# Number-theoretic (scattered) sampling: visibly more even coverage.
plot_dist(scatter_ball, 'Scattered 2-Ball')
# We currently have three reasons to sample vectors in Nengo:
# 1. Choosing the **encoders** for a population
# 2. Choosing the **evaluation points** to solve for decoders
# 3. Choosing the **semantic pointers** in a vocabulary
#
# MC is bad for problem 1, because the neurons should be uniformly representing all parts of the vector space.
# MC sampling is _also_ bad for problem 2, because the "**empirical distribution**" does not match the actual distribution unless there are a large number of samples, and so the decoders are biased to minimize the approximation error of certain vectors over others. A scattered distribution overcomes this problem by giving a closer match to the uniform distribution with fewer samples.
#
# In fact, problems 1 and 2 are basically equivalent. When sampling encoders, we are effectively choosing which vectors should have the least error (by principle (1) they fire the most, and then by principle (2) they contribute the most to the estimate). The only 'real' difference is that encoders are on the $D$-sphere, while evaluation points are on the $D$-ball. These two problems can be solved efficiently by the **number-theoretic method (NTM)**, also known as the **quasi Monte Carlo method**, to generate scattered points. These solutions can then be used to sample encoders and evaluation points, to improve the representation of a population and its decoders.
# In[ ]:
def do_trial(seed, encoders, eval_points, n_eval_points, test_points, n_test_points,
             n_neurons, dims):
    """Build one nengo ensemble and measure train vs. test decoding error.

    Solves for decoders on ``n_eval_points`` drawn from ``eval_points``,
    then evaluates the same decoders on fresh points drawn from
    ``test_points``.

    Returns a tuple ``(mean train RMSE, mean test RMSE)``.
    """
    with nengo.Network(seed=seed) as model:
        # Make a single ensemble and connection
        ens = nengo.Ensemble(
            n_neurons, dims, encoders=encoders, eval_points=eval_points,
            n_eval_points=n_eval_points)
        conn = nengo.Connection(ens, nengo.Node(size_in=dims))

    # Build the model (NoDecoderCache forces the solver to actually run,
    # so each trial's RMSEs are freshly computed)
    built = nengo.builder.Model(decoder_cache=nengo.cache.NoDecoderCache())
    built.build(model)
    sim = nengo.Simulator(None, model=built)

    # Find the optimal decoders and their corresponding RMSE on the eval_points
    decoders = sim.data[conn].weights
    eval_rmses = np.mean(sim.data[conn].solver_info['rmses'])

    # Sample some new test_points and test them on the same decoders
    x = test_points.sample(n_test_points, dims, rng=np.random.RandomState(seed))
    a = get_activities(sim.model, ens, x)
    x_hat = np.dot(a, decoders.T)
    test_rmses = nengo.utils.numpy.rmse(x, x_hat, axis=1)

    # Return the average training and test errors
    return np.mean(eval_rmses), np.mean(test_rmses)
def do_experiment(n_neurons, dims, n_eval_points=500, test_points=uniform_ball,
                  n_test_points=500, trials=100):
    """Compare train/test decoding error across four sampling schemes.

    Runs ``trials`` seeds of do_trial() for each combination of
    encoder/eval-point sampling (uniform vs. scattered) and plots the
    error distributions side by side. Legend percentages compare each
    scheme's mean error against the all-uniform ('Default') baseline.
    """
    fig, ax = pylab.subplots(1, 2, sharey=True, figsize=(15, 6))
    ax[0].set_title('Train Error')
    ax[1].set_title('Test Error')

    configurations = (
        ('Default', uniform_sphere, uniform_ball),
        ('Encoders', scatter_sphere, uniform_ball),
        ('Eval Points', uniform_sphere, scatter_ball),
        ('Both', scatter_sphere, scatter_ball),
    )

    baseline_means = None
    for label, encoders, eval_points in configurations:
        errors = np.empty((trials, 2))
        for seed in range(trials):
            errors[seed] = do_trial(
                seed, encoders, eval_points, n_eval_points,
                test_points, n_test_points, n_neurons, dims)
        means = np.mean(errors, axis=0)
        # The first configuration ('Default') is the comparison baseline.
        if baseline_means is None:
            baseline_means = means
        for j in range(2):
            legend_label = '%s (%d%%)' % (
                label, baseline_means[j] / means[j] * 100)
            if sns is None:
                ax[j].hist(errors[:, j], label=legend_label, lw=1, alpha=0.3)
            else:
                sns.kdeplot(errors[:, j], ax=ax[j], label=legend_label,
                            lw=4, alpha=0.6)

    ax[0].legend()
    ax[1].legend()
    pylab.show()
# Run the comparison for a modest ensemble (100 neurons, 16 dimensions).
do_experiment(n_neurons=100, dims=16)
# However, problem 3 is _strictly_ harder (and in fact almost completely different), and so we will save that talk for a later day.
#
# ## The Number-Theoretic Method (NTM)
#
# This exact same problem showed up as early as 1961 and was studied extensively in the 1980s [1] for applications in **experimental design** and **statistical finance**, in which the task boils down to evaluating a high-dimensional integral:
#
# $$\int_{S} f({\bf u})\,{\rm d}{\bf u}$$
#
# where $S$ is some $D$-dimensional space. Due to the **curse of dimensionality**, even modestly sized $D$ requires too many points to evaluate using standard numerical integration techniques like the trapezoidal rule. Instead, the standard approach is to choose a sub-sampling of **representative points** $\{{\bf x_1}, \ldots, {\bf x_N}\}$ over the domain of $S$, and compute:
#
# $$\approx \frac{1}{N}\,\sum_{i=1}^N f({\bf x_i})$$
#
# This works well as long as we can sample these points uniformly. It has been theoretically proven that the approximation error from using the NTM is superior to that of MC sampling.
#
# ---
#
# To make the connection back to our original problem explicit, when solving for the decoders we are minimizing the mean squared error given by:
#
# $$\int_{S} ({\bf x} - {\bf \hat{x}})^2 \,{\rm d}{\bf x}$$
#
# where $S$ is the $D$-ball. Thus, $f({\bf u}) = ({\bf u} - {\bf \hat{u}})^2$ is the function we need to integrate. And the points $\{{\bf x_1}, \ldots, {\bf x_N}\}$ are precisely the set of $N$ evaluation points that we are, in effect, choosing to approximate this integral. This explains more formally why this new approach out-performs the old approach.
#
# ---
#
# Now the NTM goes by a number of roughly equivalent names:
# - uniform design
# - NT-net
# - quasi Monte Carlo
# - quasi-random
# - low-discrepancy sequence
# - representative points
# - uniformly scattered points
#
# We will refer to the collective theory as the NTM, and to a specific sampling as a uniform scattering.
#
# There are many algorithms to generate scattered points in the literature:
# - Faure
# - Good Points
# - Good Lattice Points
# - ~~Latin Square~~
# - Haber
# - Halton (<NAME>)
# - Hammersley
# - <NAME> (cyclotomic field)
# - Niederreiter
# - ~~Poisson Disk~~
# - Sobol
#
# Since **Sobol** had the most readily-available Python library (and the largest Wikipedia page), I used it for all my experiments.
#
# All of these approaches (except the Latin Square and Poisson Disk sampling) attempt to minimize the **discrepancy** of the samples. Informally, the discrepancy measure tells us how much the sample distribution differs from the underlying distribution. Formally, we define the empirical distribution as the cumulative distribution function (CDF) $F_N({\bf x}) = P({\bf X} \le {\bf x})$, where:
#
# $$P({\bf X} = {\bf x_i}) = \frac{1}{N}, \quad i = 1 \ldots N$$
#
# Then the discrepancy of this set is defined as:
#
# $$D(N) = sup_{x \in \mathbb{R}^D} |F_N({\bf x}) - F({\bf x})|$$
#
# where $F$ is the true CDF. This gives an upper-bound on how well the empirical CDF approximates the true CDF. The lower this is, the better our sample set represents the true distribution at all points (not just the points that were sampled). And all of these approaches (again, except for Latin/Poisson) have worst-case (guaranteed) bounds that are asymptotically dominated by MC in theory (and for lower $N$ as well in practice).
#
# $$D(N) = \begin{cases}
# O(\frac{1}{\sqrt{N}}) & \text{MC Sampling} \\
# O(\frac{(log N)^D}{N}) & \text{NTM Sampling}
# \end{cases}$$
#
# The $\sqrt{N} = o(N)$ is the important part. The numerator is a worst-case that is reported to be a small constant in practice.
#
# The nice fact that comes out of all of this, is that: the discrepancy is related to the error when computing the above integral by a constant factor (fixed for a given $f$), and thus reflects the error in approximating the decoders' true RMSE (its generalizability).
#
# $$\left|\underbrace{\int_{S} ({\bf x} - {\bf \hat{x}})^2 \,{\rm d}{\bf x}}_{\text{Testing Error}} - \underbrace{\frac{1}{N}\,\sum_{i=1}^N ({\bf x_i} - {\bf \hat{x_i}})^2}_{\text{Training Error}} \right| \le \underbrace{C(f) \, D(N)}_{\text{Generalization Error}}$$
#
# where $C(f)$ is a constant that depends on the ensemble and the function being optimized, and $D(N)$ is the discrepancy of the $N$ evaluation points. When choosing evaluation points, we are fixing $C(f)$ and trying to minimize $D(N)$. Therefore, when using NTMs over MC sampling, we only need on the order of $\sqrt{N}$ as many evaluation points to get the same level of performance; NTM squares the effective number of evaluation points!
#
# Now, everything is fine and dandy. It's a simple one-line call in Python to generate a sequence that does exactly what we need. However... all of these methods generate points on the $D$-cube, but not the $D$-sphere or $D$-ball as required. Fortunately, [1] describes an **inverse transform method** which allows us to generate scattered points on any distribution provided we can represent ${\bf x}$ as a set of independent random variables with some known inverse CDF. Furthermore, the authors have already done this for the $D$-sphere and $D$-ball using the **spherical coordinate transformation**!
# ## The Inverse Transform Method
#
# It is not at all clear how to map scattered points from the $D$-cube to the $D$-sphere or $D$-ball. If we just try to normalize the vectors to the sphere, then the results are poor.
# In[ ]:
def plot_3d(xyz, title, s=10, alpha=0.7):
    """Render the rows of *xyz* as a 3-D scatter plot on fixed [-1, 1] axes."""
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection
    fig = pylab.figure(figsize=(7, 7))
    axes = fig.add_subplot(111, projection='3d')
    axes.set_title(title)
    axes.scatter(*xyz.T, alpha=alpha, s=s)
    # Same cube limits on every axis so the sphere/ball is not distorted.
    for fix_limits in (axes.set_xlim, axes.set_ylim, axes.set_zlim):
        fix_limits(-1, 1)
    axes.view_init(elev=35, azim=35)
    pylab.show()
def normalize(x):
    """Scale each row of *x* to unit Euclidean length (project onto the sphere)."""
    row_lengths = nengo.utils.numpy.norm(x, axis=1, keepdims=True)
    return x / row_lengths
# Demo: a Sobol sample fills the 3-cube nicely, but naively normalizing the
# centered points does NOT give uniform coverage of the sphere.
n_samples = 500
sample = Sobol().sample(n_samples, 3)
plot_3d(sample, 'Sobol 3-Cube')
plot_3d(normalize(sample - 0.5), 'Normalized Sobol 3-Cube')
# And it actually gets worse as the dimensionality goes up, because the volume of a $D$-cube is concentrated outside the region of the $D$-ball (and so vectors tend to get normalized to the corners)!
#
# The following procedure (referred to as the "inverse transform method") is a general way to sample an arbitrary multivariate random variable ${\bf X}$, using only the hyper-cube:
# 1. Pick a transformation $T$ that maps each ${\bf x}$ to a vector ${\bf y} = T({\bf x})$ such that its components are mutually independent (might be identity).
# 2. Given the CDF of ${\bf X}$ and the chosen transformation, determine the CDF $F$ of ${\bf Y}$ by a substitution of variables (hard part).
# 3. Then ${\bf x} = T^{-1}(F^{-1}({\bf z}))$ samples ${\bf X}$ uniformly, when ${\bf z}$ is sampled from a hyper-cube with the same dimensionality as ${\bf Y}$ (numerical part).
#
# In[ ]:
plot_3d(scatter_sphere.sample(n_samples, 3), 'Scattered 3-Sphere')
# ## Spherical Coordinate Transformation
#
# There's a 10 page derivation in [1] for both the $D$-ball and $D$-sphere. The sphere case proceeds as follows:
# 1. The transformation $T$ is the spherical coordinate transformation, such that ${\bf y}$ is a $(D-1)$-dimensional vector of angles.
# 2. The distribution of the $i^{th}$ element of ${\bf y}$ is:
#
# $$F_i(y_i) = \begin{cases}
# \frac{1}{2} B(\sin(\pi y_i)^2; \frac{D-i}{2}, \frac{1}{2}) & y_i < \frac{1}{2} \\
# 1 - \frac{1}{2} B(\sin(\pi y_i)^2; \frac{D-i}{2}, \frac{1}{2}) & \text{otherwise}
# \end{cases}, \quad i=1 \ldots D-1$$
#
# where $B$ is the regularized incomplete beta function.
# 3. This distribution can be inverted using scipy's `betaincinv` function. Also, $T$ is easy to invert. Then take a scattered sample from the $(D-1)$-cube and apply the inverse functions.
#
# To modify this to work for the $D$-ball, we instead sample from the $D$-cube and take the last component to be a normalization coefficient for the resulting vector (by raising it to the power of $\frac{1}{D}$). See code for details.
#
# Note: The distribution given by $F$ is closely related to Jan's `SqrtBeta` distribution of subvector lengths. They are identical after substituting the variable $x = sin(\pi y_i)$ with $n=1$ and $m=D-i$, scaling by $2$, and dealing with the reflection about $y_i = \frac{1}{2}$.
# In[ ]:
# Compare scattered (NTM) vs. uniform (Monte Carlo) sampling of the 3-ball.
plot_3d(scatter_ball.sample(10000, 3), 'Scattered 3-Ball', 1)
plot_3d(uniform_ball.sample(10000, 3), 'Uniform 3-Ball', 1)
# ## Acknowledgements
#
# Many thanks to <NAME> from the SpiNNaker group in Manchester for providing me with all of the relevant background and reading material.
#
# [1] <NAME> and <NAME>, _Number-Theoretic Methods in Statistics_. Chapman & Hall, 1994.
| [
"seaborn.kdeplot",
"numpy.empty",
"nengo.utils.numpy.norm",
"pylab.subplots",
"numpy.mean",
"pylab.figure",
"nengolib.stats.Sobol",
"nengo.Simulator",
"nengolib.stats.ScatteredHypersphere",
"pylab.title",
"nengo.Node",
"nengo.dists.UniformHypersphere",
"numpy.random.RandomState",
"pylab.yl... | [((351, 396), 'nengo.dists.UniformHypersphere', 'nengo.dists.UniformHypersphere', ([], {'surface': '(False)'}), '(surface=False)\n', (381, 396), False, 'import nengo\n'), ((414, 458), 'nengo.dists.UniformHypersphere', 'nengo.dists.UniformHypersphere', ([], {'surface': '(True)'}), '(surface=True)\n', (444, 458), False, 'import nengo\n'), ((474, 509), 'nengolib.stats.ScatteredHypersphere', 'ScatteredHypersphere', ([], {'surface': '(False)'}), '(surface=False)\n', (494, 509), False, 'from nengolib.stats import ScatteredHypersphere, Sobol\n'), ((527, 561), 'nengolib.stats.ScatteredHypersphere', 'ScatteredHypersphere', ([], {'surface': '(True)'}), '(surface=True)\n', (547, 561), False, 'from nengolib.stats import ScatteredHypersphere, Sobol\n'), ((777, 805), 'pylab.figure', 'pylab.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (789, 805), False, 'import pylab\n'), ((810, 828), 'pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (821, 828), False, 'import pylab\n'), ((900, 917), 'pylab.xlim', 'pylab.xlim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (910, 917), False, 'import pylab\n'), ((922, 939), 'pylab.ylim', 'pylab.ylim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (932, 939), False, 'import pylab\n'), ((944, 956), 'pylab.show', 'pylab.show', ([], {}), '()\n', (954, 956), False, 'import pylab\n'), ((4025, 4075), 'pylab.subplots', 'pylab.subplots', (['(1)', '(2)'], {'sharey': '(True)', 'figsize': '(15, 6)'}), '(1, 2, sharey=True, figsize=(15, 6))\n', (4039, 4075), False, 'import pylab\n'), ((5138, 5150), 'pylab.show', 'pylab.show', ([], {}), '()\n', (5148, 5150), False, 'import pylab\n'), ((10862, 10890), 'pylab.figure', 'pylab.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (10874, 10890), False, 'import pylab\n'), ((11111, 11123), 'pylab.show', 'pylab.show', ([], {}), '()\n', (11121, 11123), False, 'import pylab\n'), ((2766, 2790), 'nengo.Network', 'nengo.Network', ([], {'seed': 'seed'}), '(seed=seed)\n', (2779, 2790), False, 'import 
nengo\n'), ((2863, 2971), 'nengo.Ensemble', 'nengo.Ensemble', (['n_neurons', 'dims'], {'encoders': 'encoders', 'eval_points': 'eval_points', 'n_eval_points': 'n_eval_points'}), '(n_neurons, dims, encoders=encoders, eval_points=eval_points,\n n_eval_points=n_eval_points)\n', (2877, 2971), False, 'import nengo\n'), ((3212, 3246), 'nengo.Simulator', 'nengo.Simulator', (['None'], {'model': 'built'}), '(None, model=built)\n', (3227, 3246), False, 'import nengo\n'), ((3403, 3447), 'numpy.mean', 'np.mean', (["sim.data[conn].solver_info['rmses']"], {}), "(sim.data[conn].solver_info['rmses'])\n", (3410, 3447), True, 'import numpy as np\n'), ((3627, 3660), 'nengolib.compat.get_activities', 'get_activities', (['sim.model', 'ens', 'x'], {}), '(sim.model, ens, x)\n', (3641, 3660), False, 'from nengolib.compat import get_activities\n'), ((3677, 3698), 'numpy.dot', 'np.dot', (['a', 'decoders.T'], {}), '(a, decoders.T)\n', (3683, 3698), True, 'import numpy as np\n'), ((3720, 3760), 'nengo.utils.numpy.rmse', 'nengo.utils.numpy.rmse', (['x', 'x_hat'], {'axis': '(1)'}), '(x, x_hat, axis=1)\n', (3742, 3760), False, 'import nengo\n'), ((4482, 4503), 'numpy.empty', 'np.empty', (['(trials, 2)'], {}), '((trials, 2))\n', (4490, 4503), True, 'import numpy as np\n'), ((4713, 4736), 'numpy.mean', 'np.mean', (['errors'], {'axis': '(0)'}), '(errors, axis=0)\n', (4720, 4736), True, 'import numpy as np\n'), ((11158, 11206), 'nengo.utils.numpy.norm', 'nengo.utils.numpy.norm', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (11180, 11206), False, 'import nengo\n'), ((11233, 11240), 'nengolib.stats.Sobol', 'Sobol', ([], {}), '()\n', (11238, 11240), False, 'from nengolib.stats import ScatteredHypersphere, Sobol\n'), ((3030, 3054), 'nengo.Node', 'nengo.Node', ([], {'size_in': 'dims'}), '(size_in=dims)\n', (3040, 3054), False, 'import nengo\n'), ((3839, 3858), 'numpy.mean', 'np.mean', (['eval_rmses'], {}), '(eval_rmses)\n', (3846, 3858), True, 'import numpy as np\n'), 
((3860, 3879), 'numpy.mean', 'np.mean', (['test_rmses'], {}), '(test_rmses)\n', (3867, 3879), True, 'import numpy as np\n'), ((3141, 3169), 'nengo.cache.NoDecoderCache', 'nengo.cache.NoDecoderCache', ([], {}), '()\n', (3167, 3169), False, 'import nengo\n'), ((3586, 3613), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (3607, 3613), True, 'import numpy as np\n'), ((5034, 5095), 'seaborn.kdeplot', 'sns.kdeplot', (['errors[:, j]'], {'ax': 'ax[j]', 'label': 'l', 'lw': '(4)', 'alpha': '(0.6)'}), '(errors[:, j], ax=ax[j], label=l, lw=4, alpha=0.6)\n', (5045, 5095), True, 'import seaborn as sns\n')] |
"""
aero_csm_component.py
Created by NWTC Systems Engineering Sub-Task on 2012-08-01.
Copyright (c) NREL. All rights reserved.
"""
import numpy as np
from math import pi, gamma, exp
from wisdem.commonse.utilities import smooth_abs, smooth_min, hstack
from wisdem.nrelcsm.csmPPI import PPI
# Initialize ref and current YYYYMM
# Calling program can override these
# e.g., ppi.ref_yr = 2003, etc.
ref_yr = 2002
ref_mon = 9
curr_yr = 2009
curr_mon = 12
ppi = PPI(ref_yr, ref_mon, curr_yr, curr_mon)
# NREL Cost and Scaling Model plant energy modules
##################################################
class aero_csm(object):
    """Aerodynamics module of the NREL Cost and Scaling Model.

    Builds an idealized (region 2 / region 2.5) rotor power curve from a small
    set of turbine parameters and derives the rated wind speed, rated rotor
    speed, rotor torque and rotor thrust.
    """
    def __init__(self):
        """Declare the model outputs; they are filled in by compute()."""
        # Variables
        # machine_rating = Float(units = 'kW', iotype='in', desc= 'rated machine power in kW')
        # max_tip_speed = Float(units = 'm/s', iotype='in', desc= 'maximum allowable tip speed for the rotor')
        # rotor_diameter = Float(units = 'm', iotype='in', desc= 'rotor diameter of the machine')
        # max_power_coefficient = Float(iotype='in', desc= 'maximum power coefficient of rotor for operation in region 2')
        # opt_tsr = Float(iotype='in', desc= 'optimum tip speed ratio for operation in region 2')
        # cut_in_wind_speed = Float(units = 'm/s', iotype='in', desc= 'cut in wind speed for the wind turbine')
        # cut_out_wind_speed = Float(units = 'm/s', iotype='in', desc= 'cut out wind speed for the wind turbine')
        # hub_height = Float(units = 'm', iotype='in', desc= 'hub height of wind turbine above ground / sea level')
        # altitude = Float(units = 'm', iotype='in', desc= 'altitude of wind plant')
        # air_density = Float(units = 'kg / (m * m * m)', iotype='in', desc= 'air density at wind plant site') # default air density value is 0.0 - forces aero csm to calculate air density in model
        # max_efficiency = Float(iotype='in', desc = 'maximum efficiency of rotor and drivetrain - at rated power')
        # thrust_coefficient = Float(iotype='in', desc='thrust coefficient at rated power')
        # Outputs
        self.rated_wind_speed = 0.0  # Float(units = 'm / s', iotype='out', desc='wind speed for rated power')
        self.rated_rotor_speed = 0.0  # Float(units = 'rpm', iotype='out', desc = 'rotor speed at rated power')
        self.rotor_thrust = 0.0  # Float(iotype='out', units='N', desc='maximum thrust from rotor')
        self.rotor_torque = 0.0  # Float(iotype='out', units='N * m', desc = 'torque from rotor at rated power')
        self.power_curve = np.zeros(161)  # Array(iotype='out', units='kW', desc='total power before drivetrain losses')
        self.wind_curve = np.zeros(
            161
        )  # Array(iotype='out', units='m/s', desc='wind curve associated with power curve')
    def compute(
        self,
        machine_rating,
        max_tip_speed,
        rotor_diameter,
        max_power_coefficient,
        opt_tsr,
        cut_in_wind_speed,
        cut_out_wind_speed,
        hub_height,
        altitude,
        air_density,
        max_efficiency,
        thrust_coefficient,
    ):
        """
        Executes Aerodynamics Sub-module of the NREL _cost and Scaling Model to create a power curve based on a limited set of inputs.
        It then modifies the ideal power curve to take into account drivetrain efficiency losses through an interface to a drivetrain efficiency model.

        Passing air_density == 0.0 makes the model compute air density itself
        from the standard-atmosphere lapse-rate formula at (altitude + hub_height).
        """
        # initialize input parameters
        self.hubHt = hub_height
        self.ratedPower = machine_rating
        self.maxTipSpd = max_tip_speed
        self.rotorDiam = rotor_diameter
        self.maxCp = max_power_coefficient
        self.maxTipSpdRatio = opt_tsr
        self.cutInWS = cut_in_wind_speed
        self.cutOutWS = cut_out_wind_speed
        if air_density == 0.0:
            # Compute air density from the standard atmosphere at the hub.
            ssl_pa = 101300  # std sea-level pressure in Pa
            gas_const = 287.15  # gas constant for air in J/kg/K
            gravity = 9.80665  # standard gravity in m/sec/sec
            lapse_rate = 0.0065  # temp lapse rate in K/m
            ssl_temp = 288.15  # std sea-level temp in K
            air_density = (
                ssl_pa
                * (1 - ((lapse_rate * (altitude + self.hubHt)) / ssl_temp)) ** (gravity / (lapse_rate * gas_const))
            ) / (gas_const * (ssl_temp - lapse_rate * (altitude + self.hubHt)))
        else:
            # Use the caller-supplied density as-is (no-op retained from original).
            air_density = air_density
        # determine power curve inputs
        self.reg2pt5slope = 0.05  # region 2.5 slope parameter (fractional speed margin below rated)
        # self.max_efficiency = self.drivetrain.getMaxEfficiency()
        self.ratedHubPower = self.ratedPower / max_efficiency  # RatedHubPower
        self.omegaM = self.maxTipSpd / (self.rotorDiam / 2.0)  # Omega M - rated rotor speed
        omega0 = self.omegaM / (1 + self.reg2pt5slope)  # Omega 0 - rotor speed at which region 2 hits zero torque
        Tm = self.ratedHubPower * 1000 / self.omegaM  # Tm - rated torque
        # compute rated rotor speed
        self.ratedRPM = (30.0 / pi) * self.omegaM
        # compute variable-speed torque constant k
        kTorque = (air_density * pi * self.rotorDiam ** 5 * self.maxCp) / (64 * self.maxTipSpdRatio ** 3)  # k
        b = -Tm / (self.omegaM - omega0)  # b - quadratic formula values to determine omegaT
        c = (Tm * omega0) / (self.omegaM - omega0)  # c
        # omegaT is rotor speed at which regions 2 and 2.5 intersect
        # add check for feasibility of omegaT calculation 09/20/2012
        omegaTflag = True
        if (b ** 2 - 4 * kTorque * c) > 0:
            # Real root exists: regions 2 and 2.5 intersect.
            omegaT = -(b / (2 * kTorque)) - (np.sqrt(b ** 2 - 4 * kTorque * c) / (2 * kTorque))  # Omega T
            windOmegaT = (omegaT * self.rotorDiam) / (2 * self.maxTipSpdRatio)  # Wind at omegaT (M25)
            pwrOmegaT = kTorque * omegaT ** 3 / 1000  # Power at omegaT (M26)
        else:
            # No intersection; fall back to rated values (feasibility fix).
            omegaTflag = False
            windOmegaT = self.ratedRPM
            pwrOmegaT = self.ratedPower
        # compute rated wind speed (weighted blend of two estimates)
        d = air_density * np.pi * self.rotorDiam ** 2.0 * 0.25 * self.maxCp
        self.ratedWindSpeed = 0.33 * ((2.0 * self.ratedHubPower * 1000.0 / (d)) ** (1.0 / 3.0)) + 0.67 * (
            (((self.ratedHubPower - pwrOmegaT) * 1000.0) / (1.5 * d * windOmegaT ** 2.0)) + windOmegaT
        )
        # set up for idealized power curve
        n = 161  # number of wind speed bins
        itp = [None] * n
        ws_inc = 0.25  # size of wind speed bins for integrating power curve
        Wind = []
        Wval = 0.0
        Wind.append(Wval)
        for i in range(1, n):
            Wval += ws_inc
            Wind.append(Wval)
        # determine idealized power curve
        self.idealPowerCurve(Wind, itp, kTorque, windOmegaT, pwrOmegaT, n, omegaTflag)
        # add a fix for rated wind speed calculation inaccuracies kld 9/21/2012
        ratedWSflag = False  # set on the first bin clamped to rated power (otherwise unused)
        # determine power curve after losses
        mtp = [None] * n
        for i in range(0, n):
            mtp[i] = itp[i]  # * self.drivetrain.getdrivetrain_efficiency(itp[i],self.ratedHubPower)
            # print [Wind[i],itp[i],self.drivetrain.getdrivetrain_efficiency(itp[i],self.ratedHubPower),mtp[i]] # for testing
            if mtp[i] > self.ratedPower:
                if not ratedWSflag:
                    ratedWSflag = True
                # Clamp the curve at rated power.
                mtp[i] = self.ratedPower
        self.rated_wind_speed = self.ratedWindSpeed
        self.rated_rotor_speed = self.ratedRPM
        self.power_curve = np.array(mtp)
        self.wind_curve = Wind
        # compute turbine load outputs
        self.rotor_torque = self.ratedHubPower / (self.ratedRPM * (pi / 30.0)) * 1000.0
        self.rotor_thrust = (
            air_density * thrust_coefficient * pi * rotor_diameter ** 2 * (self.ratedWindSpeed ** 2) / 8.0
        )
    def idealPowerCurve(self, Wind, ITP, kTorque, windOmegaT, pwrOmegaT, n, omegaTflag):
        """
        Determine the ITP (idealized turbine power) array.

        Fills ITP in place: zero outside [cutInWS, cutOutWS], cubic (region 2)
        below windOmegaT, and a linear ramp to rated (region 2.5) above it.
        """
        idealPwr = 0.0
        for i in range(0, n):
            if (Wind[i] >= self.cutOutWS) or (Wind[i] <= self.cutInWS):
                idealPwr = 0.0  # cut out
            else:
                if omegaTflag:
                    if Wind[i] > windOmegaT:
                        idealPwr = (self.ratedHubPower - pwrOmegaT) / (self.ratedWindSpeed - windOmegaT) * (
                            Wind[i] - windOmegaT
                        ) + pwrOmegaT  # region 2.5
                    else:
                        idealPwr = (
                            kTorque * (Wind[i] * self.maxTipSpdRatio / (self.rotorDiam / 2.0)) ** 3 / 1000.0
                        )  # region 2
                else:
                    idealPwr = (
                        kTorque * (Wind[i] * self.maxTipSpdRatio / (self.rotorDiam / 2.0)) ** 3 / 1000.0
                    )  # region 2
            ITP[i] = idealPwr
            # print [Wind[i],ITP[i]]
        return
def weibull(X, K, L):
    """
    Weibull probability density evaluated at wind speed X.

    Parameters
    ----------
    X : float
        wind speed of interest [m/s]
    K : float
        Weibull shape factor for site
    L : float
        Weibull scale factor for site [m/s]

    Returns
    -------
    float
        Value of the Weibull pdf with shape K and scale L at X.
    """
    scaled = X / L
    shape_term = scaled ** (K - 1)
    tail = exp(-(scaled ** K))
    return (K / L) * shape_term * tail
class aep_calc_csm(object):
    """Annual energy production (AEP) module of the NREL Cost and Scaling Model.

    Convolves a (post-drivetrain-loss) turbine power curve with a Weibull
    wind-speed distribution, then discounts the result for soiling losses,
    array losses and availability.
    """
    def __init__(self):
        # Outputs.  Fixed: the original assigned these to throwaway locals
        # inside __init__, so instances never carried the declared outputs.
        self.gross_aep = 0.0  # gross AEP before availability and loss impacts [kWh]
        self.net_aep = 0.0  # net AEP after losses and availability [kWh]
        self.power_array = 0.0  # stacked [wind_curve, power_curve] used in the convolution
        self.capacity_factor = 0.0  # plant capacity factor [-]
    def compute(
        self,
        power_curve,
        wind_curve,
        hub_height,
        shear_exponent,
        wind_speed_50m,
        weibull_k,
        machine_rating,
        soiling_losses,
        array_losses,
        availability,
        turbine_number,
    ):
        """
        Convolve the power curve with the site Weibull distribution to get AEP.

        Parameters
        ----------
        power_curve : array-like
            Turbine power after drivetrain losses at each wind bin [kW].
        wind_curve : array-like
            Wind speeds corresponding to power_curve, evenly spaced [m/s].
        hub_height : float
            Hub height [m]; the 50 m wind speed is sheared up to this height.
        shear_exponent : float
            Power-law wind shear exponent.
        wind_speed_50m : float
            Mean annual wind speed at 50 m height [m/s].
        weibull_k : float
            Weibull shape factor of the site wind distribution.
        machine_rating : float
            Rated machine power [kW] (used for the capacity factor).
        soiling_losses, array_losses : float
            Fractional energy losses (0-1).
        availability : float
            Average annual turbine availability (0-1).
        turbine_number : int
            Number of turbines in the plant.
        """
        power_array = np.array([wind_curve, power_curve])
        self.power_array = power_array  # fixed: expose the declared output (was never stored)
        # Shear the 50 m mean wind speed up to hub height (power law).
        hubHeightWindSpeed = ((hub_height / 50) ** shear_exponent) * wind_speed_50m
        K = weibull_k
        # Weibull scale factor from the mean wind speed: mean = L * gamma(1 + 1/K).
        # Simplified from exp(np.log(gamma(...))), which is an identity.
        L = hubHeightWindSpeed / gamma(1.0 + 1.0 / K)
        # Riemann-sum convolution of the power curve with the Weibull pdf.
        turbine_energy = 0.0
        for i in range(0, power_array.shape[1]):
            X = power_array[0, i]
            result = power_array[1, i] * weibull(X, K, L)
            turbine_energy += result
        ws_inc = power_array[0, 1] - power_array[0, 0]  # wind-speed bin width [m/s]
        self.gross_aep = turbine_energy * 8760.0 * turbine_number * ws_inc
        self.net_aep = self.gross_aep * (1.0 - soiling_losses) * (1.0 - array_losses) * availability
        self.capacity_factor = self.net_aep / (8760 * machine_rating)
class drivetrain_csm(object):
    """Drivetrain efficiency losses from the NREL Cost and Scaling Model.

    Applies an empirical efficiency curve (constant/linear/quadratic loss
    coefficients fit per drivetrain configuration) to an aerodynamic power
    curve to obtain delivered power.
    """
    # Efficiency-loss coefficients (constant, linear, quadratic) per
    # drivetrain configuration, from the NREL Cost and Scaling Model.
    _EFF_COEFFS = {
        "geared": (0.01289, 0.08510, 0.0),
        "single_stage": (0.01331, 0.03655, 0.06107),
        "multi_drive": (0.01547, 0.04463, 0.05790),
        "pm_direct_drive": (0.01007, 0.02000, 0.06899),
    }
    def __init__(self, drivetrain_type="geared"):
        """
        Parameters
        ----------
        drivetrain_type : str
            One of 'geared', 'single_stage', 'multi_drive', 'pm_direct_drive'.
        """
        self.drivetrain_type = drivetrain_type
        # Fixed: the original assigned np.zeros(161) to a discarded local
        # named `power`, so the output attribute was never initialized.
        self.power = np.zeros(161)  # total power after drivetrain losses [kW]
        self._grad_state = None  # intermediates cached by compute() for provideJ()
    def compute(self, aero_power, aero_torque, aero_thrust, rated_power):
        """Apply drivetrain losses to aero_power and store the result in self.power.

        aero_torque and aero_thrust are accepted for interface compatibility
        but do not enter the efficiency model.
        """
        try:
            constant, linear, quadratic = self._EFF_COEFFS[self.drivetrain_type]
        except KeyError:
            # Fixed: an unknown type previously fell through the if/elif chain
            # and crashed later with UnboundLocalError on `constant`.
            raise ValueError("unknown drivetrain_type: %r" % (self.drivetrain_type,))
        Pbar0 = aero_power / rated_power
        # handle negative power case (with absolute value)
        Pbar1, dPbar1_dPbar0 = smooth_abs(Pbar0, dx=0.01)
        # truncate idealized power curve for purposes of efficiency calculation
        Pbar, dPbar_dPbar1, _ = smooth_min(Pbar1, 1.0, pct_offset=0.01)
        # compute efficiency
        eff = 1.0 - (constant / Pbar + linear + quadratic * Pbar)
        self.power = aero_power * eff
        # Cache everything provideJ() needs.  Fixed: provideJ previously
        # referenced these names as (undefined) locals and raised NameError.
        self._grad_state = (aero_power, rated_power, Pbar, dPbar_dPbar1, dPbar1_dPbar0, eff, constant, quadratic)
    def provideJ(self):
        """Return the Jacobian of delivered power w.r.t. (aero_power, rated_power).

        Requires a prior call to compute().
        """
        (aero_power, rated_power, Pbar, dPbar_dPbar1, dPbar1_dPbar0, eff, constant, quadratic) = self._grad_state
        # gradients
        dPbar_dPa = dPbar_dPbar1 * dPbar1_dPbar0 / rated_power
        dPbar_dPr = -dPbar_dPbar1 * dPbar1_dPbar0 * aero_power / rated_power ** 2
        deff_dPa = dPbar_dPa * (constant / Pbar ** 2 - quadratic)
        deff_dPr = dPbar_dPr * (constant / Pbar ** 2 - quadratic)
        dP_dPa = eff + aero_power * deff_dPa
        dP_dPr = aero_power * deff_dPr
        self.J = hstack([np.diag(dP_dPa), dP_dPr])
        return self.J
class aep_csm(object):
    """Assembled AEP model: aerodynamics -> drivetrain losses -> AEP."""
    def __init__(self, drivetrain_type="geared"):
        # Sub-models, evaluated in sequence by compute().
        self.aero = aero_csm()
        self.drivetrain = drivetrain_csm(drivetrain_type)
        self.aep = aep_calc_csm()
    def compute(
        self,
        machine_rating,
        max_tip_speed,
        rotor_diameter,
        max_power_coefficient,
        opt_tsr,
        cut_in_wind_speed,
        cut_out_wind_speed,
        hub_height,
        altitude,
        air_density,
        max_efficiency,
        thrust_coefficient,
        soiling_losses,
        array_losses,
        availability,
        turbine_number,
        shear_exponent,
        wind_speed_50m,
        weibull_k,
    ):
        """Chain the three sub-models to produce plant AEP outputs.

        Results are left on the sub-model instances (self.aero, self.drivetrain,
        self.aep) rather than returned.
        """
        # 1) idealized rotor power curve and rotor loads
        self.aero.compute(
            machine_rating, max_tip_speed, rotor_diameter, max_power_coefficient,
            opt_tsr, cut_in_wind_speed, cut_out_wind_speed, hub_height, altitude,
            air_density, max_efficiency, thrust_coefficient,
        )
        # 2) apply drivetrain efficiency losses to the aerodynamic power curve
        self.drivetrain.compute(
            self.aero.power_curve, self.aero.rotor_torque, self.aero.rotor_thrust, machine_rating
        )
        # 3) convolve the delivered power curve with the site wind distribution
        self.aep.compute(
            self.drivetrain.power, self.aero.wind_curve, hub_height, shear_exponent,
            wind_speed_50m, weibull_k, machine_rating, soiling_losses, array_losses,
            availability, turbine_number,
        )
# NREL Cost and Scaling Model cost modules
##################################################
# Turbine Capital Costs
##################################################
##### Rotor
class blades_csm(object):
    """
    NREL Cost and Scaling Model mass and cost estimate for a single turbine blade.
    """
    def __init__(self):
        """Initialize the blade cost/mass outputs to zero."""
        super(blades_csm, self).__init__()
        # Outputs
        self.blade_cost = 0.0  # cost for a single wind turbine blade [USD]
        self.blade_mass = 0.0  # mass for a single wind turbine blade [kg]
    def compute(self, rotor_diameter, year=2009, month=12, advanced_blade=False):
        """
        Estimate blade mass [kg] and cost [USD] from the rotor diameter,
        escalating 2002-dollar curves to the project start date via the PPI.
        """
        # Variables
        self.rotor_diameter = rotor_diameter  # rotor diameter of the machine [m]
        # Parameters
        self.year = year  # year of project start
        self.month = month  # month of project start
        self.advanced_blade = advanced_blade  # use the advanced-blade mass/cost curves
        # Mass curve: coeff * radius ** exponent.
        if self.advanced_blade == True:
            mass_coeff, mass_exp = 0.4948, 2.5300
        else:
            mass_coeff, mass_exp = 0.1452, 2.9158
        self.blade_mass = mass_coeff * (self.rotor_diameter / 2.0) ** mass_exp
        ppi.curr_yr = curr_yr
        ppi.curr_mon = curr_mon
        labor_escalator = ppi.compute("IPPI_BLL")
        if self.advanced_blade == True:
            # Advanced-blade materials index is referenced to 2003; restore
            # the original reference year afterwards.
            saved_ref_yr = ppi.ref_yr
            ppi.ref_yr = 2003
            material_escalator = ppi.compute("IPPI_BLA")
            ppi.ref_yr = saved_ref_yr
            cubic_slope = 0.4019376
            cubic_intercept = -21051.045983
        else:
            material_escalator = ppi.compute("IPPI_BLD")
            cubic_slope = 0.4019376
            cubic_intercept = -955.24267
        labor_coeff = 2.7445
        labor_exp = 2.5025
        # Cost = (materials cubic in radius + labor power law), escalated and
        # grossed up by a 28% margin.
        blade_cost_current = (
            (cubic_slope * (self.rotor_diameter / 2.0) ** 3.0 + (cubic_intercept)) * material_escalator
            + (labor_coeff * (self.rotor_diameter / 2.0) ** labor_exp) * labor_escalator
        ) / (1.0 - 0.28)
        self.blade_cost = blade_cost_current
        # derivatives w.r.t. rotor diameter
        self.d_mass_d_diameter = mass_exp * (mass_coeff * (self.rotor_diameter / 2.0) ** (mass_exp - 1)) * (1 / 2.0)
        self.d_cost_d_diameter = (
            3.0 * (cubic_slope * (self.rotor_diameter / 2.0) ** 2.0) * material_escalator * (1 / 2.0)
            + (labor_exp * labor_coeff * (self.rotor_diameter / 2.0) ** (labor_exp - 1)) * labor_escalator * (1 / 2.0)
        ) / (1.0 - 0.28)
    def list_deriv_vars(self):
        """Names of the differentiable inputs and outputs of this model."""
        return ["rotor_diameter"], ["blade_mass", "blade_cost"]
    def provideJ(self):
        """Jacobian of (blade_mass, blade_cost) w.r.t. rotor_diameter."""
        self.J = np.array([[self.d_mass_d_diameter], [self.d_cost_d_diameter]])
        return self.J
class hub_csm(object):
    """
    object to wrap python code for NREL cost and scaling model for a wind turbine hub
    """
    def __init__(self):
        """
        OpenMDAO object to wrap hub model of the NREL _cost and Scaling Model (csmHub.py)
        """
        super(hub_csm, self).__init__()
        # Outputs
        self.hub_system_cost = 0.0  # Float(0.0, units='USD', iotype='out', desc='hub system cost')
        self.hub_system_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc='hub system mass')
        self.hub_cost = 0.0  # Float(0.0, units='USD', iotype='out', desc='hub cost')
        self.hub_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc='hub mass')
        self.pitch_system_cost = 0.0  # Float(0.0, units='USD', iotype='out', desc='pitch system cost')
        self.pitch_system_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc='pitch system mass')
        self.spinner_cost = 0.0  # Float(0.0, units='USD', iotype='out', desc='spinner / nose cone cost')
        self.spinner_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc='spinner / nose cone mass')
    def compute(self, rotor_diameter, blade_mass, year=2009, month=12, blade_number=3):
        """
        computes hub model of the NREL _cost and Scaling model to compute hub system object masses and costs.

        Masses come from empirical curves in blade mass / rotor diameter;
        2002-dollar costs are escalated to the project date via the PPI.
        Also populates d_*_d_diameter and d_*_d_blade_mass derivative
        attributes for use by gradient-based callers.
        """
        # Variables
        self.rotor_diameter = (
            rotor_diameter  # Float(126.0, units = 'm', iotype='in', desc= 'rotor diameter of the machine')
        )
        self.blade_mass = blade_mass  # Float(17650.67, units='kg', iotype='in', desc='mass of an individual blade')
        # Parameters
        self.year = year  # Int(2009, iotype='in', desc = 'year of project start')
        self.month = month  # Int(12, iotype='in', desc = 'month of project start')
        self.blade_number = blade_number  # Int(3, iotype='in', desc= 'number of rotor blades')
        # *** Pitch bearing and mechanism
        pitchBearingMass = 0.1295 * self.blade_mass * self.blade_number + 491.31  # slope*BldMass3 + int
        bearingHousingPct = 32.80 / 100.0  # housing adds 32.8% on top of the bearing mass
        massSysOffset = 555.0
        self.pitch_system_mass = pitchBearingMass * (1 + bearingHousingPct) + massSysOffset
        # *** Hub
        self.hub_mass = 0.95402537 * self.blade_mass + 5680.272238
        # *** NoseCone/Spinner
        self.spinner_mass = 18.5 * self.rotor_diameter + (-520.5)  # GNS
        self.hub_system_mass = self.hub_mass + self.pitch_system_mass + self.spinner_mass
        # Escalate 2002-dollar costs to the project date via the PPI indices.
        ppi.curr_yr = curr_yr
        ppi.curr_mon = curr_mon
        # *** Pitch bearing and mechanism
        bearingCost = 0.2106 * self.rotor_diameter ** 2.6576
        bearingCostEscalator = ppi.compute("IPPI_PMB")
        self.pitch_system_cost = bearingCostEscalator * (bearingCost + bearingCost * 1.28)
        # *** Hub
        hubCost2002 = self.hub_mass * 4.25  # $/kg
        hubCostEscalator = ppi.compute("IPPI_HUB")
        self.hub_cost = hubCost2002 * hubCostEscalator
        # *** NoseCone/Spinner
        spinnerCostEscalator = ppi.compute("IPPI_NAC")
        self.spinner_cost = spinnerCostEscalator * (5.57 * self.spinner_mass)
        self.hub_system_cost = self.hub_cost + self.pitch_system_cost + self.spinner_cost
        # derivatives
        self.d_hub_mass_d_diameter = 0.0
        self.d_pitch_mass_d_diameter = 0.0
        self.d_spinner_mass_d_diameter = 18.5
        self.d_system_mass_d_diameter = (
            self.d_hub_mass_d_diameter + self.d_pitch_mass_d_diameter + self.d_spinner_mass_d_diameter
        )
        self.d_hub_cost_d_diameter = 0.0
        # 2.28 = 1 + 1.28 (bearing + mechanism markup); 2.6576 is the cost-curve exponent
        self.d_pitch_cost_d_diameter = bearingCostEscalator * 2.28 * 2.6576 * (0.2106 * self.rotor_diameter ** 1.6576)
        self.d_spinner_cost_d_diameter = spinnerCostEscalator * (5.57 * self.d_spinner_mass_d_diameter)
        self.d_system_cost_d_diameter = (
            self.d_hub_cost_d_diameter + self.d_pitch_cost_d_diameter + self.d_spinner_cost_d_diameter
        )
        self.d_hub_mass_d_blade_mass = 0.95402537
        self.d_pitch_mass_d_blade_mass = 0.1295 * self.blade_number * (1 + bearingHousingPct)
        self.d_spinner_mass_d_blade_mass = 0.0
        self.d_system_mass_d_blade_mass = (
            self.d_hub_mass_d_blade_mass + self.d_pitch_mass_d_blade_mass + self.d_spinner_mass_d_blade_mass
        )
        self.d_hub_cost_d_blade_mass = self.d_hub_mass_d_blade_mass * 4.25 * hubCostEscalator
        self.d_pitch_cost_d_blade_mass = 0.0
        self.d_spinner_cost_d_blade_mass = 0.0
        self.d_system_cost_d_blade_mass = (
            self.d_hub_cost_d_blade_mass + self.d_pitch_cost_d_blade_mass + self.d_spinner_cost_d_blade_mass
        )
    def list_deriv_vars(self):
        """Names of the differentiable inputs and outputs of this model."""
        inputs = ["rotor_diameter", "blade_mass"]
        outputs = [
            "hub_mass",
            "pitch_system_mass",
            "spinner_mass",
            "hub_system_mass",
            "hub_cost",
            "pitch_system_cost",
            "spinner_cost",
            "hub_system_cost",
        ]
        return inputs, outputs
    def provideJ(self):
        """Jacobian of the outputs listed in list_deriv_vars() w.r.t. the inputs."""
        self.J = np.array(
            [
                [self.d_hub_mass_d_diameter, self.d_hub_mass_d_blade_mass],
                [self.d_pitch_mass_d_diameter, self.d_pitch_mass_d_blade_mass],
                [self.d_spinner_mass_d_diameter, self.d_spinner_mass_d_blade_mass],
                [self.d_system_mass_d_diameter, self.d_system_mass_d_blade_mass],
                [self.d_hub_cost_d_diameter, self.d_hub_cost_d_blade_mass],
                [self.d_pitch_cost_d_diameter, self.d_pitch_cost_d_blade_mass],
                [self.d_spinner_cost_d_diameter, self.d_spinner_cost_d_blade_mass],
                [self.d_system_cost_d_diameter, self.d_system_cost_d_blade_mass],
            ]
        )
        return self.J
##### Nacelle
class nacelle_csm(object):
"""
object to wrap python code for NREL cost and scaling model for a wind turbine nacelle
"""
    def __init__(self):
        """
        OpenMDAO object to wrap nacelle mass-cost model based on the NREL _cost and Scaling model data (csmNacelle.py).

        All outputs are initialized to zero here and filled in by compute().
        """
        super(nacelle_csm, self).__init__()
        # Outputs
        # -- component masses [kg] --
        self.nacelle_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc='nacelle mass')
        self.lowSpeedShaft_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'low speed shaft mass')
        self.bearings_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'bearings system mass')
        self.gearbox_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'gearbox and housing mass')
        self.mechanicalBrakes_mass = (
            0.0  # Float(0.0, units='kg', iotype='out', desc= 'high speed shaft, coupling, and mechanical brakes mass')
        )
        self.generator_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'generator and housing mass')
        self.VSElectronics_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'variable speed electronics mass')
        self.yawSystem_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'yaw system mass')
        self.mainframeTotal_mass = (
            0.0  # Float(0.0, units='kg', iotype='out', desc= 'mainframe total mass including bedplate')
        )
        self.electronicCabling_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'electronic cabling mass')
        self.HVAC_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'HVAC system mass')
        self.nacelleCover_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'nacelle cover mass')
        self.controls_mass = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'control system mass')
        # -- component costs (NOTE(review): the original Float(...) comments below say
        #    units='kg' for the per-component costs; presumably they are USD like
        #    nacelle_cost -- verify against csmNacelle.py) --
        self.nacelle_cost = 0.0  # Float(0.0, units='USD', iotype='out', desc='nacelle cost')
        self.lowSpeedShaft_cost = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'low speed shaft _cost')
        self.bearings_cost = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'bearings system _cost')
        self.gearbox_cost = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'gearbox and housing _cost')
        self.mechanicalBrakes_cost = (
            0.0  # Float(0.0, units='kg', iotype='out', desc= 'high speed shaft, coupling, and mechanical brakes _cost')
        )
        self.generator_cost = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'generator and housing _cost')
        self.VSElectronics_cost = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'variable speed electronics _cost')
        self.yawSystem_cost = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'yaw system _cost')
        self.mainframeTotal_cost = (
            0.0  # Float(0.0, units='kg', iotype='out', desc= 'mainframe total _cost including bedplate')
        )
        self.electronicCabling_cost = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'electronic cabling _cost')
        self.HVAC_cost = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'HVAC system _cost')
        self.nacelleCover_cost = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'nacelle cover _cost')
        self.controls_cost = 0.0  # Float(0.0, units='kg', iotype='out', desc= 'control system _cost')
def compute(
    self,
    rotor_diameter,
    rotor_mass,
    rotor_thrust,
    rotor_torque,
    machine_rating,
    drivetrain_design="geared",
    crane=True,
    advanced_bedplate=0,
    year=2009,
    month=12,
    offshore=True,
):
    """
    Compute nacelle component masses, costs and their derivatives with the
    NREL Cost and Scaling Model.

    Parameters
    ----------
    rotor_diameter : float -- diameter of the rotor [m]
    rotor_mass : float -- mass of rotor including blades and hub [kg]
    rotor_thrust : float -- maximum thrust from rotor [N]
    rotor_torque : float -- torque from rotor at rated power [N*m]
    machine_rating : float -- machine rated power [kW]
    drivetrain_design : str -- one of 'geared', 'single_stage', 'multi_drive',
        'pm_direct_drive'
    crane : bool -- presence of a service crane up tower
    advanced_bedplate : int -- drivetrain bedplate design (0 = conventional)
    year : int -- year of project start (PPI cost escalation)
    month : int -- month of project start (PPI cost escalation)
    offshore : bool -- True for an offshore project

    Raises
    ------
    ValueError -- if drivetrain_design is not one of the recognized options
    """
    # Variables
    self.rotor_diameter = rotor_diameter  # m
    self.rotor_mass = rotor_mass  # kg, rotor including blades and hub
    self.rotor_thrust = rotor_thrust  # N, maximum thrust from rotor
    self.rotor_torque = rotor_torque  # N*m, torque from rotor at rated power
    self.machine_rating = machine_rating  # kW
    # Parameters
    self.drivetrain_design = drivetrain_design
    self.crane = crane
    self.advanced_bedplate = advanced_bedplate
    self.year = year
    self.month = month
    self.offshore = offshore

    # basic variable initialization: the local `offshore` doubles as an index
    # into land/offshore cost tables below (0 = land, 1 = offshore)
    if self.offshore == False:
        offshore = 0
    else:
        offshore = 1

    # point the shared PPI cost escalator at the project start date
    ppi.curr_yr = self.year
    ppi.curr_mon = self.month

    # --- Low Speed Shaft ---
    lenShaft = 0.03 * self.rotor_diameter
    mmtArm = lenShaft / 5
    bendLoad = 1.25 * 9.81 * self.rotor_mass
    bendMom = bendLoad * mmtArm
    hFact = 0.1  # hollow shaft: inner/outer diameter ratio
    hollow = 1 / (1 - (hFact) ** 4)
    outDiam = (
        (32.0 / np.pi)
        * hollow
        * 3.25
        * ((self.rotor_torque * 3.0 / 371000000.0) ** 2 + (bendMom / 71070000) ** 2) ** (0.5)
    ) ** (1.0 / 3.0)
    inDiam = outDiam * hFact
    self.lowSpeedShaft_mass = 1.25 * (np.pi / 4) * (outDiam ** 2 - inDiam ** 2) * lenShaft * 7860
    LowSpeedShaftCost2002 = 0.0998 * self.rotor_diameter ** 2.8873
    lssCostEsc = ppi.compute("IPPI_LSS")
    self.lowSpeedShaft_cost = LowSpeedShaftCost2002 * lssCostEsc

    # LSS derivatives via chain rule through the outer-diameter expression
    d_mass_d_outD = 1.25 * (np.pi / 4) * (1 - 0.1 ** 2) * 2 * outDiam * lenShaft * 7860
    d_outD_mult = (
        ((32.0 / np.pi) * hollow * 3.25) ** (1.0 / 3.0)
        * (1.0 / 6.0)
        * ((self.rotor_torque * 3.0 / 371000000.0) ** 2 + (bendMom / 71070000.0) ** 2) ** (-5.0 / 6.0)
    )
    d_outD_d_diameter = d_outD_mult * 2.0 * (bendMom / 71070000) * (1.0 / 71070000.0) * (bendLoad * 0.03 / 5)
    d_outD_d_mass = d_outD_mult * 2.0 * (bendMom / 71070000) * (1.0 / 71070000.0) * (mmtArm * 1.25 * 9.81)
    d_outD_d_torque = d_outD_mult * 2.0 * (self.rotor_torque * 3.0 / 371000000.0) * (3.0 / 371000000.0)
    self.d_lss_mass_d_r_diameter = (
        d_mass_d_outD * d_outD_d_diameter + 1.25 * (np.pi / 4) * (outDiam ** 2 - inDiam ** 2) * 7860 * 0.03
    )
    self.d_lss_mass_d_r_mass = d_mass_d_outD * d_outD_d_mass
    self.d_lss_mass_d_r_torque = d_mass_d_outD * d_outD_d_torque
    self.d_lss_cost_d_r_diameter = lssCostEsc * 2.8873 * 0.0998 * self.rotor_diameter ** 1.8873

    # --- Gearbox ---
    # coefficient tables indexed by drivetrain type (1..4)
    costCoeff = [None, 16.45, 74.101, 15.25697015, 0]
    costExp = [None, 1.2491, 1.002, 1.2491, 0]
    massCoeff = [None, 65.601, 81.63967335, 129.1702924, 0]
    massExp = [None, 0.759, 0.7738, 0.7738, 0]
    # map drivetrain name -> table index
    if self.drivetrain_design == "geared":
        drivetrain_design = 1
    elif self.drivetrain_design == "single_stage":
        drivetrain_design = 2
    elif self.drivetrain_design == "multi_drive":  # FIX: was "multi-drive", which never matched the documented option
        drivetrain_design = 3
    elif self.drivetrain_design == "pm_direct_drive":
        drivetrain_design = 4
    else:
        # previously an unknown string fell through and crashed below with a
        # confusing TypeError on list indexing; fail fast with a clear message
        raise ValueError("unknown drivetrain_design: %r" % (self.drivetrain_design,))
    self.gearbox_mass = massCoeff[drivetrain_design] * (self.rotor_torque / 1000) ** massExp[drivetrain_design]
    gearboxCostEsc = ppi.compute("IPPI_GRB")
    Gearbox2002 = costCoeff[drivetrain_design] * self.machine_rating ** costExp[drivetrain_design]
    self.gearbox_cost = Gearbox2002 * gearboxCostEsc
    if drivetrain_design == 4:
        # direct drive has no gearbox; derivatives are identically zero
        self.d_gearbox_mass_d_r_torque = 0.0
        self.d_gearbox_cost_d_rating = 0.0
    else:
        self.d_gearbox_mass_d_r_torque = (
            massExp[drivetrain_design]
            * massCoeff[drivetrain_design]
            * ((self.rotor_torque / 1000.0) ** (massExp[drivetrain_design] - 1))
            * (1 / 1000.0)
        )
        self.d_gearbox_cost_d_rating = (
            gearboxCostEsc
            * costExp[drivetrain_design]
            * costCoeff[drivetrain_design]
            * self.machine_rating ** (costExp[drivetrain_design] - 1)
        )

    # --- Generator ---
    costCoeff = [None, 65.000, 54.72533, 48.02963, 219.3333]  # $/kW - from 'Generators' worksheet
    massCoeff = [None, 6.4737, 10.50972, 5.343902, 37.68400]
    massExp = [None, 0.9223, 0.922300, 0.922300, 1.000000]
    if drivetrain_design < 4:
        # geared configurations scale with rating
        self.generator_mass = massCoeff[drivetrain_design] * self.machine_rating ** massExp[drivetrain_design]
    else:  # direct drive scales with torque
        self.generator_mass = massCoeff[drivetrain_design] * self.rotor_torque ** massExp[drivetrain_design]
    generatorCostEsc = ppi.compute("IPPI_GEN")
    GeneratorCost2002 = costCoeff[drivetrain_design] * self.machine_rating
    self.generator_cost = GeneratorCost2002 * generatorCostEsc
    if drivetrain_design < 4:
        self.d_generator_mass_d_r_torque = 0.0
        self.d_generator_mass_d_rating = (
            massExp[drivetrain_design]
            * massCoeff[drivetrain_design]
            * self.machine_rating ** (massExp[drivetrain_design] - 1)
        )
    else:
        self.d_generator_mass_d_r_torque = (
            massExp[drivetrain_design]
            * massCoeff[drivetrain_design]
            * self.rotor_torque ** (massExp[drivetrain_design] - 1)
        )
        self.d_generator_mass_d_rating = 0.0
    self.d_generator_cost_d_rating = generatorCostEsc * costCoeff[drivetrain_design]

    # --- Rest of the system (masses) ---
    # --- electrical connections
    self.electronicCabling_mass = 0.0
    # --- bearings (bearing + housing, assumed equal mass)
    self.bearings_mass = 0.00012266667 * (self.rotor_diameter ** 3.5) - 0.00030360 * (self.rotor_diameter ** 2.5)
    HousingMass = self.bearings_mass
    self.bearings_mass += HousingMass
    self.d_bearings_mass_d_r_diameter = 2 * (
        3.5 * 0.00012266667 * (self.rotor_diameter ** 2.5) - 0.00030360 * 2.5 * (self.rotor_diameter ** 1.5)
    )
    # --- mechanical brake (mass estimated from 2002 cost at $10/kg)
    mechBrakeCost2002 = 1.9894 * self.machine_rating + (-0.1141)
    self.mechanicalBrakes_mass = mechBrakeCost2002 * 0.10
    self.d_brakes_mass_d_rating = 0.10 * 1.9894
    # --- variable-speed electronics
    self.VSElectronics_mass = 0.0
    # --- yaw drive bearings
    self.yawSystem_mass = 1.6 * (0.0009 * self.rotor_diameter ** 3.314)
    self.d_yaw_mass_d_r_diameter = 3.314 * 1.6 * (0.0009 * self.rotor_diameter ** 2.314)
    # --- hydraulics, cooling
    self.HVAC_mass = 0.08 * self.machine_rating
    self.d_hvac_mass_d_rating = 0.08

    # --- bedplate ---
    if self.advanced_bedplate == 0:  # not an actual option in cost and scaling model
        BedplateWeightFac = 2.86  # modular
    elif self.advanced_bedplate == 1:  # test for mod-adv
        BedplateWeightFac = 2.40  # modular-advanced
    else:
        BedplateWeightFac = 0.71  # advanced
    # These RD functions from the spreadsheet don't quite form a continuous
    # composite function; the small-rotor branches were removed for gradients
    # (large turbines assumed):
    #   if self.rotor_diameter <= 15.0:  TowerTopDiam = 0.3
    #   elif self.rotor_diameter <= 60.0: TowerTopDiam = 0.07042*self.rotor_diameter - 0.715
    TowerTopDiam = (12.29 * self.rotor_diameter + 2648) / 1000
    MassFromTorque = BedplateWeightFac * 0.00368 * self.rotor_torque
    MassFromThrust = 0.00158 * BedplateWeightFac * self.rotor_thrust * TowerTopDiam
    MassFromRotorWeight = 0.015 * BedplateWeightFac * self.rotor_mass * TowerTopDiam
    # Bedplate(Length|Area) added by GNS
    BedplateLength = 1.5874 * 0.052 * self.rotor_diameter
    BedplateArea = 0.5 * BedplateLength * BedplateLength
    MassFromArea = 100 * BedplateWeightFac * BedplateArea
    # mfmCoeff[1,4] for different drivetrain configurations
    mfmCoeff = [None, 22448, 1.29490, 1.72080, 22448]
    mfmExp = [None, 0, 1.9525, 1.9525, 0]
    # --- nacelle totals
    TotalMass = MassFromTorque + MassFromThrust + MassFromRotorWeight + MassFromArea
    if (drivetrain_design == 1) or (drivetrain_design == 4):
        self.bedplate_mass = TotalMass
    else:
        self.bedplate_mass = mfmCoeff[drivetrain_design] * (self.rotor_diameter ** mfmExp[drivetrain_design])
    NacellePlatformsMass = 0.125 * self.bedplate_mass
    # --- crane ---
    if self.crane:
        self.crane_mass = 3000.0
    else:
        self.crane_mass = 0.0
    # --- main frame ---
    self.mainframeTotal_mass = self.bedplate_mass + NacellePlatformsMass + self.crane_mass
    if (drivetrain_design == 1) or (drivetrain_design == 4):
        # 1.125 factor = bedplate + 0.125 platforms share
        self.d_mainframe_mass_d_r_diameter = 1.125 * (
            (
                (0.00158 * BedplateWeightFac * self.rotor_thrust * (12.29 / 1000.0))
                + (0.015 * BedplateWeightFac * self.rotor_mass * (12.29 / 1000.0))
                + (100 * BedplateWeightFac * 0.5 * (1.5874 * 0.052) ** 2.0 * (2 * self.rotor_diameter))
            )
        )
        self.d_mainframe_mass_d_r_mass = 1.125 * (0.015 * BedplateWeightFac * TowerTopDiam)
        self.d_mainframe_mass_d_r_thrust = 1.125 * (0.00158 * BedplateWeightFac * TowerTopDiam)
        self.d_mainframe_mass_d_r_torque = 1.125 * BedplateWeightFac * 0.00368
    else:
        self.d_mainframe_mass_d_r_diameter = (
            1.125
            * mfmCoeff[drivetrain_design]
            * (mfmExp[drivetrain_design] * self.rotor_diameter ** (mfmExp[drivetrain_design] - 1))
        )
        self.d_mainframe_mass_d_r_mass = 0.0
        self.d_mainframe_mass_d_r_thrust = 0.0
        self.d_mainframe_mass_d_r_torque = 0.0
    # --- nacelle cover ---
    nacelleCovCost2002 = 11.537 * self.machine_rating + (3849.7)
    self.nacelleCover_mass = nacelleCovCost2002 * 0.111111
    self.d_cover_mass_d_rating = 0.111111 * 11.537
    # --- control system ---
    self.controls_mass = 0.0

    # overall mass
    self.nacelle_mass = (
        self.lowSpeedShaft_mass
        + self.bearings_mass
        + self.gearbox_mass
        + self.mechanicalBrakes_mass
        + self.generator_mass
        + self.VSElectronics_mass
        + self.yawSystem_mass
        + self.mainframeTotal_mass
        + self.electronicCabling_mass
        + self.HVAC_mass
        + self.nacelleCover_mass
        + self.controls_mass
    )
    self.d_nacelle_mass_d_r_diameter = (
        self.d_lss_mass_d_r_diameter
        + self.d_bearings_mass_d_r_diameter
        + self.d_yaw_mass_d_r_diameter
        + self.d_mainframe_mass_d_r_diameter
    )
    self.d_nacelle_mass_d_r_mass = self.d_lss_mass_d_r_mass + self.d_mainframe_mass_d_r_mass
    self.d_nacelle_mass_d_r_thrust = self.d_mainframe_mass_d_r_thrust
    self.d_nacelle_mass_d_r_torque = (
        self.d_lss_mass_d_r_torque
        + self.d_gearbox_mass_d_r_torque
        + self.d_generator_mass_d_r_torque
        + self.d_mainframe_mass_d_r_torque
    )
    self.d_nacelle_mass_d_rating = (
        self.d_generator_mass_d_rating
        + self.d_brakes_mass_d_rating
        + self.d_hvac_mass_d_rating
        + self.d_cover_mass_d_rating
    )

    # --- Rest of System Costs ---
    # Cost Escalators - obtained from ppi tables
    bearingCostEsc = ppi.compute("IPPI_BRN")
    mechBrakeCostEsc = ppi.compute("IPPI_BRK")
    VspdEtronicsCostEsc = ppi.compute("IPPI_VSE")
    yawDrvBearingCostEsc = ppi.compute("IPPI_YAW")
    nacelleCovCostEsc = ppi.compute("IPPI_NAC")
    hydrCoolingCostEsc = ppi.compute("IPPI_HYD")
    mainFrameCostEsc = ppi.compute("IPPI_MFM")
    econnectionsCostEsc = ppi.compute("IPPI_ELC")
    # --- electrical connections
    self.electronicCabling_cost = 40.0 * self.machine_rating  # 2002
    self.electronicCabling_cost *= econnectionsCostEsc
    self.d_electronics_cost_d_rating = 40.0 * econnectionsCostEsc
    # --- bearings
    bearingMass = 0.00012266667 * (self.rotor_diameter ** 3.5) - 0.00030360 * (self.rotor_diameter ** 2.5)
    HousingMass = bearingMass
    brngSysCostFactor = 17.6  # $/kg
    Bearings2002 = bearingMass * brngSysCostFactor
    Housing2002 = HousingMass * brngSysCostFactor
    self.bearings_cost = (Bearings2002 + Housing2002) * bearingCostEsc
    self.d_bearings_cost_d_r_diameter = bearingCostEsc * brngSysCostFactor * self.d_bearings_mass_d_r_diameter
    # --- mechanical brake
    mechBrakeCost2002 = 1.9894 * self.machine_rating + (-0.1141)
    self.mechanicalBrakes_cost = mechBrakeCostEsc * mechBrakeCost2002
    self.d_brakes_cost_d_rating = mechBrakeCostEsc * 1.9894
    # --- variable-speed electronics
    VspdEtronics2002 = 79.32 * self.machine_rating
    self.VSElectronics_cost = VspdEtronics2002 * VspdEtronicsCostEsc
    self.d_vselectronics_cost_d_rating = VspdEtronicsCostEsc * 79.32
    # --- yaw drive bearings
    YawDrvBearing2002 = 2 * (0.0339 * self.rotor_diameter ** 2.9637)
    self.yawSystem_cost = YawDrvBearing2002 * yawDrvBearingCostEsc
    self.d_yaw_cost_d_r_diameter = yawDrvBearingCostEsc * 2 * 2.9637 * (0.0339 * self.rotor_diameter ** 1.9637)
    # --- hydraulics, cooling
    self.HVAC_cost = 12.0 * self.machine_rating  # 2002
    self.HVAC_cost *= hydrCoolingCostEsc
    self.d_hvac_cost_d_rating = hydrCoolingCostEsc * 12.0
    # --- control system ---
    initControlCost = [35000, 55900]  # land, off-shore (indexed by the local `offshore` flag)
    self.controls_cost = initControlCost[offshore] * ppi.compute("IPPI_CTL")
    # --- nacelle totals
    NacellePlatforms2002 = 8.7 * NacellePlatformsMass
    # --- nacelle cover ---
    nacelleCovCost2002 = 11.537 * self.machine_rating + (3849.7)
    self.nacelleCover_cost = nacelleCovCostEsc * nacelleCovCost2002
    self.d_cover_cost_d_rating = nacelleCovCostEsc * 11.537
    # --- crane ---
    if self.crane:
        self.crane_cost = 12000.0
    else:
        self.crane_cost = 0.0
    # --- main frame ---
    # mfmCoeff[1,4] for different drivetrain configurations
    mfmCoeff = [None, 9.4885, 303.96, 17.923, 627.28]
    mfmExp = [None, 1.9525, 1.0669, 1.6716, 0.8500]
    MainFrameCost2002 = mfmCoeff[drivetrain_design] * self.rotor_diameter ** mfmExp[drivetrain_design]
    BaseHardware2002 = MainFrameCost2002 * 0.7
    MainFrame2002 = MainFrameCost2002 + NacellePlatforms2002 + self.crane_cost + BaseHardware2002  # service crane
    self.mainframeTotal_cost = MainFrame2002 * mainFrameCostEsc
    self.d_mainframe_cost_d_r_diameter = mainFrameCostEsc * (
        1.7
        * mfmCoeff[drivetrain_design]
        * mfmExp[drivetrain_design]
        * self.rotor_diameter ** (mfmExp[drivetrain_design] - 1)
        + 8.7 * self.d_mainframe_mass_d_r_diameter * (0.125 / 1.125)
    )
    self.d_mainframe_cost_d_r_mass = mainFrameCostEsc * 8.7 * self.d_mainframe_mass_d_r_mass * (0.125 / 1.125)
    self.d_mainframe_cost_d_r_thrust = mainFrameCostEsc * 8.7 * self.d_mainframe_mass_d_r_thrust * (0.125 / 1.125)
    self.d_mainframe_cost_d_r_torque = mainFrameCostEsc * 8.7 * self.d_mainframe_mass_d_r_torque * (0.125 / 1.125)

    # overall system cost
    self.nacelle_cost = (
        self.lowSpeedShaft_cost
        + self.bearings_cost
        + self.gearbox_cost
        + self.mechanicalBrakes_cost
        + self.generator_cost
        + self.VSElectronics_cost
        + self.yawSystem_cost
        + self.mainframeTotal_cost
        + self.electronicCabling_cost
        + self.HVAC_cost
        + self.nacelleCover_cost
        + self.controls_cost
    )
    self.d_nacelle_cost_d_r_diameter = (
        self.d_lss_cost_d_r_diameter
        + self.d_bearings_cost_d_r_diameter
        + self.d_yaw_cost_d_r_diameter
        + self.d_mainframe_cost_d_r_diameter
    )
    self.d_nacelle_cost_d_r_mass = self.d_mainframe_cost_d_r_mass
    self.d_nacelle_cost_d_r_thrust = self.d_mainframe_cost_d_r_thrust
    self.d_nacelle_cost_d_r_torque = self.d_mainframe_cost_d_r_torque
    self.d_nacelle_cost_d_rating = (
        self.d_gearbox_cost_d_rating
        + self.d_generator_cost_d_rating
        + self.d_brakes_cost_d_rating
        + self.d_hvac_cost_d_rating
        + self.d_cover_cost_d_rating
        + self.d_electronics_cost_d_rating
        + self.d_vselectronics_cost_d_rating
    )
def list_deriv_vars(self):
    """
    Return the (input names, output names) pair used for derivative bookkeeping.

    Outputs are ordered as the nacelle total followed by each subsystem,
    first all masses and then all costs, matching the row order of provideJ.
    """
    inputs = ["rotor_diameter", "rotor_mass", "rotor_thrust", "rotor_torque", "machine_rating"]
    subsystems = [
        "lowSpeedShaft",
        "bearings",
        "gearbox",
        "generator",
        "mechanicalBrakes",
        "yawSystem",
        "electronicCabling",
        "HVAC",
        "VSElectronics",
        "mainframeTotal",
        "nacelleCover",
        "controls",
    ]
    outputs = ["nacelle_mass"] + [name + "_mass" for name in subsystems]
    outputs += ["nacelle_cost"] + [name + "_cost" for name in subsystems]
    return inputs, outputs
def provideJ(self):
    """
    Assemble and return the Jacobian of all nacelle outputs with respect to
    the five inputs (rotor_diameter, rotor_mass, rotor_thrust, rotor_torque,
    machine_rating), using the partials stored by compute().

    Row order matches list_deriv_vars(): rows 1-13 are nacelle_mass and the
    component masses, rows 14-26 are nacelle_cost and the component costs.
    Columns follow the input order above.
    """
    self.J = np.array(
        [
            # nacelle_mass (total of all component-mass partials)
            [
                self.d_nacelle_mass_d_r_diameter,
                self.d_nacelle_mass_d_r_mass,
                self.d_nacelle_mass_d_r_thrust,
                self.d_nacelle_mass_d_r_torque,
                self.d_nacelle_mass_d_rating,
            ],
            # component masses
            [self.d_lss_mass_d_r_diameter, self.d_lss_mass_d_r_mass, 0.0, self.d_lss_mass_d_r_torque, 0.0],
            [self.d_bearings_mass_d_r_diameter, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, self.d_gearbox_mass_d_r_torque, 0.0],
            [0.0, 0.0, 0.0, self.d_generator_mass_d_r_torque, self.d_generator_mass_d_rating],
            [0.0, 0.0, 0.0, 0.0, self.d_brakes_mass_d_rating],
            [self.d_yaw_mass_d_r_diameter, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0],  # electronicCabling_mass is constant (0.0)
            [0.0, 0.0, 0.0, 0.0, self.d_hvac_mass_d_rating],
            [0.0, 0.0, 0.0, 0.0, 0.0],  # VSElectronics_mass is constant (0.0)
            [
                self.d_mainframe_mass_d_r_diameter,
                self.d_mainframe_mass_d_r_mass,
                self.d_mainframe_mass_d_r_thrust,
                self.d_mainframe_mass_d_r_torque,
                0.0,
            ],
            [0.0, 0.0, 0.0, 0.0, self.d_cover_mass_d_rating],
            [0.0, 0.0, 0.0, 0.0, 0.0],  # controls_mass is constant (0.0)
            # nacelle_cost (total of all component-cost partials)
            [
                self.d_nacelle_cost_d_r_diameter,
                self.d_nacelle_cost_d_r_mass,
                self.d_nacelle_cost_d_r_thrust,
                self.d_nacelle_cost_d_r_torque,
                self.d_nacelle_cost_d_rating,
            ],
            # component costs
            [self.d_lss_cost_d_r_diameter, 0.0, 0.0, 0.0, 0.0],
            [self.d_bearings_cost_d_r_diameter, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, self.d_gearbox_cost_d_rating],
            [0.0, 0.0, 0.0, 0.0, self.d_generator_cost_d_rating],
            [0.0, 0.0, 0.0, 0.0, self.d_brakes_cost_d_rating],
            [self.d_yaw_cost_d_r_diameter, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, self.d_electronics_cost_d_rating],
            [0.0, 0.0, 0.0, 0.0, self.d_hvac_cost_d_rating],
            [0.0, 0.0, 0.0, 0.0, self.d_vselectronics_cost_d_rating],
            [
                self.d_mainframe_cost_d_r_diameter,
                self.d_mainframe_cost_d_r_mass,
                self.d_mainframe_cost_d_r_thrust,
                self.d_mainframe_cost_d_r_torque,
                0.0,
            ],
            [0.0, 0.0, 0.0, 0.0, self.d_cover_cost_d_rating],
            [0.0, 0.0, 0.0, 0.0, 0.0],  # controls_cost has no partials w.r.t. these inputs
        ]
    )
    return self.J
##### Tower
class tower_csm(object):
    """
    Wind turbine tower mass and cost based on the NREL Cost and Scaling Model
    (csmTower.py), using WindPACT mass curves and PPI cost escalation.
    """

    def __init__(self):
        """Initialize tower outputs to zero."""
        super(tower_csm, self).__init__()
        # Outputs
        self.tower_cost = 0.0  # USD, cost for a tower
        self.tower_mass = 0.0  # kg, mass for a turbine tower

    def compute(self, rotor_diameter, hub_height, year=2009, month=12, advanced_tower=False):
        """
        Compute tower mass, cost and their derivatives.

        Parameters
        ----------
        rotor_diameter : float -- rotor diameter of the machine [m]
        hub_height : float -- hub height of machine [m]
        year : int -- year of project start (PPI cost escalation)
        month : int -- month of project start (PPI cost escalation)
        advanced_tower : bool -- use the advanced tower configuration curve
        """
        # Variables
        self.rotor_diameter = rotor_diameter
        self.hub_height = hub_height
        # Parameters
        self.year = year
        self.month = month
        self.advanced_tower = advanced_tower

        # WindPACT mass curve: linear in swept area * hub height
        windpactMassSlope = 0.397251147546925
        windpactMassInt = -1414.381881
        if self.advanced_tower:
            windpactMassSlope = 0.269380169
            windpactMassInt = 1779.328183
        self.tower_mass = (
            windpactMassSlope * np.pi * (self.rotor_diameter / 2.0) ** 2 * self.hub_height + windpactMassInt
        )

        # FIX: previously assigned from the undefined names `curr_yr`/`curr_mon`
        # (NameError); every sibling model escalates from self.year/self.month.
        ppi.curr_yr = self.year
        ppi.curr_mon = self.month
        twrCostEscalator = ppi.compute("IPPI_TWR")
        twrCostCoeff = 1.5  # $/kg
        self.towerCost2002 = self.tower_mass * twrCostCoeff
        self.tower_cost = self.towerCost2002 * twrCostEscalator

        # derivatives of mass and cost w.r.t. the two inputs
        self.d_mass_d_diameter = (
            2 * windpactMassSlope * np.pi * (self.rotor_diameter / 2.0) * (1 / 2.0) * self.hub_height
        )
        self.d_mass_d_hheight = windpactMassSlope * np.pi * (self.rotor_diameter / 2.0) ** 2
        self.d_cost_d_diameter = twrCostCoeff * twrCostEscalator * self.d_mass_d_diameter
        self.d_cost_d_hheight = twrCostCoeff * twrCostEscalator * self.d_mass_d_hheight

    def list_deriv_vars(self):
        """Return the (inputs, outputs) names used for the Jacobian."""
        inputs = ["rotor_diameter", "hub_height"]
        outputs = ["tower_mass", "tower_cost"]
        return inputs, outputs

    def provideJ(self):
        """Assemble and return the 2x2 Jacobian d(mass, cost)/d(diameter, hub height)."""
        self.J = np.array(
            [[self.d_mass_d_diameter, self.d_mass_d_hheight], [self.d_cost_d_diameter, self.d_cost_d_hheight]]
        )
        return self.J
##### Turbine
# -------------------------------------------------------
# Rotor mass adder
class rotor_mass_adder(object):
    """Combine blade and hub-system masses into an overall rotor mass."""

    def __init__(self):
        super(rotor_mass_adder, self).__init__()
        # Outputs
        self.rotor_mass = 0.0  # kg, overall rotor mass

    def compute(self, blade_mass, hub_system_mass, blade_number=3):
        """
        Sum `blade_number` blades plus the hub system into rotor_mass [kg].

        Parameters
        ----------
        blade_mass : float -- mass of a single wind turbine blade [kg]
        hub_system_mass : float -- hub system mass [kg]
        blade_number : int -- number of rotor blades
        """
        # Variables
        self.blade_mass = blade_mass
        self.hub_system_mass = hub_system_mass
        # Parameters
        self.blade_number = blade_number

        self.rotor_mass = blade_number * blade_mass + hub_system_mass

        # partials of rotor_mass w.r.t. the two mass inputs
        self.d_mass_d_blade_mass = blade_number
        self.d_mass_d_hub_mass = 1.0

    def list_deriv_vars(self):
        """Return the (inputs, outputs) names used for the Jacobian."""
        return ["blade_mass", "hub_system_mass"], ["rotor_mass"]

    def provideJ(self):
        """Assemble and return the 1x2 Jacobian d(rotor_mass)/d(inputs)."""
        self.J = np.array([[self.d_mass_d_blade_mass, self.d_mass_d_hub_mass]])
        return self.J
# ------------------------------------------------------------------
class turbine_csm(object):
    """Roll rotor, nacelle and tower masses/costs up to turbine totals (NREL CSM)."""

    def __init__(self):
        super(turbine_csm, self).__init__()
        # Outputs
        self.rotor_mass = 0.0  # kg
        self.rotor_cost = 0.0  # USD
        self.turbine_mass = 0.0  # kg
        self.turbine_cost = 0.0  # USD, capital cost including transportation

    def compute(
        self,
        blade_cost,
        blade_mass,
        hub_system_cost,
        hub_system_mass,
        nacelle_mass,
        nacelle_cost,
        tower_cost,
        tower_mass,
        blade_number=3,
        offshore=True,
    ):
        """
        Compute the Turbine Capital Costs roll-up of the NREL Cost and
        Scaling Model.

        Parameters
        ----------
        blade_cost, blade_mass : float -- single blade cost [USD] / mass [kg]
        hub_system_cost, hub_system_mass : float -- hub system cost / mass
        nacelle_mass, nacelle_cost : float -- nacelle mass / cost
        tower_cost, tower_mass : float -- tower cost / mass
        blade_number : int -- number of rotor blades
        offshore : bool -- offshore project (applies a 10% cost adder)
        """
        # Variables
        self.blade_cost = blade_cost
        self.blade_mass = blade_mass
        self.hub_system_cost = hub_system_cost
        self.hub_system_mass = hub_system_mass
        self.nacelle_mass = nacelle_mass
        self.nacelle_cost = nacelle_cost
        self.tower_cost = tower_cost
        self.tower_mass = tower_mass
        # Parameters (and ignored inputs)
        self.blade_number = blade_number
        self.offshore = offshore

        # rotor totals: blades plus hub system
        self.rotor_mass = blade_mass * blade_number + hub_system_mass
        self.rotor_cost = blade_cost * blade_number + hub_system_cost
        # turbine totals: rotor + nacelle + tower
        self.turbine_mass = self.rotor_mass + nacelle_mass + tower_mass
        self.turbine_cost = self.rotor_cost + nacelle_cost + tower_cost
        if self.offshore:
            # offshore marinization: 10% capital-cost adder
            self.turbine_cost *= 1.1

        # mass partials: one blade contributes blade_number times, the rest 1:1
        self.d_mass_d_blade_mass = blade_number
        self.d_mass_d_hub_mass = 1.0
        self.d_mass_d_nacelle_mass = 1.0
        self.d_mass_d_tower_mass = 1.0
        # cost partials pick up the offshore adder when present
        if self.offshore:
            self.d_cost_d_blade_cost = 1.1 * blade_number
            self.d_cost_d_hub_cost = 1.1
            self.d_cost_d_nacelle_cost = 1.1
            self.d_cost_d_tower_cost = 1.1
        else:
            self.d_cost_d_blade_cost = blade_number
            self.d_cost_d_hub_cost = 1.0
            self.d_cost_d_nacelle_cost = 1.0
            self.d_cost_d_tower_cost = 1.0

    def list_deriv_vars(self):
        """Return the (inputs, outputs) names used for the Jacobian."""
        inputs = [
            "blade_mass",
            "hub_system_mass",
            "nacelle_mass",
            "tower_mass",
            "blade_cost",
            "hub_system_cost",
            "nacelle_cost",
            "tower_cost",
        ]
        outputs = ["turbine_mass", "turbine_cost"]
        return inputs, outputs

    def provideJ(self):
        """Assemble and return the 2x8 Jacobian: masses affect turbine_mass, costs affect turbine_cost."""
        mass_row = [
            self.d_mass_d_blade_mass,
            self.d_mass_d_hub_mass,
            self.d_mass_d_nacelle_mass,
            self.d_mass_d_tower_mass,
            0.0,
            0.0,
            0.0,
            0.0,
        ]
        cost_row = [
            0.0,
            0.0,
            0.0,
            0.0,
            self.d_cost_d_blade_cost,
            self.d_cost_d_hub_cost,
            self.d_cost_d_nacelle_cost,
            self.d_cost_d_tower_cost,
        ]
        self.J = np.array([mass_row, cost_row])
        return self.J
# --------------------------------------------------------------------
class tcc_csm(object):
    """Turbine capital cost model: chains the component CSM models together."""

    def __init__(self):
        super(tcc_csm, self).__init__()  # will actually run the workflow
        # Outputs
        self.turbine_cost = 0.0  # USD, turbine capital cost including transportation
        self.rotor_cost = 0.0  # USD
        self.nacelle_cost = 0.0  # USD
        self.tower_cost = 0.0  # USD

    def compute(
        self,
        rotor_diameter,
        machine_rating,
        hub_height,
        rotor_thrust,
        rotor_torque,
        year=2009,
        month=12,
        blade_number=3,
        offshore=True,
        advanced_blade=False,
        drivetrain_design="geared",
        crane=True,
        advanced_bedplate=0,
        advanced_tower=False,
    ):
        """
        Run blade, hub, rotor, nacelle, tower and turbine CSM models in
        dependency order and roll the results up to turbine totals.

        Parameters
        ----------
        rotor_diameter : float -- rotor diameter of the machine [m]
        machine_rating : float -- rated power of wind turbine [kW]
        hub_height : float -- hub height above ground / sea level [m]
        rotor_thrust : float -- maximum thrust from rotor [N]
        rotor_torque : float -- torque from rotor at rated power [N*m]
        year, month : int -- project start date (PPI cost escalation)
        blade_number : int -- number of rotor blades
        offshore : bool -- offshore project flag
        advanced_blade : bool -- use advanced blade mass/cost curve
        drivetrain_design : str -- 'geared', 'single_stage', 'multi_drive' or 'pm_direct_drive'
        crane : bool -- service crane up tower
        advanced_bedplate : int -- bedplate design indicator (0 = conventional)
        advanced_tower : bool -- advanced tower configuration
        """
        # record inputs
        self.rotor_diameter = rotor_diameter
        self.machine_rating = machine_rating
        self.hub_height = hub_height
        self.rotor_thrust = rotor_thrust
        self.rotor_torque = rotor_torque
        self.year = year
        self.month = month
        self.blade_number = blade_number
        self.offshore = offshore
        self.advanced_blade = advanced_blade
        self.drivetrain_design = drivetrain_design
        self.crane = crane
        self.advanced_bedplate = advanced_bedplate
        self.advanced_tower = advanced_tower

        # component models, evaluated in dependency order
        blade_model = blades_csm()
        blade_model.compute(rotor_diameter, year, month, advanced_blade)

        hub_model = hub_csm()
        hub_model.compute(rotor_diameter, blade_model.blade_mass, year, month, blade_number)

        rotor_model = rotor_mass_adder()
        rotor_model.compute(blade_model.blade_mass, hub_model.hub_system_mass, blade_number)

        nacelle_model = nacelle_csm()
        nacelle_model.compute(
            rotor_diameter,
            rotor_model.rotor_mass,
            rotor_thrust,
            rotor_torque,
            machine_rating,
            drivetrain_design,
            crane,
            advanced_bedplate,
            year,
            month,
            offshore,
        )

        tower_model = tower_csm()
        tower_model.compute(rotor_diameter, hub_height, year, month, advanced_tower)

        turbine_model = turbine_csm()
        turbine_model.compute(
            blade_model.blade_cost,
            blade_model.blade_mass,
            hub_model.hub_system_cost,
            hub_model.hub_system_mass,
            nacelle_model.nacelle_mass,
            nacelle_model.nacelle_cost,
            tower_model.tower_cost,
            tower_model.tower_mass,
            blade_number,
            offshore,
        )

        # roll up the turbine-level outputs
        self.rotor_cost = turbine_model.rotor_cost
        self.rotor_mass = turbine_model.rotor_mass
        self.turbine_cost = turbine_model.turbine_cost
        self.turbine_mass = turbine_model.turbine_mass
# Balance of System Costs
##################################################
class bos_csm(object):
def __init__(self):
    """Initialize all balance-of-station (BOS) cost outputs to zero.

    The breakdown attributes are populated by compute(); the trailing
    comments show the expression each one is later assigned from.
    """
    # Outputs
    # bos_breakdown = VarTree(BOSVarTree(), iotype='out', desc='BOS cost breakdown')
    # bos_costs = Float(iotype='out', desc='Overall wind plant balance of station/system costs up to point of comissioning')
    self.bos_costs = 0.0  # *= self.multiplier # TODO: add to gradients
    self.bos_breakdown_development_costs = 0.0  # engPermits_costs * self.turbine_number
    self.bos_breakdown_preparation_and_staging_costs = (
        0.0  # (roadsCivil_costs + portStaging_costs) * self.turbine_number
    )
    self.bos_breakdown_transportation_costs = 0.0  # (transportation_costs * self.turbine_number)
    self.bos_breakdown_foundation_and_substructure_costs = 0.0  # foundation_cost * self.turbine_number
    self.bos_breakdown_electrical_costs = 0.0  # electrical_costs * self.turbine_number
    self.bos_breakdown_assembly_and_installation_costs = 0.0  # installation_costs * self.turbine_number
    self.bos_breakdown_soft_costs = 0.0  # 0.0
    self.bos_breakdown_other_costs = 0.0  # (pai_costs + scour_costs + suretyBond) * self.turbine_number
def compute(
self,
machine_rating,
rotor_diameter,
hub_height,
RNA_mass,
turbine_cost,
turbine_number=100,
sea_depth=20.0,
year=2009,
month=12,
multiplier=1.0,
):
# for coding ease
# Default Variables
self.machine_rating = machine_rating # Float(iotype='in', units='kW', desc='turbine machine rating')
self.rotor_diameter = rotor_diameter # Float(iotype='in', units='m', desc='rotor diameter')
self.hub_height = hub_height # Float(iotype='in', units='m', desc='hub height')
self.RNA_mass = RNA_mass # Float(iotype='in', units='kg', desc='Rotor Nacelle Assembly mass')
self.turbine_cost = turbine_cost # Float(iotype='in', units='USD', desc='Single Turbine Capital _costs')
# Parameters
self.turbine_number = turbine_number # Int(iotype='in', desc='number of turbines in project')
self.sea_depth = (
sea_depth # Float(20.0, units = 'm', iotype = 'in', desc = 'sea depth for offshore wind plant')
)
self.year = year # Int(2009, iotype='in', desc='year for project start')
self.month = month # Int(12, iotype = 'in', desc= 'month for project start')
self.multiplier = multiplier # Float(1.0, iotype='in')
lPrmtsCostCoeff1 = 9.94e-04
lPrmtsCostCoeff2 = 20.31
oPrmtsCostFactor = 37.0 # $/kW (2003)
scourCostFactor = 55.0 # $/kW (2003)
ptstgCostFactor = 20.0 # $/kW (2003)
ossElCostFactor = 260.0 # $/kW (2003) shallow
ostElCostFactor = 290.0 # $/kW (2003) transitional
ostSTransFactor = 25.0 # $/kW (2003)
ostTTransFactor = 77.0 # $/kW (2003)
osInstallFactor = 100.0 # $/kW (2003) shallow & trans
suppInstallFactor = 330.0 # $/kW (2003) trans additional
paiCost = 60000.0 # per turbine
suretyBRate = 0.03 # 3% of ICC
suretyBond = 0.0
# set variables
if self.sea_depth == 0: # type of plant # 1: Land, 2: < 30m, 3: < 60m, 4: >= 60m
iDepth = 1
elif self.sea_depth < 30:
iDepth = 2
elif self.sea_depth < 60:
iDepth = 3
else:
iDepth = 4
# initialize self.ppi index calculator
if iDepth == 1:
ref_yr = 2002
ref_mon = 9
else:
ref_yr = 2003
ref_mon = 9
ppi.ref_yr = ref_yr
ppi.ref_mon = ref_mon
ppi.curr_yr = self.year
ppi.curr_mon = self.month
self.d_foundation_d_diameter = 0.0
self.d_foundation_d_hheight = 0.0
self.d_foundation_d_rating = 0.0
# foundation costs
if iDepth == 1: # land
fcCoeff = 303.23
fcExp = 0.4037
SweptArea = (self.rotor_diameter * 0.5) ** 2.0 * np.pi
foundation_cost = fcCoeff * (self.hub_height * SweptArea) ** fcExp
fndnCostEscalator = ppi.compute("IPPI_FND")
self.d_foundation_d_diameter = (
fndnCostEscalator
* fcCoeff
* fcExp
* ((self.hub_height * (2.0 * 0.5 * (self.rotor_diameter * 0.5) * np.pi)) ** (fcExp - 1))
* self.hub_height
)
self.d_foundation_d_hheight = (
fndnCostEscalator * fcCoeff * fcExp * ((self.hub_height * SweptArea) ** (fcExp - 1)) * SweptArea
)
elif iDepth == 2:
sscf = 300.0 # $/kW
foundation_cost = sscf * self.machine_rating
fndnCostEscalator = ppi.compute("IPPI_MPF")
self.d_foundation_d_rating = fndnCostEscalator * sscf
elif iDepth == 3:
sscf = 450.0 # $/kW
foundation_cost = sscf * self.machine_rating
fndnCostEscalator = ppi.compute("IPPI_OAI")
self.d_foundation_d_rating = fndnCostEscalator * sscf
elif iDepth == 4:
foundation_cost = 0.0
fndnCostEscalator = 1.0
foundation_cost *= fndnCostEscalator
# cost calculations
tpC1 = 0.00001581
tpC2 = -0.0375
tpInt = 54.7
tFact = tpC1 * self.machine_rating * self.machine_rating + tpC2 * self.machine_rating + tpInt
roadsCivil_costs = 0.0
portStaging_costs = 0.0
pai_costs = 0.0
scour_costs = 0.0
self.d_assembly_d_diameter = 0.0
self.d_assembly_d_hheight = 0.0
self.d_development_d_rating = 0.0
self.d_preparation_d_rating = 0.0
self.d_transport_d_rating = 0.0
self.d_electrical_d_rating = 0.0
self.d_assembly_d_rating = 0.0
self.d_other_d_rating = 0.0
if iDepth == 1:
engPermits_costs = (lPrmtsCostCoeff1 * self.machine_rating * self.machine_rating) + (
lPrmtsCostCoeff2 * self.machine_rating
)
ppi.ref_mon = 3
engPermits_costs *= ppi.compute("IPPI_LPM")
self.d_development_d_rating = ppi.compute("IPPI_LPM") * (
2.0 * lPrmtsCostCoeff1 * self.machine_rating + lPrmtsCostCoeff2
)
ppi.ref_mon = 9
elC1 = 3.49e-06
elC2 = -0.0221
elInt = 109.7
eFact = elC1 * self.machine_rating * self.machine_rating + elC2 * self.machine_rating + elInt
electrical_costs = self.machine_rating * eFact * ppi.compute("IPPI_LEL")
self.d_electrical_d_rating = ppi.compute("IPPI_LEL") * (
3.0 * elC1 * self.machine_rating ** 2.0 + 2.0 * elC2 * self.machine_rating + elInt
)
rcC1 = 2.17e-06
rcC2 = -0.0145
rcInt = 69.54
rFact = rcC1 * self.machine_rating * self.machine_rating + rcC2 * self.machine_rating + rcInt
roadsCivil_costs = self.machine_rating * rFact * ppi.compute("IPPI_RDC")
self.d_preparation_d_rating = ppi.compute("IPPI_RDC") * (
3.0 * rcC1 * self.machine_rating ** 2.0 + 2.0 * rcC2 * self.machine_rating + rcInt
)
iCoeff = 1.965
iExp = 1.1736
installation_costs = iCoeff * ((self.hub_height * self.rotor_diameter) ** iExp) * ppi.compute("IPPI_LAI")
self.d_assembly_d_diameter = (
iCoeff
* ((self.hub_height * self.rotor_diameter) ** (iExp - 1))
* self.hub_height
* ppi.compute("IPPI_LAI")
)
self.d_assembly_d_hheight = (
iCoeff
* ((self.hub_height * self.rotor_diameter) ** (iExp - 1))
* self.rotor_diameter
* ppi.compute("IPPI_LAI")
)
transportation_costs = self.machine_rating * tFact * ppi.compute("IPPI_TPT")
self.d_transport_d_rating = ppi.compute("IPPI_TPT") * (
tpC1 * 3.0 * self.machine_rating ** 2.0 + tpC2 * 2.0 * self.machine_rating + tpInt
)
elif iDepth == 2: # offshore shallow
ppi.ref_yr = 2003
pai_costs = paiCost * ppi.compute("IPPI_PAE")
portStaging_costs = ptstgCostFactor * self.machine_rating * ppi.compute("IPPI_STP") # 1.415538133
self.d_preparation_d_rating = ptstgCostFactor * ppi.compute("IPPI_STP")
engPermits_costs = oPrmtsCostFactor * self.machine_rating * ppi.compute("IPPI_OPM")
self.d_development_d_rating = oPrmtsCostFactor * ppi.compute("IPPI_OPM")
scour_costs = scourCostFactor * self.machine_rating * ppi.compute("IPPI_STP") # 1.415538133#
self.d_other_d_rating = scourCostFactor * ppi.compute("IPPI_STP")
installation_costs = osInstallFactor * self.machine_rating * ppi.compute("IPPI_OAI")
self.d_assembly_d_rating = osInstallFactor * ppi.compute("IPPI_OAI")
electrical_costs = ossElCostFactor * self.machine_rating * ppi.compute("IPPI_OEL")
self.d_electrical_d_rating = ossElCostFactor * ppi.compute("IPPI_OEL")
ppi.ref_yr = 2002
transportation_costs = self.machine_rating * tFact * ppi.compute("IPPI_TPT")
self.d_transport_d_rating = ppi.compute("IPPI_TPT") * (
tpC1 * 3.0 * self.machine_rating ** 2.0 + tpC2 * 2.0 * self.machine_rating + tpInt
)
ppi.ref_yr = 2003
elif iDepth == 3: # offshore transitional depth
ppi.ref_yr = 2003
turbInstall = osInstallFactor * self.machine_rating * ppi.compute("IPPI_OAI")
supportInstall = suppInstallFactor * self.machine_rating * ppi.compute("IPPI_OAI")
installation_costs = turbInstall + supportInstall
self.d_assembly_d_rating = (osInstallFactor + suppInstallFactor) * ppi.compute("IPPI_OAI")
pai_costs = paiCost * ppi.compute("IPPI_PAE")
electrical_costs = ostElCostFactor * self.machine_rating * ppi.compute("IPPI_OEL")
self.d_electrical_d_rating = ossElCostFactor * ppi.compute("IPPI_OEL")
portStaging_costs = ptstgCostFactor * self.machine_rating * ppi.compute("IPPI_STP")
self.d_preparation_d_rating = ptstgCostFactor * ppi.compute("IPPI_STP")
engPermits_costs = oPrmtsCostFactor * self.machine_rating * ppi.compute("IPPI_OPM")
self.d_development_d_rating = oPrmtsCostFactor * ppi.compute("IPPI_OPM")
scour_costs = scourCostFactor * self.machine_rating * ppi.compute("IPPI_STP")
self.d_other_d_rating = scourCostFactor * ppi.compute("IPPI_STP")
ppi.ref_yr = 2002
turbTrans = ostTTransFactor * self.machine_rating * ppi.compute("IPPI_TPT")
self.d_transport_d_rating = ostTTransFactor * ppi.compute("IPPI_TPT")
ppi.ref_yr = 2003
supportTrans = ostSTransFactor * self.machine_rating * ppi.compute("IPPI_OAI")
transportation_costs = turbTrans + supportTrans
self.d_transport_d_rating += ostSTransFactor * ppi.compute("IPPI_OAI")
elif iDepth == 4: # offshore deep
print("\ncsmBOS: Add costCat 4 code\n\n")
bos_costs = (
foundation_cost
+ transportation_costs
+ roadsCivil_costs
+ portStaging_costs
+ installation_costs
+ electrical_costs
+ engPermits_costs
+ pai_costs
+ scour_costs
)
self.d_other_d_tcc = 0.0
if self.sea_depth > 0.0:
suretyBond = suretyBRate * (self.turbine_cost + bos_costs)
self.d_other_d_tcc = suretyBRate
d_surety_d_rating = suretyBRate * (
self.d_development_d_rating
+ self.d_preparation_d_rating
+ self.d_transport_d_rating
+ self.d_foundation_d_rating
+ self.d_electrical_d_rating
+ self.d_assembly_d_rating
+ self.d_other_d_rating
)
self.d_other_d_rating += d_surety_d_rating
else:
suretyBond = 0.0
self.bos_costs = self.turbine_number * (bos_costs + suretyBond)
self.bos_costs *= self.multiplier # TODO: add to gradients
self.bos_breakdown_development_costs = engPermits_costs * self.turbine_number
self.bos_breakdown_preparation_and_staging_costs = (roadsCivil_costs + portStaging_costs) * self.turbine_number
self.bos_breakdown_transportation_costs = transportation_costs * self.turbine_number
self.bos_breakdown_foundation_and_substructure_costs = foundation_cost * self.turbine_number
self.bos_breakdown_electrical_costs = electrical_costs * self.turbine_number
self.bos_breakdown_assembly_and_installation_costs = installation_costs * self.turbine_number
self.bos_breakdown_soft_costs = 0.0
self.bos_breakdown_other_costs = (pai_costs + scour_costs + suretyBond) * self.turbine_number
# derivatives
self.d_development_d_rating *= self.turbine_number
self.d_preparation_d_rating *= self.turbine_number
self.d_transport_d_rating *= self.turbine_number
self.d_foundation_d_rating *= self.turbine_number
self.d_electrical_d_rating *= self.turbine_number
self.d_assembly_d_rating *= self.turbine_number
self.d_soft_d_rating = 0.0
self.d_other_d_rating *= self.turbine_number
self.d_cost_d_rating = (
self.d_development_d_rating
+ self.d_preparation_d_rating
+ self.d_transport_d_rating
+ self.d_foundation_d_rating
+ self.d_electrical_d_rating
+ self.d_assembly_d_rating
+ self.d_soft_d_rating
+ self.d_other_d_rating
)
self.d_development_d_diameter = 0.0
self.d_preparation_d_diameter = 0.0
self.d_transport_d_diameter = 0.0
# self.d_foundation_d_diameter
self.d_electrical_d_diameter = 0.0
# self.d_assembly_d_diameter
self.d_soft_d_diameter = 0.0
self.d_other_d_diameter = 0.0
self.d_cost_d_diameter = (
self.d_development_d_diameter
+ self.d_preparation_d_diameter
+ self.d_transport_d_diameter
+ self.d_foundation_d_diameter
+ self.d_electrical_d_diameter
+ self.d_assembly_d_diameter
+ self.d_soft_d_diameter
+ self.d_other_d_diameter
)
self.d_development_d_tcc = 0.0
self.d_preparation_d_tcc = 0.0
self.d_transport_d_tcc = 0.0
self.d_foundation_d_tcc = 0.0
self.d_electrical_d_tcc = 0.0
self.d_assembly_d_tcc = 0.0
self.d_soft_d_tcc = 0.0
self.d_other_d_tcc *= self.turbine_number
self.d_cost_d_tcc = (
self.d_development_d_tcc
+ self.d_preparation_d_tcc
+ self.d_transport_d_tcc
+ self.d_foundation_d_tcc
+ self.d_electrical_d_tcc
+ self.d_assembly_d_tcc
+ self.d_soft_d_tcc
+ self.d_other_d_tcc
)
self.d_development_d_hheight = 0.0
self.d_preparation_d_hheight = 0.0
self.d_transport_d_hheight = 0.0
# self.d_foundation_d_hheight
self.d_electrical_d_hheight = 0.0
# self.d_assembly_d_hheight
self.d_soft_d_hheight = 0.0
self.d_other_d_hheight = 0.0
self.d_cost_d_hheight = (
self.d_development_d_hheight
+ self.d_preparation_d_hheight
+ self.d_transport_d_hheight
+ self.d_foundation_d_hheight
+ self.d_electrical_d_hheight
+ self.d_assembly_d_hheight
+ self.d_soft_d_hheight
+ self.d_other_d_hheight
)
self.d_development_d_rna = 0.0
self.d_preparation_d_rna = 0.0
self.d_transport_d_rna = 0.0
self.d_foundation_d_rna = 0.0
self.d_electrical_d_rna = 0.0
self.d_assembly_d_rna = 0.0
self.d_soft_d_rna = 0.0
self.d_other_d_rna = 0.0
self.d_cost_d_rna = (
self.d_development_d_rna
+ self.d_preparation_d_rna
+ self.d_transport_d_rna
+ self.d_foundation_d_rna
+ self.d_electrical_d_rna
+ self.d_assembly_d_rna
+ self.d_soft_d_rna
+ self.d_other_d_rna
)
def list_deriv_vars(self):
    """Name the variables of the BOS Jacobian.

    Returns
    -------
    tuple(list, list)
        (input variable names, output variable names); the orders match
        the column/row order of the array returned by ``provideJ``.
    """
    deriv_inputs = [
        "machine_rating",
        "rotor_diameter",
        "turbine_cost",
        "hub_height",
        "RNA_mass",
    ]
    deriv_outputs = [
        "bos_breakdown.development_costs",
        "bos_breakdown.preparation_and_staging_costs",
        "bos_breakdown.transportation_costs",
        "bos_breakdown.foundation_and_substructure_costs",
        "bos_breakdown.electrical_costs",
        "bos_breakdown.assembly_and_installation_costs",
        "bos_breakdown.soft_costs",
        "bos_breakdown.other_costs",
        "bos_costs",
    ]
    return deriv_inputs, deriv_outputs
def provideJ(self):
    """Assemble the BOS cost Jacobian.

    Rows follow the output order of ``list_deriv_vars`` (development,
    preparation, transport, foundation, electrical, assembly, soft,
    other, total cost); columns follow the input order (machine rating,
    rotor diameter, turbine capital cost, hub height, RNA mass).

    Returns
    -------
    numpy.ndarray
        9 x 5 array of partial derivatives (also stored on ``self.J``).
    """
    row_names = [
        "development",
        "preparation",
        "transport",
        "foundation",
        "electrical",
        "assembly",
        "soft",
        "other",
        "cost",
    ]
    col_names = ["rating", "diameter", "tcc", "hheight", "rna"]
    # Each attribute follows the naming scheme d_<output>_d_<input>
    self.J = np.array(
        [
            [getattr(self, "d_%s_d_%s" % (row, col)) for col in col_names]
            for row in row_names
        ]
    )
    return self.J
# Operational Expenditures
##################################################
class opex_csm(object):
    """Operational expenditures (OPEX) model of the NREL Cost and Scaling Model.

    ``compute`` evaluates the annual OPEX breakdown; ``compute_partials``
    provides the Jacobian with respect to net AEP and machine rating.

    Bug fix vs. the previous revision: ``compute_partials`` referenced
    locals of ``compute`` (``lrcCF``, ``costEscalator``, ``offshore``,
    ...) and ``self.turbine_number`` that were never stored, so it always
    raised ``NameError``.  ``compute`` now persists those intermediates
    on the instance and ``compute_partials`` reads them back.
    """

    def __init__(self):
        # Outputs
        self.avg_annual_opex = 0.0  # total average annual OPEX [$/yr]
        # self.opex_breakdown = VarTree(OPEXVarTree(),iotype='out')
        self.opex_breakdown_preventative_opex = 0.0
        self.opex_breakdown_corrective_opex = 0.0
        self.opex_breakdown_lease_opex = 0.0
        self.opex_breakdown_other_opex = 0.0

    def compute(self, sea_depth, year, month, turbine_number, machine_rating, net_aep):
        """Compute the annual OPEX breakdown and total.

        Parameters
        ----------
        sea_depth : float
            Water depth; 0 means a land-based plant, > 0 offshore.
        year, month : int
            Current year/month for the PPI cost escalators.
        turbine_number : int
            Number of turbines in the plant.
        machine_rating : float
            Turbine rating [kW].
        net_aep : float
            Net annual energy production [kWh].
        """
        # sea_depth == 0 -> land based, otherwise offshore
        offshore = sea_depth != 0
        ppi.curr_yr = year
        ppi.curr_mon = month

        # O&M (preventative maintenance)
        offshoreCostFactor = 0.0200  # $/kWh
        landCostFactor = 0.0070  # $/kWh
        if not offshore:  # kld - place for an error check - iShore should be in 1:4
            cost = net_aep * landCostFactor
            costEscalator = ppi.compute("IPPI_LOM")
        else:
            cost = net_aep * offshoreCostFactor
            ppi.ref_yr = 2003
            costEscalator = ppi.compute("IPPI_OOM")
            ppi.ref_yr = 2002
        self.opex_breakdown_preventative_opex = cost * costEscalator  # in $/year

        # LRC (levelized replacement / corrective costs)
        if not offshore:
            lrcCF = 10.70  # land based
            costlrcEscFactor = ppi.compute("IPPI_LLR")
        else:  # TODO: transition and deep water options if applicable
            lrcCF = 17.00  # offshore
            ppi.ref_yr = 2003
            costlrcEscFactor = ppi.compute("IPPI_OLR")
            ppi.ref_yr = 2002
        self.opex_breakdown_corrective_opex = machine_rating * lrcCF * costlrcEscFactor * turbine_number  # in $/yr

        # LLC (land/sea-bed lease costs)
        if not offshore:
            leaseCF = 0.00108  # land based
            costlandEscFactor = ppi.compute("IPPI_LSE")
        else:  # TODO: transition and deep water options if applicable
            leaseCF = 0.00108  # offshore
            costlandEscFactor = ppi.compute("IPPI_LSE")
        self.opex_breakdown_lease_opex = net_aep * leaseCF * costlandEscFactor  # in $/yr

        # Other
        self.opex_breakdown_other_opex = 0.0

        # Total OPEX
        self.avg_annual_opex = (
            self.opex_breakdown_preventative_opex + self.opex_breakdown_corrective_opex + self.opex_breakdown_lease_opex
        )

        # Persist the intermediates needed by compute_partials (previously
        # referenced there as undefined locals -> NameError).
        self.turbine_number = turbine_number
        self._offshore = offshore
        self._landCostFactor = landCostFactor
        self._offshoreCostFactor = offshoreCostFactor
        self._costEscalator = costEscalator
        self._lrcCF = lrcCF
        self._costlrcEscFactor = costlrcEscFactor
        self._leaseCF = leaseCF
        self._costlandEscFactor = costlandEscFactor

    def compute_partials(self):
        """Compute the OPEX Jacobian; must be called after :meth:`compute`.

        Returns
        -------
        numpy.ndarray
            5 x 2 array; rows are (preventative, corrective, lease, other,
            total OPEX) and columns are (d/d net_aep, d/d machine_rating).
        """
        # derivatives
        self.d_corrective_d_aep = 0.0
        self.d_corrective_d_rating = self._lrcCF * self._costlrcEscFactor * self.turbine_number
        self.d_lease_d_aep = self._leaseCF * self._costlandEscFactor
        self.d_lease_d_rating = 0.0
        self.d_other_d_aep = 0.0
        self.d_other_d_rating = 0.0
        if not self._offshore:
            self.d_preventative_d_aep = self._landCostFactor * self._costEscalator
        else:
            self.d_preventative_d_aep = self._offshoreCostFactor * self._costEscalator
        self.d_preventative_d_rating = 0.0
        self.d_opex_d_aep = (
            self.d_preventative_d_aep + self.d_corrective_d_aep + self.d_lease_d_aep + self.d_other_d_aep
        )
        self.d_opex_d_rating = (
            self.d_preventative_d_rating + self.d_corrective_d_rating + self.d_lease_d_rating + self.d_other_d_rating
        )
        self.J = np.array(
            [
                [self.d_preventative_d_aep, self.d_preventative_d_rating],
                [self.d_corrective_d_aep, self.d_corrective_d_rating],
                [self.d_lease_d_aep, self.d_lease_d_rating],
                [self.d_other_d_aep, self.d_other_d_rating],
                [self.d_opex_d_aep, self.d_opex_d_rating],
            ]
        )
        return self.J
# NREL Cost and Scaling Model finance modules
##################################################
class fin_csm(object):
    """Finance model of the NREL Cost and Scaling Model (csmFinance.py).

    Computes plant-level COE and LCOE from turbine, BOS and OPEX costs,
    together with their partial derivatives.
    """

    def __init__(
        self,
        fixed_charge_rate=0.12,
        construction_finance_rate=0.0,
        tax_rate=0.4,
        discount_rate=0.07,
        construction_time=1.0,
        project_lifetime=20.0,
    ):
        """Store the financial parameters used by :meth:`compute`.

        Parameters
        ----------
        fixed_charge_rate : float
            Fixed charge rate for the COE calculation.
        construction_finance_rate : float
            Construction financing rate applied to overnight capital costs.
        tax_rate : float
            Tax rate applied to operations.
        discount_rate : float
            Applicable project discount rate.
        construction_time : float
            Number of years to complete project construction.
        project_lifetime : float
            Project lifetime [years] for the LCOE calculation.
        """
        super(fin_csm, self).__init__()
        # Outputs
        self.coe = 0.0   # levelized cost of energy for the wind plant
        self.lcoe = 0.0  # cost of energy - unlevelized
        # Parameters
        self.fixed_charge_rate = fixed_charge_rate
        self.construction_finance_rate = construction_finance_rate
        self.tax_rate = tax_rate
        self.discount_rate = discount_rate
        self.construction_time = construction_time
        self.project_lifetime = project_lifetime

    def compute(self, turbine_cost, turbine_number, bos_costs, avg_annual_opex, net_aep, sea_depth):
        """Evaluate plant COE and LCOE plus their derivatives.

        Parameters
        ----------
        turbine_cost : float
            Capital cost of a single wind turbine.
        turbine_number : int
            Number of turbines at the plant.
        bos_costs : float
            Balance-of-station cost of the plant.
        avg_annual_opex : float
            Average annual operating expenditures.
        net_aep : float
            Net annual energy production [kWh].
        sea_depth : float
            Water depth; > 0 marks an offshore plant.
        """
        # Inputs
        self.turbine_cost = turbine_cost
        self.turbine_number = turbine_number
        self.bos_costs = bos_costs
        self.avg_annual_opex = avg_annual_opex
        self.net_aep = net_aep
        self.sea_depth = sea_depth

        is_offshore = self.sea_depth > 0.0

        # Installed capital cost; offshore adds a 15% warranty premium
        # computed on the pre-warranty turbine cost (turbine cost / 1.10).
        if is_offshore:
            warranty_premium = (self.turbine_cost * self.turbine_number / 1.10) * 0.15
            icc = self.turbine_cost * self.turbine_number + warranty_premium + self.bos_costs
        else:
            icc = self.turbine_cost * self.turbine_number + self.bos_costs

        # compute COE and LCOE values
        tax_keep = 1 - self.tax_rate
        self.coe = (icc * self.fixed_charge_rate / self.net_aep) + self.avg_annual_opex * tax_keep / self.net_aep
        construction_growth = (1 + self.discount_rate) ** self.construction_time
        capital_recovery = self.discount_rate / (1 - (1 + self.discount_rate) ** (-1.0 * self.project_lifetime))
        amort_factor = (1 + 0.5 * (construction_growth - 1)) * capital_recovery
        self.lcoe = (icc * amort_factor + self.avg_annual_opex) / self.net_aep

        # derivatives
        if is_offshore:
            self.d_coe_d_turbine_cost = (
                self.turbine_number * (1 + 0.15 / 1.10) * self.fixed_charge_rate
            ) / self.net_aep
        else:
            self.d_coe_d_turbine_cost = self.turbine_number * self.fixed_charge_rate / self.net_aep
        self.d_coe_d_bos_cost = self.fixed_charge_rate / self.net_aep
        self.d_coe_d_avg_opex = tax_keep / self.net_aep
        self.d_coe_d_net_aep = -(icc * self.fixed_charge_rate + self.avg_annual_opex * tax_keep) / (
            self.net_aep ** 2
        )
        if is_offshore:
            self.d_lcoe_d_turbine_cost = self.turbine_number * (1 + 0.15 / 1.10) * amort_factor / self.net_aep
        else:
            self.d_lcoe_d_turbine_cost = self.turbine_number * amort_factor / self.net_aep
        self.d_lcoe_d_bos_cost = amort_factor / self.net_aep
        self.d_lcoe_d_avg_opex = 1.0 / self.net_aep
        self.d_lcoe_d_net_aep = -(icc * amort_factor + self.avg_annual_opex) / (self.net_aep ** 2)

    def list_deriv_vars(self):
        """Return (input names, output names) matching ``provideJ``."""
        deriv_inputs = ["turbine_cost", "bos_costs", "avg_annual_opex", "net_aep"]
        deriv_outputs = ["coe", "lcoe"]
        return deriv_inputs, deriv_outputs

    def provideJ(self):
        """Return the 2 x 4 Jacobian of (coe, lcoe) w.r.t. the inputs."""
        coe_row = [
            self.d_coe_d_turbine_cost,
            self.d_coe_d_bos_cost,
            self.d_coe_d_avg_opex,
            self.d_coe_d_net_aep,
        ]
        lcoe_row = [
            self.d_lcoe_d_turbine_cost,
            self.d_lcoe_d_bos_cost,
            self.d_lcoe_d_avg_opex,
            self.d_lcoe_d_net_aep,
        ]
        self.J = np.array([coe_row, lcoe_row])
        return self.J
"""if __name__=="__main__":
### TODO: Examples
"""
| [
"wisdem.commonse.utilities.smooth_min",
"math.exp",
"wisdem.nrelcsm.csmPPI.PPI",
"numpy.zeros",
"wisdem.commonse.utilities.smooth_abs",
"math.gamma",
"numpy.array",
"numpy.diag",
"numpy.sqrt"
] | [((462, 501), 'wisdem.nrelcsm.csmPPI.PPI', 'PPI', (['ref_yr', 'ref_mon', 'curr_yr', 'curr_mon'], {}), '(ref_yr, ref_mon, curr_yr, curr_mon)\n', (465, 501), False, 'from wisdem.nrelcsm.csmPPI import PPI\n'), ((2518, 2531), 'numpy.zeros', 'np.zeros', (['(161)'], {}), '(161)\n', (2526, 2531), True, 'import numpy as np\n'), ((2638, 2651), 'numpy.zeros', 'np.zeros', (['(161)'], {}), '(161)\n', (2646, 2651), True, 'import numpy as np\n'), ((7466, 7479), 'numpy.array', 'np.array', (['mtp'], {}), '(mtp)\n', (7474, 7479), True, 'import numpy as np\n'), ((9325, 9343), 'math.exp', 'exp', (['(-(X / L) ** K)'], {}), '(-(X / L) ** K)\n', (9328, 9343), False, 'from math import pi, gamma, exp\n'), ((11719, 11754), 'numpy.array', 'np.array', (['[wind_curve, power_curve]'], {}), '([wind_curve, power_curve])\n', (11727, 11754), True, 'import numpy as np\n'), ((12648, 12661), 'numpy.zeros', 'np.zeros', (['(161)'], {}), '(161)\n', (12656, 12661), True, 'import numpy as np\n'), ((13523, 13549), 'wisdem.commonse.utilities.smooth_abs', 'smooth_abs', (['Pbar0'], {'dx': '(0.01)'}), '(Pbar0, dx=0.01)\n', (13533, 13549), False, 'from wisdem.commonse.utilities import smooth_abs, smooth_min, hstack\n'), ((13663, 13702), 'wisdem.commonse.utilities.smooth_min', 'smooth_min', (['Pbar1', '(1.0)'], {'pct_offset': '(0.01)'}), '(Pbar1, 1.0, pct_offset=0.01)\n', (13673, 13702), False, 'from wisdem.commonse.utilities import smooth_abs, smooth_min, hstack\n'), ((18952, 19014), 'numpy.array', 'np.array', (['[[self.d_mass_d_diameter], [self.d_cost_d_diameter]]'], {}), '([[self.d_mass_d_diameter], [self.d_cost_d_diameter]])\n', (18960, 19014), True, 'import numpy as np\n'), ((24139, 24700), 'numpy.array', 'np.array', (['[[self.d_hub_mass_d_diameter, self.d_hub_mass_d_blade_mass], [self.\n d_pitch_mass_d_diameter, self.d_pitch_mass_d_blade_mass], [self.\n d_spinner_mass_d_diameter, self.d_spinner_mass_d_blade_mass], [self.\n d_system_mass_d_diameter, self.d_system_mass_d_blade_mass], [self.\n 
d_hub_cost_d_diameter, self.d_hub_cost_d_blade_mass], [self.\n d_pitch_cost_d_diameter, self.d_pitch_cost_d_blade_mass], [self.\n d_spinner_cost_d_diameter, self.d_spinner_cost_d_blade_mass], [self.\n d_system_cost_d_diameter, self.d_system_cost_d_blade_mass]]'], {}), '([[self.d_hub_mass_d_diameter, self.d_hub_mass_d_blade_mass], [self\n .d_pitch_mass_d_diameter, self.d_pitch_mass_d_blade_mass], [self.\n d_spinner_mass_d_diameter, self.d_spinner_mass_d_blade_mass], [self.\n d_system_mass_d_diameter, self.d_system_mass_d_blade_mass], [self.\n d_hub_cost_d_diameter, self.d_hub_cost_d_blade_mass], [self.\n d_pitch_cost_d_diameter, self.d_pitch_cost_d_blade_mass], [self.\n d_spinner_cost_d_diameter, self.d_spinner_cost_d_blade_mass], [self.\n d_system_cost_d_diameter, self.d_system_cost_d_blade_mass]])\n', (24147, 24700), True, 'import numpy as np\n'), ((47633, 49498), 'numpy.array', 'np.array', (['[[self.d_nacelle_mass_d_r_diameter, self.d_nacelle_mass_d_r_mass, self.\n d_nacelle_mass_d_r_thrust, self.d_nacelle_mass_d_r_torque, self.\n d_nacelle_mass_d_rating], [self.d_lss_mass_d_r_diameter, self.\n d_lss_mass_d_r_mass, 0.0, self.d_lss_mass_d_r_torque, 0.0], [self.\n d_bearings_mass_d_r_diameter, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, self\n .d_gearbox_mass_d_r_torque, 0.0], [0.0, 0.0, 0.0, self.\n d_generator_mass_d_r_torque, self.d_generator_mass_d_rating], [0.0, 0.0,\n 0.0, 0.0, self.d_brakes_mass_d_rating], [self.d_yaw_mass_d_r_diameter, \n 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0,\n self.d_hvac_mass_d_rating], [0.0, 0.0, 0.0, 0.0, 0.0], [self.\n d_mainframe_mass_d_r_diameter, self.d_mainframe_mass_d_r_mass, self.\n d_mainframe_mass_d_r_thrust, self.d_mainframe_mass_d_r_torque, 0.0], [\n 0.0, 0.0, 0.0, 0.0, self.d_cover_mass_d_rating], [0.0, 0.0, 0.0, 0.0, \n 0.0], [self.d_nacelle_cost_d_r_diameter, self.d_nacelle_cost_d_r_mass,\n self.d_nacelle_cost_d_r_thrust, self.d_nacelle_cost_d_r_torque, self.\n d_nacelle_cost_d_rating], 
[self.d_lss_cost_d_r_diameter, 0.0, 0.0, 0.0,\n 0.0], [self.d_bearings_cost_d_r_diameter, 0.0, 0.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 0.0, self.d_gearbox_cost_d_rating], [0.0, 0.0, 0.0, 0.0, self\n .d_generator_cost_d_rating], [0.0, 0.0, 0.0, 0.0, self.\n d_brakes_cost_d_rating], [self.d_yaw_cost_d_r_diameter, 0.0, 0.0, 0.0, \n 0.0], [0.0, 0.0, 0.0, 0.0, self.d_electronics_cost_d_rating], [0.0, 0.0,\n 0.0, 0.0, self.d_hvac_cost_d_rating], [0.0, 0.0, 0.0, 0.0, self.\n d_vselectronics_cost_d_rating], [self.d_mainframe_cost_d_r_diameter,\n self.d_mainframe_cost_d_r_mass, self.d_mainframe_cost_d_r_thrust, self.\n d_mainframe_cost_d_r_torque, 0.0], [0.0, 0.0, 0.0, 0.0, self.\n d_cover_cost_d_rating], [0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[self.d_nacelle_mass_d_r_diameter, self.d_nacelle_mass_d_r_mass,\n self.d_nacelle_mass_d_r_thrust, self.d_nacelle_mass_d_r_torque, self.\n d_nacelle_mass_d_rating], [self.d_lss_mass_d_r_diameter, self.\n d_lss_mass_d_r_mass, 0.0, self.d_lss_mass_d_r_torque, 0.0], [self.\n d_bearings_mass_d_r_diameter, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, self\n .d_gearbox_mass_d_r_torque, 0.0], [0.0, 0.0, 0.0, self.\n d_generator_mass_d_r_torque, self.d_generator_mass_d_rating], [0.0, 0.0,\n 0.0, 0.0, self.d_brakes_mass_d_rating], [self.d_yaw_mass_d_r_diameter, \n 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0,\n self.d_hvac_mass_d_rating], [0.0, 0.0, 0.0, 0.0, 0.0], [self.\n d_mainframe_mass_d_r_diameter, self.d_mainframe_mass_d_r_mass, self.\n d_mainframe_mass_d_r_thrust, self.d_mainframe_mass_d_r_torque, 0.0], [\n 0.0, 0.0, 0.0, 0.0, self.d_cover_mass_d_rating], [0.0, 0.0, 0.0, 0.0, \n 0.0], [self.d_nacelle_cost_d_r_diameter, self.d_nacelle_cost_d_r_mass,\n self.d_nacelle_cost_d_r_thrust, self.d_nacelle_cost_d_r_torque, self.\n d_nacelle_cost_d_rating], [self.d_lss_cost_d_r_diameter, 0.0, 0.0, 0.0,\n 0.0], [self.d_bearings_cost_d_r_diameter, 0.0, 0.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 0.0, self.d_gearbox_cost_d_rating], [0.0, 0.0, 0.0, 
0.0, self\n .d_generator_cost_d_rating], [0.0, 0.0, 0.0, 0.0, self.\n d_brakes_cost_d_rating], [self.d_yaw_cost_d_r_diameter, 0.0, 0.0, 0.0, \n 0.0], [0.0, 0.0, 0.0, 0.0, self.d_electronics_cost_d_rating], [0.0, 0.0,\n 0.0, 0.0, self.d_hvac_cost_d_rating], [0.0, 0.0, 0.0, 0.0, self.\n d_vselectronics_cost_d_rating], [self.d_mainframe_cost_d_r_diameter,\n self.d_mainframe_cost_d_r_mass, self.d_mainframe_cost_d_r_thrust, self.\n d_mainframe_cost_d_r_torque, 0.0], [0.0, 0.0, 0.0, 0.0, self.\n d_cover_cost_d_rating], [0.0, 0.0, 0.0, 0.0, 0.0]])\n', (47641, 49498), True, 'import numpy as np\n'), ((52972, 53085), 'numpy.array', 'np.array', (['[[self.d_mass_d_diameter, self.d_mass_d_hheight], [self.d_cost_d_diameter,\n self.d_cost_d_hheight]]'], {}), '([[self.d_mass_d_diameter, self.d_mass_d_hheight], [self.\n d_cost_d_diameter, self.d_cost_d_hheight]])\n', (52980, 53085), True, 'import numpy as np\n'), ((54234, 54296), 'numpy.array', 'np.array', (['[[self.d_mass_d_blade_mass, self.d_mass_d_hub_mass]]'], {}), '([[self.d_mass_d_blade_mass, self.d_mass_d_hub_mass]])\n', (54242, 54296), True, 'import numpy as np\n'), ((57903, 58179), 'numpy.array', 'np.array', (['[[self.d_mass_d_blade_mass, self.d_mass_d_hub_mass, self.\n d_mass_d_nacelle_mass, self.d_mass_d_tower_mass, 0.0, 0.0, 0.0, 0.0], [\n 0.0, 0.0, 0.0, 0.0, self.d_cost_d_blade_cost, self.d_cost_d_hub_cost,\n self.d_cost_d_nacelle_cost, self.d_cost_d_tower_cost]]'], {}), '([[self.d_mass_d_blade_mass, self.d_mass_d_hub_mass, self.\n d_mass_d_nacelle_mass, self.d_mass_d_tower_mass, 0.0, 0.0, 0.0, 0.0], [\n 0.0, 0.0, 0.0, 0.0, self.d_cost_d_blade_cost, self.d_cost_d_hub_cost,\n self.d_cost_d_nacelle_cost, self.d_cost_d_tower_cost]])\n', (57911, 58179), True, 'import numpy as np\n'), ((80012, 81271), 'numpy.array', 'np.array', (['[[self.d_development_d_rating, self.d_development_d_diameter, self.\n d_development_d_tcc, self.d_development_d_hheight, self.\n d_development_d_rna], [self.d_preparation_d_rating, self.\n 
d_preparation_d_diameter, self.d_preparation_d_tcc, self.\n d_preparation_d_hheight, self.d_preparation_d_rna], [self.\n d_transport_d_rating, self.d_transport_d_diameter, self.\n d_transport_d_tcc, self.d_transport_d_hheight, self.d_transport_d_rna],\n [self.d_foundation_d_rating, self.d_foundation_d_diameter, self.\n d_foundation_d_tcc, self.d_foundation_d_hheight, self.\n d_foundation_d_rna], [self.d_electrical_d_rating, self.\n d_electrical_d_diameter, self.d_electrical_d_tcc, self.\n d_electrical_d_hheight, self.d_electrical_d_rna], [self.\n d_assembly_d_rating, self.d_assembly_d_diameter, self.d_assembly_d_tcc,\n self.d_assembly_d_hheight, self.d_assembly_d_rna], [self.\n d_soft_d_rating, self.d_soft_d_diameter, self.d_soft_d_tcc, self.\n d_soft_d_hheight, self.d_soft_d_rna], [self.d_other_d_rating, self.\n d_other_d_diameter, self.d_other_d_tcc, self.d_other_d_hheight, self.\n d_other_d_rna], [self.d_cost_d_rating, self.d_cost_d_diameter, self.\n d_cost_d_tcc, self.d_cost_d_hheight, self.d_cost_d_rna]]'], {}), '([[self.d_development_d_rating, self.d_development_d_diameter, self\n .d_development_d_tcc, self.d_development_d_hheight, self.\n d_development_d_rna], [self.d_preparation_d_rating, self.\n d_preparation_d_diameter, self.d_preparation_d_tcc, self.\n d_preparation_d_hheight, self.d_preparation_d_rna], [self.\n d_transport_d_rating, self.d_transport_d_diameter, self.\n d_transport_d_tcc, self.d_transport_d_hheight, self.d_transport_d_rna],\n [self.d_foundation_d_rating, self.d_foundation_d_diameter, self.\n d_foundation_d_tcc, self.d_foundation_d_hheight, self.\n d_foundation_d_rna], [self.d_electrical_d_rating, self.\n d_electrical_d_diameter, self.d_electrical_d_tcc, self.\n d_electrical_d_hheight, self.d_electrical_d_rna], [self.\n d_assembly_d_rating, self.d_assembly_d_diameter, self.d_assembly_d_tcc,\n self.d_assembly_d_hheight, self.d_assembly_d_rna], [self.\n d_soft_d_rating, self.d_soft_d_diameter, self.d_soft_d_tcc, self.\n d_soft_d_hheight, 
self.d_soft_d_rna], [self.d_other_d_rating, self.\n d_other_d_diameter, self.d_other_d_tcc, self.d_other_d_hheight, self.\n d_other_d_rna], [self.d_cost_d_rating, self.d_cost_d_diameter, self.\n d_cost_d_tcc, self.d_cost_d_hheight, self.d_cost_d_rna]])\n', (80020, 81271), True, 'import numpy as np\n'), ((85756, 86027), 'numpy.array', 'np.array', (['[[self.d_preventative_d_aep, self.d_preventative_d_rating], [self.\n d_corrective_d_aep, self.d_corrective_d_rating], [self.d_lease_d_aep,\n self.d_lease_d_rating], [self.d_other_d_aep, self.d_other_d_rating], [\n self.d_opex_d_aep, self.d_opex_d_rating]]'], {}), '([[self.d_preventative_d_aep, self.d_preventative_d_rating], [self.\n d_corrective_d_aep, self.d_corrective_d_rating], [self.d_lease_d_aep,\n self.d_lease_d_rating], [self.d_other_d_aep, self.d_other_d_rating], [\n self.d_opex_d_aep, self.d_opex_d_rating]])\n', (85764, 86027), True, 'import numpy as np\n'), ((90735, 90952), 'numpy.array', 'np.array', (['[[self.d_coe_d_turbine_cost, self.d_coe_d_bos_cost, self.d_coe_d_avg_opex,\n self.d_coe_d_net_aep], [self.d_lcoe_d_turbine_cost, self.\n d_lcoe_d_bos_cost, self.d_lcoe_d_avg_opex, self.d_lcoe_d_net_aep]]'], {}), '([[self.d_coe_d_turbine_cost, self.d_coe_d_bos_cost, self.\n d_coe_d_avg_opex, self.d_coe_d_net_aep], [self.d_lcoe_d_turbine_cost,\n self.d_lcoe_d_bos_cost, self.d_lcoe_d_avg_opex, self.d_lcoe_d_net_aep]])\n', (90743, 90952), True, 'import numpy as np\n'), ((14273, 14288), 'numpy.diag', 'np.diag', (['dP_dPa'], {}), '(dP_dPa)\n', (14280, 14288), True, 'import numpy as np\n'), ((5573, 5606), 'numpy.sqrt', 'np.sqrt', (['(b ** 2 - 4 * kTorque * c)'], {}), '(b ** 2 - 4 * kTorque * c)\n', (5580, 5606), True, 'import numpy as np\n'), ((11906, 11926), 'math.gamma', 'gamma', (['(1.0 + 1.0 / K)'], {}), '(1.0 + 1.0 / K)\n', (11911, 11926), False, 'from math import pi, gamma, exp\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 3 19:49:16 2021
@author: ghiggi
"""
import os
import glob
import shutil
import time
import torch
import zarr
import dask
import numpy as np
import xarray as xr
from modules.dataloader_autoregressive import remove_unused_Y
from modules.dataloader_autoregressive import get_aligned_ar_batch
from modules.dataloader_autoregressive import AutoregressiveDataset
from modules.dataloader_autoregressive import AutoregressiveDataLoader
from modules.utils_autoregressive import check_ar_settings
from modules.utils_autoregressive import check_input_k
from modules.utils_autoregressive import check_output_k
from modules.utils_io import _get_feature_order, check_timesteps_format, check_no_duplicate_timesteps
from modules.utils_zarr import check_chunks
from modules.utils_zarr import check_rounding
from modules.utils_zarr import rechunk_Dataset
from modules.utils_zarr import write_zarr
from modules.utils_torch import check_device
from modules.utils_torch import check_pin_memory
from modules.utils_torch import check_asyncronous_gpu_transfer
from modules.utils_torch import check_prefetch_in_gpu
from modules.utils_torch import check_prefetch_factor
from modules.utils_swag import bn_update
##----------------------------------------------------------------------------.
# conda install -c conda-forge zarr
# conda install -c conda-forge cfgrib
# conda install -c conda-forge rechunker
#----------------------------------------------------------------------------.
###############
### Checks ####
###############
def check_timedelta_unit(timedelta_unit):
    """Validate 'timedelta_unit' and return it unchanged.

    Raises
    ------
    TypeError
        If timedelta_unit is not a string.
    ValueError
        If timedelta_unit is not one of the supported unit names.
    """
    if not isinstance(timedelta_unit, str):
        raise TypeError("'timedelta_unit' must be a string.")
    valid_timedelta_unit = list(get_timedelta_types())
    if timedelta_unit not in valid_timedelta_unit:
        raise ValueError("Specify a valid 'timedelta_unit': {}".format(valid_timedelta_unit))
    return timedelta_unit
def get_timedelta_types():
    """Return a {time_delta_unit: numpy timedelta64 dtype string} dictionary.

    Bug fix vs. the previous revision: 'microsecond' was mapped to
    'timedelta64[ms]', but in numpy 'ms' denotes *milliseconds*;
    microseconds are 'us'.  A 'millisecond' entry is added so the 'ms'
    dtype remains reachable.
    """
    timedelta_types = {'nanosecond': 'timedelta64[ns]',
                       'microsecond': 'timedelta64[us]',
                       'millisecond': 'timedelta64[ms]',
                       'second': 'timedelta64[s]',
                       'minute': 'timedelta64[m]',
                       'hour': 'timedelta64[h]',
                       'day': 'timedelta64[D]',
                       'month': 'timedelta64[M]',
                       'year': 'timedelta64[Y]'}
    return timedelta_types
##----------------------------------------------------------------------------.
#########################
### Prediction utils ####
#########################
def get_dict_Y_pred_selection(dim_info,
                              dict_forecast_rel_idx_Y,
                              keep_first_prediction = True):
    """Map each AR iteration to the (leadtime, tensor-slice) pairs to extract.

    Parameters
    ----------
    dim_info : dict
        Maps dimension names to their axis position in the predicted Y tensors.
        Only the 'time' entry is read here; len(dim_info) gives the tensor rank.
    dict_forecast_rel_idx_Y : dict
        {ar_iteration: array of relative output k indices predicted at that iteration}.
    keep_first_prediction : bool
        When the same output k is predicted at several AR iterations, keep the
        earliest one (True) or the latest one (False).

    Returns
    -------
    dict
        {ar_iteration: [(leadtime, subset_indexing), ...]} where subset_indexing
        is a list of slices/indices selecting that leadtime along the time axis.
    """
    time_axis = dim_info['time']
    n_iterations = max(list(dict_forecast_rel_idx_Y.keys()))
    # Template indexing: full slice on every dimension of the Y tensor
    full_indexing = [slice(None)] * len(dim_info)
    # All output k values appearing across AR iterations (sorted, unique)
    all_ks = np.unique(np.stack(list(dict_forecast_rel_idx_Y.values())).flatten())
    # For every output k, collect the AR iterations that predict it
    occurrences = {k: [] for k in all_ks}
    for iteration, rel_idxs in dict_forecast_rel_idx_Y.items():
        for k in rel_idxs:
            occurrences[k].append(iteration)
    # Resolve duplicates: earliest or latest predicting AR iteration per output k
    pick = min if keep_first_prediction else max
    selection = {k: pick(iters) for k, iters in occurrences.items()}
    # Build {ar_iteration: [(leadtime, subset_indexing), ...]}
    dict_Y_pred_selection = {it: [] for it in range(n_iterations + 1)}
    for k, iteration in selection.items():
        slice_idx = np.argwhere(dict_forecast_rel_idx_Y[iteration] == k)[0][0]
        indexing = full_indexing.copy()
        indexing[time_axis] = slice_idx
        dict_Y_pred_selection[iteration].append((k, indexing))
    return dict_Y_pred_selection
def create_ds_forecast(dict_Y_predicted_per_leadtime,
                       forecast_reference_times,
                       leadtimes,
                       data_dynamic,
                       dim_info_dynamic):
    """Create the forecast xarray Dataset stacking the tensors in dict_Y_predicted_per_leadtime."""
    # Stack the per-leadtime arrays along the time axis.
    # The input dictionary is emptied while stacking to release each array
    # as soon as it has been consumed.
    slices_to_stack = []
    for leadtime in list(dict_Y_predicted_per_leadtime.keys()):
        slices_to_stack.append(dict_Y_predicted_per_leadtime.pop(leadtime))
    Y_forecasts = np.stack(slices_to_stack, axis=dim_info_dynamic['time'])
    ##----------------------------------------------------------------.
    ### Create xarray Dataset of forecasts
    # - Optional dimensions are the ancient ones besides 'time' and 'feature'
    dims = list(data_dynamic.dims)
    dims_optional = np.array(dims)[np.isin(dims, ['time', 'feature'], invert=True)].tolist()
    # - Retrieve features
    features = _get_feature_order(data_dynamic)
    # - Create DataArray
    forecast_dims = ['forecast_reference_time', 'leadtime'] + dims_optional + ['feature']
    da = xr.DataArray(Y_forecasts,
                      dims=forecast_dims,
                      coords={'leadtime': leadtimes,
                              'forecast_reference_time': forecast_reference_times,
                              'feature': features})
    # - Transform to dataset, one variable per feature (required to save to zarr)
    ds = da.to_dataset(dim='feature')
    ## - Re-attach the ancient coordinates, except 'time' and 'feature'
    for coord in list(data_dynamic.coords.keys()):
        if coord in ('time', 'feature'):
            continue
        ds[coord] = data_dynamic[coord]
        ds = ds.set_coords(coord)
    ##----------------------------------------------------------------.
    # Return the forecast xr.Dataset
    return ds
def rescale_forecasts(ds, scaler, reconcat=True):
    """Apply the scaler inverse transform to the forecast xarray Dataset.

    scaler.inverse_transform(ds).compute() works only for GlobalScalers;
    TemporalScalers need a 'time' dimension, so each forecast is rescaled
    separately with 'leadtime' temporarily swapped to 'time'.
    When reconcat is True the rescaled forecasts are concatenated back into a
    single Dataset; otherwise the list of per-forecast Datasets is returned.
    """
    ds['time'] = ds['leadtime'] + ds['forecast_reference_time']
    ds = ds.set_coords('time')
    l_rescaled_ds = []
    # - Rescale one forecast_reference_time at a time
    n_forecasts = len(ds['forecast_reference_time'])
    for idx in range(n_forecasts):
        forecast = ds.isel(forecast_reference_time=idx).swap_dims({"leadtime": "time"})
        rescaled = scaler.inverse_transform(forecast)
        l_rescaled_ds.append(rescaled.swap_dims({"time": "leadtime"}).drop('time'))
    if reconcat is True:
        return xr.concat(l_rescaled_ds, dim='forecast_reference_time')
    return l_rescaled_ds
def rescale_forecasts_and_write_zarr(ds, scaler, zarr_fpath,
                                     chunks = None, default_chunks = None,
                                     compressor = None, default_compressor = None,
                                     rounding = None,
                                     consolidated = True,
                                     append = True,
                                     append_dim = 'forecast_reference_time',
                                     show_progress = False):
    """Apply the scaler inverse transform to the forecast Dataset and write it to Zarr.

    TemporalScalers need a 'time' dimension, so each forecast_reference_time is
    rescaled separately; the rescaled forecasts are then concatenated and written
    to disk in a single call (writing each forecast separately is much slower).
    """
    ds['time'] = ds['leadtime'] + ds['forecast_reference_time']
    ds = ds.set_coords('time')
    # - Rescale one forecast_reference_time at a time
    rescaled_forecasts = []
    for idx in range(len(ds['forecast_reference_time'])):
        forecast = ds.isel(forecast_reference_time=idx).swap_dims({"leadtime": "time"})
        forecast = scaler.inverse_transform(forecast)
        forecast = forecast.swap_dims({"time": "leadtime"}).drop('time')
        rescaled_forecasts.append(forecast.expand_dims('forecast_reference_time'))
    # - Concatenate and write once
    ds = xr.concat(rescaled_forecasts, dim="forecast_reference_time")
    write_zarr(zarr_fpath = zarr_fpath,
               ds = ds,
               chunks = chunks, default_chunks = default_chunks,
               compressor = compressor, default_compressor = default_compressor,
               rounding = rounding,
               consolidated = consolidated,
               append = append,
               append_dim = append_dim,
               show_progress = show_progress)
    return None
#-----------------------------------------------------------------------------.
############################
### Prediction Wrappers ####
############################
def AutoregressivePredictions(model,
                              # Data
                              data_dynamic,
                              data_static = None,
                              data_bc = None,
                              bc_generator = None,
                              # AR_batching_function
                              ar_batch_fun = get_aligned_ar_batch,
                              # Scaler options
                              scaler_transform = None,
                              scaler_inverse = None,
                              # Dataloader options
                              batch_size = 64,
                              num_workers = 0,
                              prefetch_factor = 2,
                              prefetch_in_gpu = False,
                              pin_memory = False,
                              asyncronous_gpu_transfer = True,
                              device = 'cpu',
                              # Autoregressive settings
                              input_k = [-3,-2,-1],
                              output_k = [0],
                              forecast_cycle = 1,
                              ar_iterations = 50,
                              stack_most_recent_prediction = True,
                              # Prediction options
                              forecast_reference_times = None,
                              keep_first_prediction = True,
                              ar_blocks = None,
                              # Save options
                              zarr_fpath = None,
                              rounding = None,
                              compressor = "auto",
                              chunks = "auto"):
    """Wrapper to generate weather forecasts following CDS Common Data Model (CDM).

    CDS coordinate           dtype            Synonyms
    -------------------------------------------------------------------------
    - realization            int64
    - forecast_reference_time datetime64[ns]  (base time)
    - leadtime               timedelta64[ns]
    - lat                    float64
    - lon                    float64
    - time                   datetime64[ns]   (forecasted_time/valid_time)

    To convert to ECMWF Common Data Model use the following code:
    import cf2cdm
    cf2cdm.translate_coords(ds_forecasts, cf2cdm.ECMWF)

    Terminology
    - Forecasts reference time: The time of the analysis from which the forecast was made
    - (Validity) Time: The time represented by the forecast
    - Leadtime: The time interval between the forecast reference time and the (validity) time.

    Coordinates notes:
    - output_k = 0 corresponds to the first forecast leadtime
    - leadtime = 0 is not forecasted. It corresponds to the analysis forecast_reference_time
    - In the ECMWF CMD, forecast_reference_time is termed 'time', 'time' termed 'valid_time'!

    Prediction settings
    - ar_blocks = None (or ar_blocks = ar_iterations + 1) run all ar_iterations in a single run.
    - ar_blocks < ar_iterations + 1: run ar_iterations per ar_block of ar_iteration
    """
    # Possible speed up: rescale only after all batch have been processed ...
    ##------------------------------------------------------------------------.
    with dask.config.set(scheduler='synchronous'):
        ## Checks arguments
        device = check_device(device)
        pin_memory = check_pin_memory(pin_memory=pin_memory, num_workers=num_workers, device=device)
        asyncronous_gpu_transfer = check_asyncronous_gpu_transfer(asyncronous_gpu_transfer=asyncronous_gpu_transfer, device=device)
        prefetch_in_gpu = check_prefetch_in_gpu(prefetch_in_gpu=prefetch_in_gpu, num_workers=num_workers, device=device)
        prefetch_factor = check_prefetch_factor(prefetch_factor=prefetch_factor, num_workers=num_workers)
        ##------------------------------------------------------------------------.
        # Check that autoregressive settings are valid
        # - input_k and output_k must be numpy arrays hereafter !
        input_k = check_input_k(input_k=input_k, ar_iterations=ar_iterations)
        output_k = check_output_k(output_k = output_k)
        check_ar_settings(input_k = input_k,
                          output_k = output_k,
                          forecast_cycle = forecast_cycle,
                          ar_iterations = ar_iterations,
                          stack_most_recent_prediction = stack_most_recent_prediction)
        ar_iterations = int(ar_iterations)
        ##------------------------------------------------------------------------.
        ### Retrieve feature info of the forecast
        features = _get_feature_order(data_dynamic)
        ##------------------------------------------------------------------------.
        # Check Zarr settings
        WRITE_TO_ZARR = zarr_fpath is not None
        if WRITE_TO_ZARR:
            # - If zarr fpath provided, create the required folder
            if not os.path.exists(os.path.dirname(zarr_fpath)):
                os.makedirs(os.path.dirname(zarr_fpath))
            if os.path.exists(zarr_fpath):
                # BUGFIX: the placeholder was previously never filled in
                # ("An {} store already exists." without .format(...)).
                raise ValueError("A zarr store already exists at {}.".format(zarr_fpath))
            # - Set default chunks and compressors
            # ---> -1 to all optional dimensions (i..e nodes, lat, lon, ens, plevels,...)
            dims = list(data_dynamic.dims)
            dims_optional = np.array(dims)[np.isin(dims, ['time','feature'], invert=True)].tolist()
            default_chunks = {dim : -1 for dim in dims_optional}
            default_chunks['forecast_reference_time'] = 1
            default_chunks['leadtime'] = 1
            default_compressor = zarr.Blosc(cname="zstd", clevel=0, shuffle=2)
            # - Check rounding settings
            rounding = check_rounding(rounding = rounding,
                                      variable_names = features)
        ##------------------------------------------------------------------------.
        # Check ar_blocks
        if not isinstance(ar_blocks, (int, float, type(None))):
            raise TypeError("'ar_blocks' must be int or None.")
        if isinstance(ar_blocks, float):
            ar_blocks = int(ar_blocks)
        if not WRITE_TO_ZARR and isinstance(ar_blocks, int):
            raise ValueError("If 'zarr_fpath' not specified, 'ar_blocks' must be None.")
        if ar_blocks is None:
            ar_blocks = ar_iterations + 1
        if ar_blocks > ar_iterations + 1:
            raise ValueError("'ar_blocks' must be equal or smaller to 'ar_iterations'")
        PREDICT_AR_BLOCKS = ar_blocks != (ar_iterations + 1)
        ##------------------------------------------------------------------------.
        ### Define DataLoader subset_timesteps
        subset_timesteps = None
        if forecast_reference_times is not None:
            # Check forecast_reference_times
            forecast_reference_times = check_timesteps_format(forecast_reference_times)
            if len(forecast_reference_times) == 0:
                raise ValueError("If you don't want to specify specific 'forecast_reference_times', set it to None")
            check_no_duplicate_timesteps(forecast_reference_times, var_name='forecast_reference_times')
            # Ensure the temporal order of forecast_reference_times
            forecast_reference_times.sort()
            # Define subset_timesteps (aka idx_k=0 aka first forecasted timestep)
            t_res_timedelta = np.diff(data_dynamic.time.values)[0]
            subset_timesteps = forecast_reference_times + -1*max(input_k)*t_res_timedelta
            # Redefine batch_size if larger than the number of forecast to generate
            # --> And set num_workers to 0 (only 1 batch to load ...)
            if batch_size >= len(forecast_reference_times):
                batch_size = len(forecast_reference_times)
                num_workers = 0
        ##------------------------------------------------------------------------.
        ### Create training Autoregressive Dataset and DataLoader
        dataset = AutoregressiveDataset(data_dynamic = data_dynamic,
                                        data_bc = data_bc,
                                        data_static = data_static,
                                        bc_generator = bc_generator,
                                        scaler = scaler_transform,
                                        # Dataset options
                                        subset_timesteps = subset_timesteps,
                                        training_mode = False,
                                        # Autoregressive settings
                                        input_k = input_k,
                                        output_k = output_k,
                                        forecast_cycle = forecast_cycle,
                                        ar_iterations = ar_iterations,
                                        stack_most_recent_prediction = stack_most_recent_prediction,
                                        # GPU settings
                                        device = device)
        dataloader = AutoregressiveDataLoader(dataset = dataset,
                                              batch_size = batch_size,
                                              drop_last_batch = False,
                                              shuffle = False,
                                              num_workers = num_workers,
                                              prefetch_factor = prefetch_factor,
                                              prefetch_in_gpu = prefetch_in_gpu,
                                              pin_memory = pin_memory,
                                              asyncronous_gpu_transfer = asyncronous_gpu_transfer,
                                              device = device)
        ##------------------------------------------------------------------------.
        # Retrieve custom ar_batch_fun fuction
        ar_batch_fun = dataset.ar_batch_fun
        assert features == dataset.feature_order['dynamic']
        ### Start forecasting
        # - Initialize
        t_i = time.time()
        model.to(device)
        # - Set dropout and batch normalization layers to evaluation mode
        model.eval()
        list_ds = []
        FIRST_PREDICTION = True
        with torch.set_grad_enabled(False):
            ##--------------------------------------------------------------------.
            # Iterate along batches
            dataloader_iter = iter(dataloader)
            num_batches = len(dataloader_iter)
            batch_indices = range(num_batches)
            for batch_count in batch_indices:
                batch_dict = next(dataloader_iter)
                t_gen = time.time()
                ##----------------------------------------------------------------.
                ### Retrieve forecast informations
                dim_info_dynamic = batch_dict['dim_info']['dynamic']
                feature_order_dynamic = batch_dict['feature_order']['dynamic']
                forecast_time_info = batch_dict['forecast_time_info']
                forecast_reference_times = forecast_time_info["forecast_reference_time"]
                dict_forecast_leadtime = forecast_time_info["dict_forecast_leadtime"]
                dict_forecast_rel_idx_Y = forecast_time_info["dict_forecast_rel_idx_Y"]
                leadtimes = np.unique(np.stack(list(dict_forecast_leadtime.values())).flatten())
                assert features == feature_order_dynamic
                ##----------------------------------------------------------------.
                ### Retrieve dictionary providing at each AR iteration
                # the tensor slice indexing to obtain a "regular" forecasts
                if FIRST_PREDICTION:
                    dict_Y_pred_selection = get_dict_Y_pred_selection(dim_info = dim_info_dynamic,
                                                                      dict_forecast_rel_idx_Y = dict_forecast_rel_idx_Y,
                                                                      keep_first_prediction = keep_first_prediction)
                    FIRST_PREDICTION = False
                ##----------------------------------------------------------------.
                ### Perform autoregressive forecasting
                dict_Y_predicted = {}
                dict_Y_predicted_per_leadtime = {}
                ar_counter_per_block = 0
                previous_block_ar_iteration = 0
                for ar_iteration in range(ar_iterations+1):
                    # Retrieve X and Y for current AR iteration
                    # - Torch Y stays in CPU with training_mode=False
                    torch_X, _ = ar_batch_fun(ar_iteration = ar_iteration,
                                              batch_dict = batch_dict,
                                              dict_Y_predicted = dict_Y_predicted,
                                              device = device,
                                              asyncronous_gpu_transfer = asyncronous_gpu_transfer)
                    ##------------------------------------------------------------.
                    # Forward pass and store output for stacking into next AR iterations
                    dict_Y_predicted[ar_iteration] = model(torch_X)
                    ##------------------------------------------------------------.
                    # Select required tensor slices (along time dimension) for final forecast
                    if len(dict_Y_pred_selection[ar_iteration]) > 0:
                        for leadtime, subset_indexing in dict_Y_pred_selection[ar_iteration]:
                            dict_Y_predicted_per_leadtime[leadtime] = dict_Y_predicted[ar_iteration][subset_indexing].cpu().numpy()
                    ##------------------------------------------------------------.
                    # Remove unnecessary variables on GPU
                    remove_unused_Y(ar_iteration = ar_iteration,
                                    dict_Y_predicted = dict_Y_predicted,
                                    dict_Y_to_remove = batch_dict['dict_Y_to_remove'])
                    del torch_X
                    ##------------------------------------------------------------.
                    # The following code can be used to verify that no leak of memory occurs
                    # torch.cuda.synchronize()
                    # print("{}: {:.2f} MB".format(ar_iteration, torch.cuda.memory_allocated()/1000/1000))
                    ##------------------------------------------------------------.
                    # Create and save a forecast Dataset after each ar_block ar_iterations
                    ar_counter_per_block += 1
                    if ar_counter_per_block == ar_blocks:
                        block_slice = slice(previous_block_ar_iteration, ar_iteration+1)
                        ds = create_ds_forecast(dict_Y_predicted_per_leadtime = dict_Y_predicted_per_leadtime,
                                                leadtimes = leadtimes[block_slice],
                                                forecast_reference_times = forecast_reference_times,
                                                data_dynamic = data_dynamic,
                                                dim_info_dynamic = dim_info_dynamic)
                        # Reset ar_counter_per_block
                        ar_counter_per_block = 0
                        previous_block_ar_iteration = ar_iteration + 1
                        # --------------------------------------------------------.
                        # If predicting blocks of ar_iterations
                        # - Write AR blocks temporary to disk (and append progressively)
                        if PREDICT_AR_BLOCKS: # (WRITE_TO_ZARR=True implicit)
                            tmp_ar_block_zarr_fpath = os.path.join(os.path.dirname(zarr_fpath), "tmp_ar_blocks.zarr")
                            write_zarr(zarr_fpath = tmp_ar_block_zarr_fpath,
                                       ds = ds,
                                       chunks = chunks, default_chunks = default_chunks,
                                       compressor = compressor, default_compressor = default_compressor,
                                       rounding = rounding,
                                       consolidated = True,
                                       append = True,
                                       append_dim = 'leadtime',
                                       show_progress = False)
                        # --------------------------------------------------------.
                ##--------------------------------------.-------------------------.
                # Clean memory
                del dict_Y_predicted
                del dict_Y_predicted_per_leadtime
                ##----------------------------------------------------------------.
                ### Post-processing
                t_post = time.time()
                # - Retransform data to original dimensions (and write to Zarr optionally)
                if WRITE_TO_ZARR:
                    if PREDICT_AR_BLOCKS:
                        # - Read the temporary ar_blocks saved on disk
                        ds = xr.open_zarr(tmp_ar_block_zarr_fpath)
                    if scaler_inverse is not None:
                        # TODO: Here an error occur if chunk forecast_reference_time > 1
                        # --> Applying the inverse scaler means processing each
                        #     forecast_reference_time separately
                        # ---> A solution would be to stack all forecasts together before
                        #      write to disk ... but this would consume memory and time.
                        rescale_forecasts_and_write_zarr(ds = ds,
                                                         scaler = scaler_inverse,
                                                         zarr_fpath = zarr_fpath,
                                                         chunks = chunks, default_chunks = default_chunks,
                                                         compressor = compressor, default_compressor = default_compressor,
                                                         rounding = rounding,
                                                         consolidated = True,
                                                         append = True,
                                                         append_dim = 'forecast_reference_time',
                                                         show_progress = False)
                    else:
                        write_zarr(zarr_fpath = zarr_fpath,
                                   ds = ds,
                                   chunks = chunks, default_chunks = default_chunks,
                                   compressor = compressor, default_compressor = default_compressor,
                                   rounding = rounding,
                                   consolidated = True,
                                   append = True,
                                   append_dim = 'forecast_reference_time',
                                   show_progress = False)
                    if PREDICT_AR_BLOCKS:
                        shutil.rmtree(tmp_ar_block_zarr_fpath)
                else:
                    if scaler_inverse is not None:
                        ds = rescale_forecasts(ds=ds, scaler=scaler_inverse, reconcat=True)
                    list_ds.append(ds)
                #-------------------------------------------------------------------.
                # Print prediction report
                tmp_time_gen = round(t_post - t_gen, 1)
                tmp_time_post = round(time.time() - t_post, 1)
                tmp_time_per_forecast = round((tmp_time_gen+tmp_time_post)/batch_size, 3)
                print(" - Batch: {} / {} | Generation: {}s | Writing: {}s |"
                      "Single forecast computation: {}s ".format(batch_count, len(dataloader),
                                                                 tmp_time_gen, tmp_time_post,
                                                                 tmp_time_per_forecast))
            #---------------------------------------------------------------------.
            # Remove the dataloader and dataset to avoid deadlocks
            del batch_dict
            del dataset
            del dataloader
            del dataloader_iter
        ##------------------------------------------------------------------------.
        # Re-read the forecast dataset
        if WRITE_TO_ZARR:
            ds_forecasts = xr.open_zarr(zarr_fpath, chunks="auto")
        else:
            ds_forecasts = xr.merge(list_ds)
        ##------------------------------------------------------------------------.
        print("- Elapsed time for forecast generation: {:.2f} minutes".format((time.time()-t_i)/60))
        ##------------------------------------------------------------------------.
        return ds_forecasts
#----------------------------------------------------------------------------.
def reshape_forecasts_for_verification(ds):
    """Process a Dataset with forecasts in the format required for verification.

    For each leadtime, the forecast_reference_time is shifted by that leadtime
    so it becomes the (validity) time; the per-leadtime slices are then
    concatenated back along the 'leadtime' dimension.
    """
    reshaped_slices = []
    n_leadtimes = len(ds['leadtime'])
    for idx in range(n_leadtimes):
        slice_ds = ds.isel(leadtime=idx)
        # forecast_reference_time + leadtime = validity time
        slice_ds['forecast_reference_time'] = slice_ds['forecast_reference_time'] + slice_ds['leadtime']
        reshaped_slices.append(slice_ds.rename({'forecast_reference_time': 'time'}))
    return xr.concat(reshaped_slices, dim='leadtime', join='outer')
def rechunk_forecasts_for_verification(ds, target_store, chunks="auto", max_mem = '1GB', force=False):
    """
    Rechunk forecast Dataset in the format required for verification.

    Make data contiguous over the time dimension, and chunked over space.
    The forecasted time (referred as dimension 'time') is computed by
    summing the leadtime to the forecast_reference_time.

    Parameters
    ----------
    ds : xarray.Dataset
        Dataset with dimensions 'forecast_reference_time' and 'leadtime'.
    target_store : TYPE
        Filepath of the zarr store where to save the new Dataset.
    chunks : str, optional
        Option for custom chunks of the new Dataset. The default is "auto".
        The default is chunked pixel-wise and per leadtime, contiguous over time.
    max_mem : str, optional
        The amount of memory (in bytes) that workers are allowed to use.
        The default is '1GB'.
    force : bool, optional
        If True, an existing target_store is removed first. The default is False.

    Returns
    -------
    ds_verification : xarray.Dataset
        Dataset for verification (with 'time' and 'leadtime' dimensions.
    """
    ##------------------------------------------------------------------------.
    # Refuse to overwrite an existing target store unless force=True
    if os.path.exists(target_store):
        if not force:
            raise ValueError("A zarr store already exists at {}. If you want to overwrite, specify force=True".format(target_store))
        shutil.rmtree(target_store)
    ##------------------------------------------------------------------------.
    # Stores used by the rechunker (placed next to target_store)
    base_dir = os.path.dirname(target_store)
    temp_store = os.path.join(base_dir, "tmp_store.zarr")
    intermediate_store = os.path.join(base_dir, "rechunked_store.zarr")
    # Remove leftovers of previous runs
    for store in (temp_store, intermediate_store):
        if os.path.exists(store):
            shutil.rmtree(store)
    ##------------------------------------------------------------------------.
    # Default chunking:
    # - contiguous over forecast_reference_time, chunk 1 on all other dimensions
    dims = list(ds.dims)
    other_dims = np.array(dims)[np.isin(dims, ['time','feature'], invert=True)].tolist()
    default_chunks = {dim: 1 for dim in other_dims}
    default_chunks['forecast_reference_time'] = -1
    default_chunks['leadtime'] = 1
    chunks = check_chunks(ds=ds, chunks=chunks, default_chunks=default_chunks)
    ##------------------------------------------------------------------------.
    # Rechunk Dataset (on disk)
    rechunk_Dataset(ds=ds, chunks=chunks,
                    target_store=intermediate_store, temp_store=temp_store,
                    max_mem = max_mem,
                    force=force)
    ##------------------------------------------------------------------------.
    # Load rechunked dataset (contiguous over forecast reference time, chunked over space)
    # and reshape it so that forecast_reference_time + leadtime becomes 'time'
    ds = xr.open_zarr(intermediate_store, chunks="auto")
    ds_verification = reshape_forecasts_for_verification(ds)
    ##------------------------------------------------------------------------.
    # Remove 'chunks' key in encoding (bug in xarray-dask-zarr) before writing
    for var in list(ds_verification.data_vars.keys()):
        ds_verification[var].encoding.pop('chunks')
    ds_verification.to_zarr(target_store)
    ##------------------------------------------------------------------------.
    # Remove the intermediate rechunked store and reload from the final one
    shutil.rmtree(intermediate_store)
    return xr.open_zarr(target_store)
#----------------------------------------------------------------------------.
#----------------------------------------------------------------------------.
def AutoregressiveSWAGPredictions(model, exp_dir,
                                  # Data
                                  training_data_dynamic,
                                  training_data_bc = None,
                                  data_static = None,
                                  test_data_dynamic = None,
                                  test_data_bc = None,
                                  bc_generator = None,
                                  # Scaler options
                                  scaler_transform = None,
                                  scaler_inverse = None,
                                  # Dataloader options
                                  batch_size = 64,
                                  num_workers = 0,
                                  prefetch_factor = 2,
                                  prefetch_in_gpu = False,
                                  pin_memory = False,
                                  asyncronous_gpu_transfer = True,
                                  device = 'cpu',
                                  # Autoregressive settings
                                  input_k = [-3,-2,-1],
                                  output_k = [0],
                                  forecast_cycle = 1,
                                  ar_iterations = 50,
                                  stack_most_recent_prediction = True,
                                  # Prediction options
                                  forecast_reference_times = None,
                                  keep_first_prediction = True,
                                  ar_blocks = None,
                                  # SWAG settings
                                  no_cov_mat=False,
                                  sampling_scale = 0.1,
                                  nb_samples = 10,
                                  # Save options
                                  rounding = None,
                                  compressor = "auto",
                                  chunks = "auto"):
    """ Caution: the following function is in development !

    Draw nb_samples weight samples from a SWAG model, generate a forecast per
    sample with AutoregressivePredictions, ensemble them along a 'member'
    dimension, and return the ensemble median Dataset.
    """
    # NOTE: the original source line 'return df_median' was corrupted by
    # extraction residue fused onto it; restored here.
    sampling_scale_str = str(sampling_scale).replace(".", "")
    for i in range(1, nb_samples+1):
        print(f"- Sample {i}")
        forecast_zarr_fpath = os.path.join(exp_dir, f"model_predictions/spatial_chunks/test_pred_{sampling_scale_str}_temp{i}.zarr")
        # - Draw a new set of weights from the SWAG posterior
        with torch.no_grad():
            model.sample(sampling_scale, cov=(no_cov_mat))
        # - Recompute batch-normalization statistics for the sampled weights
        bn_update(model,
                  # Data
                  data_dynamic = training_data_dynamic,
                  data_bc = training_data_bc,
                  data_static = data_static,
                  bc_generator = bc_generator,
                  scaler = scaler_transform,
                  # Dataloader options
                  device = device,
                  batch_size = batch_size, # number of forecasts per batch
                  num_workers = num_workers,
                  # tune_num_workers = False,
                  prefetch_factor = prefetch_factor,
                  prefetch_in_gpu = prefetch_in_gpu,
                  pin_memory = pin_memory,
                  asyncronous_gpu_transfer = asyncronous_gpu_transfer,
                  # Autoregressive settings
                  input_k = input_k,
                  output_k = output_k,
                  forecast_cycle = forecast_cycle,
                  ar_iterations = ar_iterations,
                  stack_most_recent_prediction = stack_most_recent_prediction
                  )
        # - Generate the forecast for this weight sample (written to disk)
        _ = AutoregressivePredictions(model = model,
                                      # Data
                                      data_static = data_static,
                                      data_dynamic = test_data_dynamic,
                                      data_bc = test_data_bc,
                                      bc_generator = bc_generator,
                                      scaler_transform = scaler_transform,
                                      scaler_inverse = scaler_inverse,
                                      # Dataloader options
                                      device = device,
                                      batch_size = batch_size, # number of forecasts per batch
                                      num_workers = num_workers,
                                      # tune_num_workers = False,
                                      prefetch_factor = prefetch_factor,
                                      prefetch_in_gpu = prefetch_in_gpu,
                                      pin_memory = pin_memory,
                                      asyncronous_gpu_transfer = asyncronous_gpu_transfer,
                                      # Autoregressive settings
                                      input_k = input_k,
                                      output_k = output_k,
                                      forecast_cycle = forecast_cycle,
                                      stack_most_recent_prediction = stack_most_recent_prediction,
                                      ar_iterations = ar_iterations, # How many time to autoregressive iterate
                                      # Prediction options
                                      forecast_reference_times = forecast_reference_times,
                                      keep_first_prediction = keep_first_prediction,
                                      ar_blocks = ar_blocks,
                                      # Save options
                                      zarr_fpath = forecast_zarr_fpath, # None --> do not write to disk
                                      rounding = rounding, # Default None. Accept also a dictionary
                                      compressor = compressor, # Accept also a dictionary per variable
                                      chunks = chunks)
    ##-------------------------------------------------------------------------.
    # Ensemble the predictions along dim "member"
    zarr_members_fpaths = glob.glob(os.path.join(exp_dir, f"model_predictions/spatial_chunks/test_pred_{sampling_scale_str}_*"))
    list_ds_member = [xr.open_zarr(fpath) for fpath in zarr_members_fpaths]
    ds_ensemble = xr.concat(list_ds_member, dim="member")
    del list_ds_member
    ##-------------------------------------------------------------------------.
    # Save ensemble
    forecast_zarr_fpath = os.path.join(exp_dir, f"model_predictions/spatial_chunks/test_pred_{sampling_scale_str}.zarr")
    if not os.path.exists(forecast_zarr_fpath):
        ds_ensemble.to_zarr(forecast_zarr_fpath, mode='w') # Create
    else:
        ds_ensemble.to_zarr(forecast_zarr_fpath, append_dim='member') # Append
    ds_ensemble = xr.open_zarr(forecast_zarr_fpath, chunks="auto")
    ##-------------------------------------------------------------------------.
    # Remove individual members
    for member in zarr_members_fpaths:
        shutil.rmtree(member)
    ##-------------------------------------------------------------------------.
    # Compute median of ensemble
    forecast_zarr_fpath = os.path.join(exp_dir, f"model_predictions/spatial_chunks/test_pred_{sampling_scale_str}_median.zarr")
    df_median = ds_ensemble.median(dim="member")
    del ds_ensemble
    df_median.to_zarr(forecast_zarr_fpath, mode='w') # Create
    df_median = xr.open_zarr(forecast_zarr_fpath, chunks="auto")
    return df_median
"numpy.isin",
"modules.utils_zarr.rechunk_Dataset",
"modules.utils_autoregressive.check_ar_settings",
"modules.utils_torch.check_prefetch_factor",
"shutil.rmtree",
"modules.dataloader_autoregressive.AutoregressiveDataset",
"os.path.join",
"zarr.Blosc",
"torch.no_grad",
"os.path.dirname",
"os.pat... | [((5528, 5582), 'numpy.stack', 'np.stack', (['list_to_stack'], {'axis': "dim_info_dynamic['time']"}), "(list_to_stack, axis=dim_info_dynamic['time'])\n", (5536, 5582), True, 'import numpy as np\n'), ((5923, 5955), 'modules.utils_io._get_feature_order', '_get_feature_order', (['data_dynamic'], {}), '(data_dynamic)\n', (5941, 5955), False, 'from modules.utils_io import _get_feature_order, check_timesteps_format, check_no_duplicate_timesteps\n'), ((6080, 6235), 'xarray.DataArray', 'xr.DataArray', (['Y_forecasts'], {'dims': 'forecast_dims', 'coords': "{'leadtime': leadtimes, 'forecast_reference_time': forecast_reference_times,\n 'feature': features}"}), "(Y_forecasts, dims=forecast_dims, coords={'leadtime': leadtimes,\n 'forecast_reference_time': forecast_reference_times, 'feature': features})\n", (6092, 6235), True, 'import xarray as xr\n'), ((9467, 9513), 'xarray.concat', 'xr.concat', (['l_ds'], {'dim': '"""forecast_reference_time"""'}), "(l_ds, dim='forecast_reference_time')\n", (9476, 9513), True, 'import xarray as xr\n'), ((9518, 9793), 'modules.utils_zarr.write_zarr', 'write_zarr', ([], {'zarr_fpath': 'zarr_fpath', 'ds': 'ds', 'chunks': 'chunks', 'default_chunks': 'default_chunks', 'compressor': 'compressor', 'default_compressor': 'default_compressor', 'rounding': 'rounding', 'consolidated': 'consolidated', 'append': 'append', 'append_dim': 'append_dim', 'show_progress': 'show_progress'}), '(zarr_fpath=zarr_fpath, ds=ds, chunks=chunks, default_chunks=\n default_chunks, compressor=compressor, default_compressor=\n default_compressor, rounding=rounding, consolidated=consolidated,\n append=append, append_dim=append_dim, show_progress=show_progress)\n', (9528, 9793), False, 'from modules.utils_zarr import write_zarr\n'), ((32296, 32350), 'xarray.concat', 'xr.concat', (['l_reshaped_ds'], {'dim': '"""leadtime"""', 'join': '"""outer"""'}), "(l_reshaped_ds, dim='leadtime', join='outer')\n", (32305, 32350), True, 'import xarray as xr\n'), ((33567, 33595), 
'os.path.exists', 'os.path.exists', (['target_store'], {}), '(target_store)\n', (33581, 33595), False, 'import os\n'), ((34291, 34317), 'os.path.exists', 'os.path.exists', (['temp_store'], {}), '(temp_store)\n', (34305, 34317), False, 'import os\n'), ((34360, 34394), 'os.path.exists', 'os.path.exists', (['intermediate_store'], {}), '(intermediate_store)\n', (34374, 34394), False, 'import os\n'), ((34919, 34984), 'modules.utils_zarr.check_chunks', 'check_chunks', ([], {'ds': 'ds', 'chunks': 'chunks', 'default_chunks': 'default_chunks'}), '(ds=ds, chunks=chunks, default_chunks=default_chunks)\n', (34931, 34984), False, 'from modules.utils_zarr import check_chunks\n'), ((35102, 35229), 'modules.utils_zarr.rechunk_Dataset', 'rechunk_Dataset', ([], {'ds': 'ds', 'chunks': 'chunks', 'target_store': 'intermediate_store', 'temp_store': 'temp_store', 'max_mem': 'max_mem', 'force': 'force'}), '(ds=ds, chunks=chunks, target_store=intermediate_store,\n temp_store=temp_store, max_mem=max_mem, force=force)\n', (35117, 35229), False, 'from modules.utils_zarr import rechunk_Dataset\n'), ((35469, 35516), 'xarray.open_zarr', 'xr.open_zarr', (['intermediate_store'], {'chunks': '"""auto"""'}), "(intermediate_store, chunks='auto')\n", (35481, 35516), True, 'import xarray as xr\n'), ((36186, 36219), 'shutil.rmtree', 'shutil.rmtree', (['intermediate_store'], {}), '(intermediate_store)\n', (36199, 36219), False, 'import shutil\n'), ((36362, 36388), 'xarray.open_zarr', 'xr.open_zarr', (['target_store'], {}), '(target_store)\n', (36374, 36388), True, 'import xarray as xr\n'), ((43006, 43045), 'xarray.concat', 'xr.concat', (['list_ds_member'], {'dim': '"""member"""'}), "(list_ds_member, dim='member')\n", (43015, 43045), True, 'import xarray as xr\n'), ((43210, 43308), 'os.path.join', 'os.path.join', (['exp_dir', 'f"""model_predictions/spatial_chunks/test_pred_{sampling_scale_str}.zarr"""'], {}), "(exp_dir,\n f'model_predictions/spatial_chunks/test_pred_{sampling_scale_str}.zarr')\n", (43222, 
43308), False, 'import os\n'), ((43552, 43600), 'xarray.open_zarr', 'xr.open_zarr', (['forecast_zarr_fpath'], {'chunks': '"""auto"""'}), "(forecast_zarr_fpath, chunks='auto')\n", (43564, 43600), True, 'import xarray as xr\n'), ((43929, 44039), 'os.path.join', 'os.path.join', (['exp_dir', 'f"""model_predictions/spatial_chunks/test_pred_{sampling_scale_str}_median.zarr"""'], {}), "(exp_dir,\n f'model_predictions/spatial_chunks/test_pred_{sampling_scale_str}_median.zarr'\n )\n", (43941, 44039), False, 'import os\n'), ((44178, 44226), 'xarray.open_zarr', 'xr.open_zarr', (['forecast_zarr_fpath'], {'chunks': '"""auto"""'}), "(forecast_zarr_fpath, chunks='auto')\n", (44190, 44226), True, 'import xarray as xr\n'), ((7640, 7695), 'xarray.concat', 'xr.concat', (['l_rescaled_ds'], {'dim': '"""forecast_reference_time"""'}), "(l_rescaled_ds, dim='forecast_reference_time')\n", (7649, 7695), True, 'import xarray as xr\n'), ((13523, 13563), 'dask.config.set', 'dask.config.set', ([], {'scheduler': '"""synchronous"""'}), "(scheduler='synchronous')\n", (13538, 13563), False, 'import dask\n'), ((13611, 13631), 'modules.utils_torch.check_device', 'check_device', (['device'], {}), '(device)\n', (13623, 13631), False, 'from modules.utils_torch import check_device\n'), ((13653, 13732), 'modules.utils_torch.check_pin_memory', 'check_pin_memory', ([], {'pin_memory': 'pin_memory', 'num_workers': 'num_workers', 'device': 'device'}), '(pin_memory=pin_memory, num_workers=num_workers, device=device)\n', (13669, 13732), False, 'from modules.utils_torch import check_pin_memory\n'), ((13770, 13871), 'modules.utils_torch.check_asyncronous_gpu_transfer', 'check_asyncronous_gpu_transfer', ([], {'asyncronous_gpu_transfer': 'asyncronous_gpu_transfer', 'device': 'device'}), '(asyncronous_gpu_transfer=\n asyncronous_gpu_transfer, device=device)\n', (13800, 13871), False, 'from modules.utils_torch import check_asyncronous_gpu_transfer\n'), ((13894, 13993), 'modules.utils_torch.check_prefetch_in_gpu', 
'check_prefetch_in_gpu', ([], {'prefetch_in_gpu': 'prefetch_in_gpu', 'num_workers': 'num_workers', 'device': 'device'}), '(prefetch_in_gpu=prefetch_in_gpu, num_workers=\n num_workers, device=device)\n', (13915, 13993), False, 'from modules.utils_torch import check_prefetch_in_gpu\n'), ((14016, 14095), 'modules.utils_torch.check_prefetch_factor', 'check_prefetch_factor', ([], {'prefetch_factor': 'prefetch_factor', 'num_workers': 'num_workers'}), '(prefetch_factor=prefetch_factor, num_workers=num_workers)\n', (14037, 14095), False, 'from modules.utils_torch import check_prefetch_factor\n'), ((14321, 14380), 'modules.utils_autoregressive.check_input_k', 'check_input_k', ([], {'input_k': 'input_k', 'ar_iterations': 'ar_iterations'}), '(input_k=input_k, ar_iterations=ar_iterations)\n', (14334, 14380), False, 'from modules.utils_autoregressive import check_input_k\n'), ((14403, 14436), 'modules.utils_autoregressive.check_output_k', 'check_output_k', ([], {'output_k': 'output_k'}), '(output_k=output_k)\n', (14417, 14436), False, 'from modules.utils_autoregressive import check_output_k\n'), ((14447, 14628), 'modules.utils_autoregressive.check_ar_settings', 'check_ar_settings', ([], {'input_k': 'input_k', 'output_k': 'output_k', 'forecast_cycle': 'forecast_cycle', 'ar_iterations': 'ar_iterations', 'stack_most_recent_prediction': 'stack_most_recent_prediction'}), '(input_k=input_k, output_k=output_k, forecast_cycle=\n forecast_cycle, ar_iterations=ar_iterations,\n stack_most_recent_prediction=stack_most_recent_prediction)\n', (14464, 14628), False, 'from modules.utils_autoregressive import check_ar_settings\n'), ((14962, 14994), 'modules.utils_io._get_feature_order', '_get_feature_order', (['data_dynamic'], {}), '(data_dynamic)\n', (14980, 14994), False, 'from modules.utils_io import _get_feature_order, check_timesteps_format, check_no_duplicate_timesteps\n'), ((18425, 18816), 'modules.dataloader_autoregressive.AutoregressiveDataset', 'AutoregressiveDataset', ([], 
{'data_dynamic': 'data_dynamic', 'data_bc': 'data_bc', 'data_static': 'data_static', 'bc_generator': 'bc_generator', 'scaler': 'scaler_transform', 'subset_timesteps': 'subset_timesteps', 'training_mode': '(False)', 'input_k': 'input_k', 'output_k': 'output_k', 'forecast_cycle': 'forecast_cycle', 'ar_iterations': 'ar_iterations', 'stack_most_recent_prediction': 'stack_most_recent_prediction', 'device': 'device'}), '(data_dynamic=data_dynamic, data_bc=data_bc,\n data_static=data_static, bc_generator=bc_generator, scaler=\n scaler_transform, subset_timesteps=subset_timesteps, training_mode=\n False, input_k=input_k, output_k=output_k, forecast_cycle=\n forecast_cycle, ar_iterations=ar_iterations,\n stack_most_recent_prediction=stack_most_recent_prediction, device=device)\n', (18446, 18816), False, 'from modules.dataloader_autoregressive import AutoregressiveDataset\n'), ((19539, 19838), 'modules.dataloader_autoregressive.AutoregressiveDataLoader', 'AutoregressiveDataLoader', ([], {'dataset': 'dataset', 'batch_size': 'batch_size', 'drop_last_batch': '(False)', 'shuffle': '(False)', 'num_workers': 'num_workers', 'prefetch_factor': 'prefetch_factor', 'prefetch_in_gpu': 'prefetch_in_gpu', 'pin_memory': 'pin_memory', 'asyncronous_gpu_transfer': 'asyncronous_gpu_transfer', 'device': 'device'}), '(dataset=dataset, batch_size=batch_size,\n drop_last_batch=False, shuffle=False, num_workers=num_workers,\n prefetch_factor=prefetch_factor, prefetch_in_gpu=prefetch_in_gpu,\n pin_memory=pin_memory, asyncronous_gpu_transfer=\n asyncronous_gpu_transfer, device=device)\n', (19563, 19838), False, 'from modules.dataloader_autoregressive import AutoregressiveDataLoader\n'), ((20572, 20583), 'time.time', 'time.time', ([], {}), '()\n', (20581, 20583), False, 'import time\n'), ((31386, 31425), 'xarray.open_zarr', 'xr.open_zarr', (['zarr_fpath'], {'chunks': '"""auto"""'}), "(zarr_fpath, chunks='auto')\n", (31398, 31425), True, 'import xarray as xr\n'), ((31460, 31477), 'xarray.merge', 
'xr.merge', (['list_ds'], {}), '(list_ds)\n', (31468, 31477), True, 'import xarray as xr\n'), ((33952, 33981), 'os.path.dirname', 'os.path.dirname', (['target_store'], {}), '(target_store)\n', (33967, 33981), False, 'import os\n'), ((34090, 34119), 'os.path.dirname', 'os.path.dirname', (['target_store'], {}), '(target_store)\n', (34105, 34119), False, 'import os\n'), ((34327, 34352), 'shutil.rmtree', 'shutil.rmtree', (['temp_store'], {}), '(temp_store)\n', (34340, 34352), False, 'import shutil\n'), ((34404, 34437), 'shutil.rmtree', 'shutil.rmtree', (['intermediate_store'], {}), '(intermediate_store)\n', (34417, 34437), False, 'import shutil\n'), ((39093, 39204), 'os.path.join', 'os.path.join', (['exp_dir', 'f"""model_predictions/spatial_chunks/test_pred_{sampling_scale_str}_temp{i}.zarr"""'], {}), "(exp_dir,\n f'model_predictions/spatial_chunks/test_pred_{sampling_scale_str}_temp{i}.zarr'\n )\n", (39105, 39204), False, 'import os\n'), ((39294, 39837), 'modules.utils_swag.bn_update', 'bn_update', (['model'], {'data_dynamic': 'training_data_dynamic', 'data_bc': 'training_data_bc', 'data_static': 'data_static', 'bc_generator': 'bc_generator', 'scaler': 'scaler_transform', 'device': 'device', 'batch_size': 'batch_size', 'num_workers': 'num_workers', 'prefetch_factor': 'prefetch_factor', 'prefetch_in_gpu': 'prefetch_in_gpu', 'pin_memory': 'pin_memory', 'asyncronous_gpu_transfer': 'asyncronous_gpu_transfer', 'input_k': 'input_k', 'output_k': 'output_k', 'forecast_cycle': 'forecast_cycle', 'ar_iterations': 'ar_iterations', 'stack_most_recent_prediction': 'stack_most_recent_prediction'}), '(model, data_dynamic=training_data_dynamic, data_bc=\n training_data_bc, data_static=data_static, bc_generator=bc_generator,\n scaler=scaler_transform, device=device, batch_size=batch_size,\n num_workers=num_workers, prefetch_factor=prefetch_factor,\n prefetch_in_gpu=prefetch_in_gpu, pin_memory=pin_memory,\n asyncronous_gpu_transfer=asyncronous_gpu_transfer, input_k=input_k,\n 
output_k=output_k, forecast_cycle=forecast_cycle, ar_iterations=\n ar_iterations, stack_most_recent_prediction=stack_most_recent_prediction)\n', (39303, 39837), False, 'from modules.utils_swag import bn_update\n'), ((42819, 42914), 'os.path.join', 'os.path.join', (['exp_dir', 'f"""model_predictions/spatial_chunks/test_pred_{sampling_scale_str}_*"""'], {}), "(exp_dir,\n f'model_predictions/spatial_chunks/test_pred_{sampling_scale_str}_*')\n", (42831, 42914), False, 'import os\n'), ((42934, 42953), 'xarray.open_zarr', 'xr.open_zarr', (['fpath'], {}), '(fpath)\n', (42946, 42953), True, 'import xarray as xr\n'), ((43316, 43351), 'os.path.exists', 'os.path.exists', (['forecast_zarr_fpath'], {}), '(forecast_zarr_fpath)\n', (43330, 43351), False, 'import os\n'), ((43762, 43783), 'shutil.rmtree', 'shutil.rmtree', (['member'], {}), '(member)\n', (43775, 43783), False, 'import shutil\n'), ((15398, 15424), 'os.path.exists', 'os.path.exists', (['zarr_fpath'], {}), '(zarr_fpath)\n', (15412, 15424), False, 'import os\n'), ((15974, 16019), 'zarr.Blosc', 'zarr.Blosc', ([], {'cname': '"""zstd"""', 'clevel': '(0)', 'shuffle': '(2)'}), "(cname='zstd', clevel=0, shuffle=2)\n", (15984, 16019), False, 'import zarr\n'), ((16083, 16141), 'modules.utils_zarr.check_rounding', 'check_rounding', ([], {'rounding': 'rounding', 'variable_names': 'features'}), '(rounding=rounding, variable_names=features)\n', (16097, 16141), False, 'from modules.utils_zarr import check_rounding\n'), ((17221, 17269), 'modules.utils_io.check_timesteps_format', 'check_timesteps_format', (['forecast_reference_times'], {}), '(forecast_reference_times)\n', (17243, 17269), False, 'from modules.utils_io import _get_feature_order, check_timesteps_format, check_no_duplicate_timesteps\n'), ((17451, 17547), 'modules.utils_io.check_no_duplicate_timesteps', 'check_no_duplicate_timesteps', (['forecast_reference_times'], {'var_name': '"""forecast_reference_times"""'}), "(forecast_reference_times, var_name=\n 
'forecast_reference_times')\n", (17479, 17547), False, 'from modules.utils_io import _get_feature_order, check_timesteps_format, check_no_duplicate_timesteps\n'), ((20772, 20801), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (20794, 20801), False, 'import torch\n'), ((33628, 33655), 'shutil.rmtree', 'shutil.rmtree', (['target_store'], {}), '(target_store)\n', (33641, 33655), False, 'import shutil\n'), ((39209, 39224), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (39222, 39224), False, 'import torch\n'), ((4503, 4565), 'numpy.argwhere', 'np.argwhere', (['(dict_forecast_rel_idx_Y[ar_iteration] == leadtime)'], {}), '(dict_forecast_rel_idx_Y[ar_iteration] == leadtime)\n', (4514, 4565), True, 'import numpy as np\n'), ((5809, 5823), 'numpy.array', 'np.array', (['dims'], {}), '(dims)\n', (5817, 5823), True, 'import numpy as np\n'), ((5824, 5871), 'numpy.isin', 'np.isin', (['dims', "['time', 'feature']"], {'invert': '(True)'}), "(dims, ['time', 'feature'], invert=True)\n", (5831, 5871), True, 'import numpy as np\n'), ((17770, 17803), 'numpy.diff', 'np.diff', (['data_dynamic.time.values'], {}), '(data_dynamic.time.values)\n', (17777, 17803), True, 'import numpy as np\n'), ((21208, 21219), 'time.time', 'time.time', ([], {}), '()\n', (21217, 21219), False, 'import time\n'), ((27637, 27648), 'time.time', 'time.time', ([], {}), '()\n', (27646, 27648), False, 'import time\n'), ((34671, 34685), 'numpy.array', 'np.array', (['dims'], {}), '(dims)\n', (34679, 34685), True, 'import numpy as np\n'), ((34686, 34733), 'numpy.isin', 'np.isin', (['dims', "['time', 'feature']"], {'invert': '(True)'}), "(dims, ['time', 'feature'], invert=True)\n", (34693, 34733), True, 'import numpy as np\n'), ((15296, 15323), 'os.path.dirname', 'os.path.dirname', (['zarr_fpath'], {}), '(zarr_fpath)\n', (15311, 15323), False, 'import os\n'), ((15354, 15381), 'os.path.dirname', 'os.path.dirname', (['zarr_fpath'], {}), '(zarr_fpath)\n', (15369, 15381), False, 
'import os\n'), ((24487, 24618), 'modules.dataloader_autoregressive.remove_unused_Y', 'remove_unused_Y', ([], {'ar_iteration': 'ar_iteration', 'dict_Y_predicted': 'dict_Y_predicted', 'dict_Y_to_remove': "batch_dict['dict_Y_to_remove']"}), "(ar_iteration=ar_iteration, dict_Y_predicted=\n dict_Y_predicted, dict_Y_to_remove=batch_dict['dict_Y_to_remove'])\n", (24502, 24618), False, 'from modules.dataloader_autoregressive import remove_unused_Y\n'), ((31637, 31648), 'time.time', 'time.time', ([], {}), '()\n', (31646, 31648), False, 'import time\n'), ((15703, 15717), 'numpy.array', 'np.array', (['dims'], {}), '(dims)\n', (15711, 15717), True, 'import numpy as np\n'), ((15718, 15765), 'numpy.isin', 'np.isin', (['dims', "['time', 'feature']"], {'invert': '(True)'}), "(dims, ['time', 'feature'], invert=True)\n", (15725, 15765), True, 'import numpy as np\n'), ((27921, 27958), 'xarray.open_zarr', 'xr.open_zarr', (['tmp_ar_block_zarr_fpath'], {}), '(tmp_ar_block_zarr_fpath)\n', (27933, 27958), True, 'import xarray as xr\n'), ((29357, 29629), 'modules.utils_zarr.write_zarr', 'write_zarr', ([], {'zarr_fpath': 'zarr_fpath', 'ds': 'ds', 'chunks': 'chunks', 'default_chunks': 'default_chunks', 'compressor': 'compressor', 'default_compressor': 'default_compressor', 'rounding': 'rounding', 'consolidated': '(True)', 'append': '(True)', 'append_dim': '"""forecast_reference_time"""', 'show_progress': '(False)'}), "(zarr_fpath=zarr_fpath, ds=ds, chunks=chunks, default_chunks=\n default_chunks, compressor=compressor, default_compressor=\n default_compressor, rounding=rounding, consolidated=True, append=True,\n append_dim='forecast_reference_time', show_progress=False)\n", (29367, 29629), False, 'from modules.utils_zarr import write_zarr\n'), ((29990, 30028), 'shutil.rmtree', 'shutil.rmtree', (['tmp_ar_block_zarr_fpath'], {}), '(tmp_ar_block_zarr_fpath)\n', (30003, 30028), False, 'import shutil\n'), ((30486, 30497), 'time.time', 'time.time', ([], {}), '()\n', (30495, 30497), False, 'import 
time\n'), ((26591, 26860), 'modules.utils_zarr.write_zarr', 'write_zarr', ([], {'zarr_fpath': 'tmp_ar_block_zarr_fpath', 'ds': 'ds', 'chunks': 'chunks', 'default_chunks': 'default_chunks', 'compressor': 'compressor', 'default_compressor': 'default_compressor', 'rounding': 'rounding', 'consolidated': '(True)', 'append': '(True)', 'append_dim': '"""leadtime"""', 'show_progress': '(False)'}), "(zarr_fpath=tmp_ar_block_zarr_fpath, ds=ds, chunks=chunks,\n default_chunks=default_chunks, compressor=compressor,\n default_compressor=default_compressor, rounding=rounding, consolidated=\n True, append=True, append_dim='leadtime', show_progress=False)\n", (26601, 26860), False, 'from modules.utils_zarr import write_zarr\n'), ((26512, 26539), 'os.path.dirname', 'os.path.dirname', (['zarr_fpath'], {}), '(zarr_fpath)\n', (26527, 26539), False, 'import os\n')] |
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from probflow.modules import Dense, Sequential
from probflow.parameters import Parameter
from probflow.utils.settings import Sampling
# Shorthand for the TensorFlow Probability distributions namespace
tfd = tfp.distributions
def is_close(a, b, tol=1e-3):
    """Return whether ``a`` and ``b`` agree within absolute tolerance ``tol``.

    Works element-wise on array inputs because the comparison is done
    through ``np.abs``.
    """
    difference = np.abs(a - b)
    return difference < tol
def test_Sequential():
    """Tests probflow.modules.Sequential"""

    # Build a small dense network with relu activations between layers
    seq = Sequential(
        [Dense(5, 10), tf.nn.relu, Dense(10, 3), tf.nn.relu, Dense(3, 1)]
    )

    # The steps attribute should hold all five components as a list
    assert isinstance(seq.steps, list)
    assert len(seq.steps) == 5

    # Outside a Sampling context, repeated calls use MAP values and agree
    x = tf.random.normal([4, 5])
    out_a = seq(x)
    out_b = seq(x)
    assert np.all(out_a.numpy() == out_b.numpy())
    assert out_a.ndim == 2
    assert out_a.shape[0] == 4
    assert out_a.shape[1] == 1

    # Inside a Sampling context, each call draws fresh parameter samples
    with Sampling(n=1):
        out_a = seq(x)
        out_b = seq(x)
    assert np.all(out_a.numpy() != out_b.numpy())
    assert out_a.ndim == 2
    assert out_a.shape[0] == 4
    assert out_a.shape[1] == 1

    # parameters should collect the weights and bias of every Dense layer
    param_list = seq.parameters
    assert isinstance(param_list, list)
    assert len(param_list) == 6
    assert all(isinstance(p, Parameter) for p in param_list)
    names = [p.name for p in seq.parameters]
    assert "Dense_weights" in names
    assert "Dense_bias" in names
    shapes = [p.shape for p in seq.parameters]
    for expected_shape in ([5, 10], [1, 10], [10, 3], [1, 3], [3, 1], [1, 1]):
        assert expected_shape in shapes

    # kl_loss should reduce the per-parameter KL losses to a scalar tensor
    kl_loss = seq.kl_loss()
    assert isinstance(kl_loss, tf.Tensor)
    assert kl_loss.ndim == 0
| [
"probflow.modules.Dense",
"probflow.utils.settings.Sampling",
"numpy.abs",
"tensorflow.random.normal"
] | [((642, 666), 'tensorflow.random.normal', 'tf.random.normal', (['[4, 5]'], {}), '([4, 5])\n', (658, 666), True, 'import tensorflow as tf\n'), ((283, 296), 'numpy.abs', 'np.abs', (['(a - b)'], {}), '(a - b)\n', (289, 296), True, 'import numpy as np\n'), ((908, 921), 'probflow.utils.settings.Sampling', 'Sampling', ([], {'n': '(1)'}), '(n=1)\n', (916, 921), False, 'from probflow.utils.settings import Sampling\n'), ((428, 440), 'probflow.modules.Dense', 'Dense', (['(5)', '(10)'], {}), '(5, 10)\n', (433, 440), False, 'from probflow.modules import Dense, Sequential\n'), ((454, 466), 'probflow.modules.Dense', 'Dense', (['(10)', '(3)'], {}), '(10, 3)\n', (459, 466), False, 'from probflow.modules import Dense, Sequential\n'), ((480, 491), 'probflow.modules.Dense', 'Dense', (['(3)', '(1)'], {}), '(3, 1)\n', (485, 491), False, 'from probflow.modules import Dense, Sequential\n')] |
from pop_finder import __version__
from pop_finder import pop_finder
from pop_finder import contour_classifier
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import os
import shutil
import pytest
# helper data
# Genotype input fixtures in the formats accepted by pop_finder.read_data
infile_all = "tests/test_inputs/onlyAtl_500.recode.vcf.locator.hdf5"
infile_all_vcf = "tests/test_inputs/onlyAtl_500.recode.vcf"
infile_kfcv = "tests/test_inputs/onlyAtl_500_kfcv.recode.vcf"
# Sample metadata variants: complete, with NAs, with mismatched sample
# IDs, and with an incorrect (3-column) layout — used to hit each
# input-validation branch
sample_data1 = "tests/test_inputs/onlyAtl_truelocs.txt"
sample_data2 = "tests/test_inputs/onlyAtl_truelocs_NAs.txt"
sample_data3 = "tests/test_inputs/onlyAtl_truelocs_badsamps.txt"
sample_data4 = "tests/test_inputs/onlyAtl_truelocs_3col.txt"
# Predicted-locations file from a prior bootstrap run (presumably for the
# contour_classifier tests — confirm against the rest of the suite)
pred_path = "tests/test_inputs/test_out/loc_boot0_predlocs.txt"
# Pre-split train/test genotype matrices and label frames, plus empty
# counterparts used to exercise the "exists, but is empty" checks
X_train = np.load("tests/test_inputs/X_train.npy")
X_train_empty = np.zeros(shape=0)
y_train = pd.read_csv("tests/test_inputs/y_train.csv")
y_train_empty = pd.DataFrame()
X_test = np.load("tests/test_inputs/X_test.npy")
X_test_empty = np.zeros(shape=0)
y_test = pd.read_csv("tests/test_inputs/y_test.csv")
y_test_empty = pd.DataFrame()
# Samples of unknown origin and their genotype matrix
unknowns = pd.read_csv("tests/test_inputs/test_unknowns.csv")
unknowns_empty = pd.DataFrame()
ukgen = np.load("tests/test_inputs/ukgen.npy")
ukgen_empty = np.zeros(shape=0)
def test_version():
    """The packaged version string should match the expected release."""
    expected = "1.0.9"
    assert __version__ == expected
def test_read_data():
    """read_data should return the right tuple shape and validate inputs."""
    # Without kfcv: (sample frame, genotype array, unknowns frame)
    full = pop_finder.read_data(infile_all, sample_data2)
    assert isinstance(full, tuple)
    assert isinstance(full[0], pd.core.frame.DataFrame)
    assert isinstance(full[1], np.ndarray)
    assert isinstance(full[2], pd.core.frame.DataFrame)
    assert len(full) == 3

    # With kfcv: only (sample frame, genotype array)
    kf = pop_finder.read_data(infile_all, sample_data1, kfcv=True)
    assert isinstance(kf, tuple)
    assert isinstance(kf[0], pd.core.frame.DataFrame)
    assert isinstance(kf[1], np.ndarray)
    assert len(kf) == 2

    # Each malformed input should raise its own descriptive ValueError
    bad_cases = [
        ("Path to infile does not exist",
         dict(infile="hello", sample_data=sample_data2)),
        ("Infile must have extension 'zarr', 'vcf', or 'hdf5'",
         dict(infile=sample_data1, sample_data=sample_data2)),
        ("Path to sample_data does not exist",
         dict(infile=infile_all, sample_data="hello")),
        ("sample_data does not have correct columns",
         dict(infile=infile_all, sample_data=sample_data4)),
        ("sample ordering failed! Check that sample IDs match VCF.",
         dict(infile=infile_kfcv, sample_data=sample_data3)),
    ]
    for match, kwargs in bad_cases:
        with pytest.raises(ValueError, match=match):
            pop_finder.read_data(**kwargs)
def test_hp_tuning():
    """classifierHyperModel should store its constructor args as attributes."""
    hypermodel = pop_finder.classifierHyperModel(
        input_shape=2, num_classes=2)
    assert isinstance(hypermodel,
                      pop_finder.classifierHyperModel)
    assert hypermodel.input_shape == 2
    assert hypermodel.num_classes == 2
def test_hyper_tune():
    """Exercise pop_finder.hyper_tune end-to-end and its input validation.

    Runs one short tuning session, checks the expected artefacts are
    written, then verifies that each malformed argument raises a
    descriptive ValueError.
    """
    out_dir = "tests/hyper_tune_test_out"

    # A short tuning run should return the tuned model first
    tuner_test = pop_finder.hyper_tune(
        infile=infile_all,
        sample_data=sample_data2,
        max_epochs=10,
        save_dir=out_dir,
        mod_name="hyper_tune",
    )
    # BUG FIX: the original `assert type(x == "...")` asserted the type
    # `bool`, which is always truthy. Check the returned class name instead
    # (the tuner is expected to produce a keras Sequential model).
    assert type(tuner_test[0]).__name__ == "Sequential"

    # All tuning artefacts should be written to the save directory
    for expected_path in (
        out_dir,
        out_dir + "/best_mod",
        out_dir + "/X_train.npy",
        out_dir + "/X_test.npy",
        out_dir + "/y_train.csv",
        out_dir + "/y_test.csv",
    ):
        assert os.path.exists(expected_path)

    # Remove the saved model so subsequent runs start fresh
    if os.path.exists(out_dir + "/best_mod"):
        shutil.rmtree(out_dir + "/best_mod")

    # Each malformed argument should raise its own descriptive ValueError
    base = dict(
        infile=infile_all,
        sample_data=sample_data2,
        max_epochs=10,
        save_dir=out_dir,
        mod_name="hyper_tune",
    )
    bad_cases = [
        # y_val becomes empty when almost everything goes to training
        ("train_prop is too high", dict(train_prop=0.99)),
        ("infile does not exist",
         dict(infile="tests/test_inputs/onlyAtl_500.vcf")),
        ("sample_data does not exist", dict(sample_data="hello.txt")),
        ("max_trials should be integer", dict(max_trials=1.5)),
        ("runs_per_trial should be integer", dict(runs_per_trial=1.2)),
        ("max_epochs should be integer", dict(max_epochs="10")),
        ("train_prop should be float", dict(train_prop=1)),
        ("seed should be integer or None", dict(train_prop=0.8, seed="2")),
        ("save_dir should be string", dict(save_dir=2, train_prop=0.8)),
        ("mod_name should be string", dict(mod_name=2, train_prop=0.8)),
    ]
    for match, overrides in bad_cases:
        with pytest.raises(ValueError, match=match):
            pop_finder.hyper_tune(**{**base, **overrides})
def test_kfcv():
    """Check pop_finder.kfcv outputs and its argument validation."""
    base = dict(
        infile=infile_all,
        sample_data=sample_data2,
        n_splits=3,
        n_reps=1,
        patience=10,
        max_epochs=10,
        save_dir="tests/kfcv_test_output",
        mod_path="hyper_tune_test_out",
    )

    # Single-model mode returns one classification report
    report = pop_finder.kfcv(**base)
    assert isinstance(report, pd.DataFrame)

    # Ensemble mode returns both a per-model and an ensemble report
    report, ensemble_report = pop_finder.kfcv(
        **{**base, "ensemble": True, "nbags": 2}
    )
    assert isinstance(report, pd.DataFrame)
    assert isinstance(ensemble_report, pd.DataFrame)

    # Each malformed argument should raise its own descriptive ValueError
    bad_cases = [
        ("path to infile does not exist", dict(infile="hello.txt")),
        ("path to sample_data incorrect", dict(sample_data="hello.txt")),
        ("n_splits should be an integer", dict(n_splits=1.5)),
        ("n_reps should be an integer", dict(n_reps=1.5)),
        ("ensemble should be a boolean", dict(ensemble="True")),
        ("save_dir should be a string", dict(save_dir=2)),
        ("n_splits must be greater than 1", dict(n_splits=1)),
        # n_splits larger than the smallest population is also invalid
        ("n_splits cannot be greater than number of samples",
         dict(n_splits=10)),
    ]
    for match, overrides in bad_cases:
        with pytest.raises(ValueError, match=match):
            pop_finder.kfcv(**{**base, **overrides})
def test_pop_finder():
    """Run pop_finder.pop_finder in single and ensemble modes, then verify
    that every malformed argument raises a descriptive ValueError."""
    base = dict(
        X_train=X_train,
        y_train=y_train,
        X_test=X_test,
        y_test=y_test,
        unknowns=unknowns,
        ukgen=ukgen,
        save_dir="tests/test_output",
        max_epochs=10,
    )

    # Single-model mode returns a dict of results
    test_dict = pop_finder.pop_finder(**base)
    assert isinstance(test_dict, dict)

    # Ensemble mode additionally returns the bagged-predictions frame
    test_dict, tot_bag_df = pop_finder.pop_finder(
        **{**base, "ensemble": True, "nbags": 2}
    )
    assert isinstance(test_dict, dict)
    assert isinstance(tot_bag_df, pd.DataFrame)

    # Each malformed argument should raise its own descriptive ValueError
    bad_cases = [
        ("y_train is not a pandas dataframe", dict(y_train=2)),
        ("y_train exists, but is empty", dict(y_train=y_train_empty)),
        ("y_test is not a pandas dataframe", dict(y_test=2)),
        ("y_test exists, but is empty", dict(y_test=y_test_empty)),
        ("X_train is not a numpy array", dict(X_train=2)),
        ("X_train exists, but is empty", dict(X_train=X_train_empty)),
        ("X_test is not a numpy array", dict(X_test=2)),
        ("X_test exists, but is empty", dict(X_test=X_test_empty)),
        ("ukgen is not a numpy array", dict(ukgen=2)),
        ("ukgen exists, but is empty", dict(ukgen=ukgen_empty)),
        ("unknowns is not pandas dataframe", dict(unknowns="unknowns")),
        ("unknowns exists, but is empty", dict(unknowns=unknowns_empty)),
        ("ensemble should be a boolean", dict(ensemble="True")),
        ("try_stacking should be a boolean", dict(try_stacking="True")),
        ("nbags should be an integer", dict(ensemble=True, nbags=1.5)),
        ("train_prop should be a float", dict(train_prop=1)),
        ("predict should be a boolean", dict(predict="True")),
        ("save_dir should be a string", dict(save_dir=2)),
        ("save_weights should be a boolean", dict(save_weights="True")),
        ("patience should be an integer", dict(patience=5.6)),
        ("batch_size should be an integer", dict(batch_size=5.6)),
        ("max_epochs should be an integer", dict(max_epochs=5.6)),
        ("plot_history should be a boolean", dict(plot_history="True")),
        ("mod_path should be a string or None", dict(mod_path=2)),
        ("unknowns is not pandas dataframe", dict(unknowns="hello")),
        ("unknowns exists, but is empty", dict(unknowns=unknowns_empty)),
        # y_val becomes empty when almost everything goes to training
        ("train_prop is too high", dict(train_prop=0.99, seed=1234)),
    ]
    for match, overrides in bad_cases:
        with pytest.raises(ValueError, match=match):
            pop_finder.pop_finder(**{**base, **overrides})
def test_run_neural_net():
    """Smoke-test run_neural_net outputs (single and ensemble) plus input checks."""
    out_dir = "tests/test_output"

    # Single-model run should write metrics and population assignments.
    pop_finder.run_neural_net(
        infile_all,
        sample_data2,
        patience=10,
        max_epochs=2,
        save_dir=out_dir,
    )
    for fname in ("/metrics.csv", "/pop_assign.csv"):
        assert os.path.isfile(out_dir + fname)
    shutil.rmtree(out_dir)

    # Ensemble run (with stacking) should additionally write ensemble outputs.
    pop_finder.run_neural_net(
        infile_all,
        sample_data2,
        patience=10,
        max_epochs=2,
        ensemble=True,
        nbags=2,
        try_stacking=True,
        save_dir=out_dir,
    )
    for fname in (
        "/ensemble_test_results.csv",
        "/pop_assign_ensemble.csv",
        "/metrics.csv",
        "/pop_assign_freqs.csv",
    ):
        assert os.path.isfile(out_dir + fname)
    shutil.rmtree(out_dir)

    # Invalid arguments should raise ValueError with an informative message.
    with pytest.raises(ValueError, match="Path to infile does not exist"):
        pop_finder.run_neural_net(
            infile="hello",
            sample_data=sample_data2,
            patience=10,
            max_epochs=2,
            save_dir=out_dir,
        )
    with pytest.raises(ValueError, match="Path to sample_data does not exist"):
        pop_finder.run_neural_net(
            infile_all,
            sample_data="hello",
            patience=10,
            max_epochs=2,
            save_dir=out_dir,
        )
    with pytest.raises(ValueError,
                       match="save_allele_counts should be a boolean"):
        pop_finder.run_neural_net(
            infile_all,
            sample_data2,
            save_allele_counts="True",
            patience=10,
            max_epochs=2,
            save_dir=out_dir,
        )
    with pytest.raises(ValueError,
                       match="mod_path should either be a string or None"):
        pop_finder.run_neural_net(
            infile_all,
            sample_data2,
            mod_path=2,
            patience=10,
            max_epochs=2,
            save_dir=out_dir,
        )
    with pytest.raises(ValueError, match="Path to mod_path does not exist"):
        pop_finder.run_neural_net(
            infile_all,
            sample_data2,
            mod_path="hello",
            patience=10,
            max_epochs=2,
            save_dir=out_dir,
        )
    with pytest.raises(ValueError, match="train_prop should be a float"):
        pop_finder.run_neural_net(
            infile_all,
            sample_data2,
            patience=10,
            max_epochs=2,
            save_dir=out_dir,
            train_prop=1,
        )
    with pytest.raises(ValueError, match="train_prop is too high"):
        pop_finder.run_neural_net(
            infile_all,
            sample_data2,
            patience=10,
            max_epochs=2,
            save_dir=out_dir,
            train_prop=0.99,
        )
def test_assign_plot():
    """assign_plot should reject bad argument types and missing input files."""
    # (expected error message, keyword arguments) — checked in this order.
    bad_calls = [
        ("save_dir should be string", dict(save_dir=2)),
        ("ensemble should be boolean", dict(save_dir="hello", ensemble="True")),
        (
            "col_scheme should be string",
            dict(save_dir="hello", ensemble=False, col_scheme=1),
        ),
        (
            "pop_assign_freqs.csv does not exist in save_dir",
            dict(save_dir="hello", ensemble=True),
        ),
        (
            "pop_assign.csv does not exist in save_dir",
            dict(save_dir="hello", ensemble=False),
        ),
    ]
    for msg, kwargs in bad_calls:
        with pytest.raises(ValueError, match=msg):
            pop_finder.assign_plot(**kwargs)
def test_structure_plot():
    """structure_plot should write a PNG into the save directory, then validate args."""
    # Standard (non-ensemble) output directory.
    png = "tests/test_inputs/kfcv_test_output/structure_plot.png"
    pop_finder.structure_plot(save_dir="tests/test_inputs/kfcv_test_output")
    assert os.path.exists(png)
    if os.path.exists(png):
        os.remove(png)

    # Ensemble output directory.
    ens_png = (
        "tests/test_inputs/kfcv_ensemble_test_output/structure_plot.png"
    )
    pop_finder.structure_plot(
        save_dir="tests/test_inputs/kfcv_ensemble_test_output",
        ensemble=True,
    )
    assert os.path.exists(ens_png)
    if os.path.exists(ens_png):
        os.remove(ens_png)

    # Input validation.
    with pytest.raises(ValueError,
                       match="Path to ensemble_preds does not exist"):
        pop_finder.structure_plot(save_dir="incorrect", ensemble=True)
    with pytest.raises(ValueError,
                       match="Path to preds does not exist"):
        pop_finder.structure_plot(save_dir="incorrect", ensemble=False)
    with pytest.raises(ValueError,
                       match="col_scheme should be a string"):
        pop_finder.structure_plot(
            save_dir="tests/test_inputs/kfcv_test_output",
            ensemble=False,
            col_scheme=2,
        )
def test_contour_classifier():
    """contour_classifier input validation and basic output contract.

    Fix: the original test repeated the `match="bootstraps"` block twice,
    verbatim; the redundant copy is removed (same calls, same coverage).
    """
    with pytest.raises(ValueError, match="save_dir does not exist"):
        contour_classifier.contour_classifier(
            sample_data=sample_data1, save_dir="incorrect"
        )
    with pytest.raises(ValueError, match="path to sample_data incorrect"):
        contour_classifier.contour_classifier(
            sample_data="incorrect", save_dir="tests/test_inputs/test_out"
        )
    with pytest.raises(ValueError, match="path to genetic data incorrect"):
        contour_classifier.contour_classifier(
            sample_data=sample_data1,
            run_locator=True,
            gen_dat="incorrect",
            save_dir="tests/test_inputs/test_out",
        )
    with pytest.raises(ValueError, match="Cannot use hdf5 file"):
        contour_classifier.contour_classifier(
            sample_data=sample_data1,
            run_locator=True,
            gen_dat=infile_all,
            save_dir="tests/test_inputs/test_out",
        )
    with pytest.raises(ValueError, match="bootstraps"):
        contour_classifier.contour_classifier(
            sample_data=sample_data1,
            nboots=25,
            save_dir="tests/test_inputs/test_out",
            multi_iter=1,
        )
    with pytest.raises(
        ValueError,
        match="Something went wrong with the prediction data"
    ):
        contour_classifier.contour_classifier(
            sample_data=sample_data3,
            save_dir="tests/test_inputs/test_out"
        )
    with pytest.raises(
        ValueError,
        match="sample_data file should have columns x, y, pop, and sampleID"
    ):
        contour_classifier.contour_classifier(
            sample_data=sample_data4,
            save_dir="tests/test_inputs/test_out"
        )
    with pytest.raises(Exception,
                       match="Too few points to generate contours"):
        contour_classifier.contour_classifier(
            sample_data=sample_data2,
            run_locator=True,
            gen_dat=infile_all_vcf,
            nboots=1,
            max_epochs=1,
            save_dir="tests/test_inputs/test_out",
        )

    # A valid run returns a DataFrame with the expected columns and
    # kernel-density estimates bounded in [0, 1].
    class_df = contour_classifier.contour_classifier(
        sample_data=sample_data2,
        save_dir="tests/test_inputs/test_out"
    )
    assert isinstance(class_df, pd.core.frame.DataFrame)
    assert (class_df.columns == ["sampleID",
                                 "classification",
                                 "kd_estimate"]).all()
    assert (class_df["kd_estimate"] <= 1).all()
    assert (class_df["kd_estimate"] >= 0).all()
def test_cont_finder():
    """cont_finder should classify a single sample from its KDE contour set."""
    preds = pd.read_csv(pred_path).rename(
        {"x": "pred_x", "y": "pred_y"}, axis=1
    )
    truth = pd.read_csv(sample_data1, sep="\t")
    one_sample = preds[preds["sampleID"] == "LESP_65"]
    xs = one_sample["pred_x"]
    ys = one_sample["pred_y"]

    # Pad the evaluation window by a tenth of the point range on each side.
    pad_x = (max(xs) - min(xs)) / 10
    pad_y = (max(ys) - min(ys)) / 10
    xlim = min(xs) - pad_x, max(xs) + pad_x
    ylim = min(ys) - pad_y, max(ys) + pad_y

    # Evaluate a Gaussian KDE of the predictions on a 200x200 grid.
    X, Y = np.mgrid[
        xlim[0]:xlim[1]:200j, ylim[0]:ylim[1]:200j
    ]
    grid_pts = np.vstack([X.ravel(), Y.ravel()])
    kde = stats.gaussian_kde(np.vstack([xs, ys]))
    density = np.reshape(kde(grid_pts).T, X.shape)
    density = density / np.max(density)

    fig = plt.figure(figsize=(8, 8))
    cset = fig.gca().contour(X, Y, density, 10, colors="black")
    # cont_finder expects the contour levels ordered highest-first.
    cset.levels = -np.sort(-cset.levels)

    res = contour_classifier.cont_finder(truth, cset)
    assert len(res) == 2
    assert res[0] == "Baccalieu"
    assert res[1] == 0.4
    plt.close()
def test_kfcv_contour():
    """K-fold CV for the contour classifier: input check and output shape."""
    with pytest.raises(ValueError, match="path to sample_data incorrect"):
        contour_classifier.kfcv(
            sample_data="incorrect",
            gen_dat=infile_all_vcf,
            save_dir="tests/test_inputs/kfcv",
        )

    pred_labels, true_labels, report = contour_classifier.kfcv(
        sample_data=sample_data1,
        gen_dat=infile_all_vcf,
        n_splits=2,
        n_runs=2,
        max_epochs=1,
        nboots=10,
        save_dir="tests/test_inputs/kfcv",
    )
    sample_df = pd.read_csv(sample_data1, sep="\t")
    assert len(pred_labels) == len(true_labels)
    # n_runs=2, so every sample is predicted twice.
    assert len(pred_labels) == len(sample_df) * 2
    assert isinstance(report, pd.core.frame.DataFrame)
| [
"numpy.load",
"os.remove",
"pandas.read_csv",
"pop_finder.contour_classifier.cont_finder",
"os.path.isfile",
"matplotlib.pyplot.figure",
"shutil.rmtree",
"pop_finder.contour_classifier.kfcv",
"pandas.DataFrame",
"pop_finder.contour_classifier.contour_classifier",
"matplotlib.pyplot.close",
"po... | [((766, 806), 'numpy.load', 'np.load', (['"""tests/test_inputs/X_train.npy"""'], {}), "('tests/test_inputs/X_train.npy')\n", (773, 806), True, 'import numpy as np\n'), ((823, 840), 'numpy.zeros', 'np.zeros', ([], {'shape': '(0)'}), '(shape=0)\n', (831, 840), True, 'import numpy as np\n'), ((851, 895), 'pandas.read_csv', 'pd.read_csv', (['"""tests/test_inputs/y_train.csv"""'], {}), "('tests/test_inputs/y_train.csv')\n", (862, 895), True, 'import pandas as pd\n'), ((912, 926), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (924, 926), True, 'import pandas as pd\n'), ((936, 975), 'numpy.load', 'np.load', (['"""tests/test_inputs/X_test.npy"""'], {}), "('tests/test_inputs/X_test.npy')\n", (943, 975), True, 'import numpy as np\n'), ((991, 1008), 'numpy.zeros', 'np.zeros', ([], {'shape': '(0)'}), '(shape=0)\n', (999, 1008), True, 'import numpy as np\n'), ((1018, 1061), 'pandas.read_csv', 'pd.read_csv', (['"""tests/test_inputs/y_test.csv"""'], {}), "('tests/test_inputs/y_test.csv')\n", (1029, 1061), True, 'import pandas as pd\n'), ((1077, 1091), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1089, 1091), True, 'import pandas as pd\n'), ((1103, 1153), 'pandas.read_csv', 'pd.read_csv', (['"""tests/test_inputs/test_unknowns.csv"""'], {}), "('tests/test_inputs/test_unknowns.csv')\n", (1114, 1153), True, 'import pandas as pd\n'), ((1171, 1185), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1183, 1185), True, 'import pandas as pd\n'), ((1194, 1232), 'numpy.load', 'np.load', (['"""tests/test_inputs/ukgen.npy"""'], {}), "('tests/test_inputs/ukgen.npy')\n", (1201, 1232), True, 'import numpy as np\n'), ((1247, 1264), 'numpy.zeros', 'np.zeros', ([], {'shape': '(0)'}), '(shape=0)\n', (1255, 1264), True, 'import numpy as np\n'), ((1379, 1425), 'pop_finder.pop_finder.read_data', 'pop_finder.read_data', (['infile_all', 'sample_data2'], {}), '(infile_all, sample_data2)\n', (1399, 1425), False, 'from pop_finder import pop_finder\n'), ((1660, 1717), 
'pop_finder.pop_finder.read_data', 'pop_finder.read_data', (['infile_all', 'sample_data1'], {'kfcv': '(True)'}), '(infile_all, sample_data1, kfcv=True)\n', (1680, 1717), False, 'from pop_finder import pop_finder\n'), ((2779, 2840), 'pop_finder.pop_finder.classifierHyperModel', 'pop_finder.classifierHyperModel', ([], {'input_shape': '(2)', 'num_classes': '(2)'}), '(input_shape=2, num_classes=2)\n', (2810, 2840), False, 'from pop_finder import pop_finder\n'), ((3069, 3215), 'pop_finder.pop_finder.hyper_tune', 'pop_finder.hyper_tune', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'max_epochs': '(10)', 'save_dir': '"""tests/hyper_tune_test_out"""', 'mod_name': '"""hyper_tune"""'}), "(infile=infile_all, sample_data=sample_data2,\n max_epochs=10, save_dir='tests/hyper_tune_test_out', mod_name='hyper_tune')\n", (3090, 3215), False, 'from pop_finder import pop_finder\n'), ((3416, 3459), 'os.path.exists', 'os.path.exists', (['"""tests/hyper_tune_test_out"""'], {}), "('tests/hyper_tune_test_out')\n", (3430, 3459), False, 'import os\n'), ((3471, 3523), 'os.path.exists', 'os.path.exists', (['"""tests/hyper_tune_test_out/best_mod"""'], {}), "('tests/hyper_tune_test_out/best_mod')\n", (3485, 3523), False, 'import os\n'), ((3535, 3590), 'os.path.exists', 'os.path.exists', (['"""tests/hyper_tune_test_out/X_train.npy"""'], {}), "('tests/hyper_tune_test_out/X_train.npy')\n", (3549, 3590), False, 'import os\n'), ((3602, 3656), 'os.path.exists', 'os.path.exists', (['"""tests/hyper_tune_test_out/X_test.npy"""'], {}), "('tests/hyper_tune_test_out/X_test.npy')\n", (3616, 3656), False, 'import os\n'), ((3668, 3723), 'os.path.exists', 'os.path.exists', (['"""tests/hyper_tune_test_out/y_train.csv"""'], {}), "('tests/hyper_tune_test_out/y_train.csv')\n", (3682, 3723), False, 'import os\n'), ((3735, 3789), 'os.path.exists', 'os.path.exists', (['"""tests/hyper_tune_test_out/y_test.csv"""'], {}), "('tests/hyper_tune_test_out/y_test.csv')\n", (3749, 3789), False, 'import os\n'), 
((3830, 3882), 'os.path.exists', 'os.path.exists', (['"""tests/hyper_tune_test_out/best_mod"""'], {}), "('tests/hyper_tune_test_out/best_mod')\n", (3844, 3882), False, 'import os\n'), ((7489, 7674), 'pop_finder.pop_finder.kfcv', 'pop_finder.kfcv', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'n_splits': '(3)', 'n_reps': '(1)', 'patience': '(10)', 'max_epochs': '(10)', 'save_dir': '"""tests/kfcv_test_output"""', 'mod_path': '"""hyper_tune_test_out"""'}), "(infile=infile_all, sample_data=sample_data2, n_splits=3,\n n_reps=1, patience=10, max_epochs=10, save_dir='tests/kfcv_test_output',\n mod_path='hyper_tune_test_out')\n", (7504, 7674), False, 'from pop_finder import pop_finder\n'), ((7906, 8116), 'pop_finder.pop_finder.kfcv', 'pop_finder.kfcv', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'n_splits': '(3)', 'n_reps': '(1)', 'ensemble': '(True)', 'nbags': '(2)', 'patience': '(10)', 'max_epochs': '(10)', 'save_dir': '"""tests/kfcv_test_output"""', 'mod_path': '"""hyper_tune_test_out"""'}), "(infile=infile_all, sample_data=sample_data2, n_splits=3,\n n_reps=1, ensemble=True, nbags=2, patience=10, max_epochs=10, save_dir=\n 'tests/kfcv_test_output', mod_path='hyper_tune_test_out')\n", (7921, 8116), False, 'from pop_finder import pop_finder\n'), ((11597, 11768), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, save_dir=\n 'tests/test_output', max_epochs=10)\n", (11618, 11768), False, 'from pop_finder import pop_finder\n'), ((11900, 12094), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 
'ensemble': '(True)', 'nbags': '(2)', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, ensemble=True, nbags=2,\n save_dir='tests/test_output', max_epochs=10)\n", (11921, 12094), False, 'from pop_finder import pop_finder\n'), ((22269, 22372), 'pop_finder.pop_finder.run_neural_net', 'pop_finder.run_neural_net', (['infile_all', 'sample_data2'], {'patience': '(10)', 'max_epochs': '(2)', 'save_dir': 'save_path'}), '(infile_all, sample_data2, patience=10, max_epochs\n =2, save_dir=save_path)\n', (22294, 22372), False, 'from pop_finder import pop_finder\n'), ((22464, 22506), 'os.path.isfile', 'os.path.isfile', (["(save_path + '/metrics.csv')"], {}), "(save_path + '/metrics.csv')\n", (22478, 22506), False, 'import os\n'), ((22518, 22563), 'os.path.isfile', 'os.path.isfile', (["(save_path + '/pop_assign.csv')"], {}), "(save_path + '/pop_assign.csv')\n", (22532, 22563), False, 'import os\n'), ((22568, 22592), 'shutil.rmtree', 'shutil.rmtree', (['save_path'], {}), '(save_path)\n', (22581, 22592), False, 'import shutil\n'), ((22598, 22744), 'pop_finder.pop_finder.run_neural_net', 'pop_finder.run_neural_net', (['infile_all', 'sample_data2'], {'patience': '(10)', 'max_epochs': '(2)', 'ensemble': '(True)', 'nbags': '(2)', 'try_stacking': '(True)', 'save_dir': 'save_path'}), '(infile_all, sample_data2, patience=10, max_epochs\n =2, ensemble=True, nbags=2, try_stacking=True, save_dir=save_path)\n', (22623, 22744), False, 'from pop_finder import pop_finder\n'), ((22860, 22916), 'os.path.isfile', 'os.path.isfile', (["(save_path + '/ensemble_test_results.csv')"], {}), "(save_path + '/ensemble_test_results.csv')\n", (22874, 22916), False, 'import os\n'), ((22928, 22982), 'os.path.isfile', 'os.path.isfile', (["(save_path + '/pop_assign_ensemble.csv')"], {}), "(save_path + '/pop_assign_ensemble.csv')\n", (22942, 22982), False, 'import os\n'), ((22994, 23036), 
'os.path.isfile', 'os.path.isfile', (["(save_path + '/metrics.csv')"], {}), "(save_path + '/metrics.csv')\n", (23008, 23036), False, 'import os\n'), ((23048, 23099), 'os.path.isfile', 'os.path.isfile', (["(save_path + '/pop_assign_freqs.csv')"], {}), "(save_path + '/pop_assign_freqs.csv')\n", (23062, 23099), False, 'import os\n'), ((23104, 23128), 'shutil.rmtree', 'shutil.rmtree', (['save_path'], {}), '(save_path)\n', (23117, 23128), False, 'import shutil\n'), ((26078, 26150), 'pop_finder.pop_finder.structure_plot', 'pop_finder.structure_plot', ([], {'save_dir': '"""tests/test_inputs/kfcv_test_output"""'}), "(save_dir='tests/test_inputs/kfcv_test_output')\n", (26103, 26150), False, 'from pop_finder import pop_finder\n'), ((26162, 26233), 'os.path.exists', 'os.path.exists', (['"""tests/test_inputs/kfcv_test_output/structure_plot.png"""'], {}), "('tests/test_inputs/kfcv_test_output/structure_plot.png')\n", (26176, 26233), False, 'import os\n'), ((26250, 26321), 'os.path.exists', 'os.path.exists', (['"""tests/test_inputs/kfcv_test_output/structure_plot.png"""'], {}), "('tests/test_inputs/kfcv_test_output/structure_plot.png')\n", (26264, 26321), False, 'import os\n'), ((26439, 26540), 'pop_finder.pop_finder.structure_plot', 'pop_finder.structure_plot', ([], {'save_dir': '"""tests/test_inputs/kfcv_ensemble_test_output"""', 'ensemble': '(True)'}), "(save_dir=\n 'tests/test_inputs/kfcv_ensemble_test_output', ensemble=True)\n", (26464, 26540), False, 'from pop_finder import pop_finder\n'), ((26569, 26654), 'os.path.exists', 'os.path.exists', (['"""tests/test_inputs/kfcv_ensemble_test_output/structure_plot.png"""'], {}), "('tests/test_inputs/kfcv_ensemble_test_output/structure_plot.png'\n )\n", (26583, 26654), False, 'import os\n'), ((26671, 26756), 'os.path.exists', 'os.path.exists', (['"""tests/test_inputs/kfcv_ensemble_test_output/structure_plot.png"""'], {}), "('tests/test_inputs/kfcv_ensemble_test_output/structure_plot.png'\n )\n", (26685, 26756), False, 'import 
os\n'), ((29923, 30030), 'pop_finder.contour_classifier.contour_classifier', 'contour_classifier.contour_classifier', ([], {'sample_data': 'sample_data2', 'save_dir': '"""tests/test_inputs/test_out"""'}), "(sample_data=sample_data2, save_dir=\n 'tests/test_inputs/test_out')\n", (29960, 30030), False, 'from pop_finder import contour_classifier\n'), ((30394, 30416), 'pandas.read_csv', 'pd.read_csv', (['pred_path'], {}), '(pred_path)\n', (30405, 30416), True, 'import pandas as pd\n'), ((30503, 30538), 'pandas.read_csv', 'pd.read_csv', (['sample_data1'], {'sep': '"""\t"""'}), "(sample_data1, sep='\\t')\n", (30514, 30538), True, 'import pandas as pd\n'), ((31047, 31098), 'numpy.vstack', 'np.vstack', (["[test_dat['pred_x'], test_dat['pred_y']]"], {}), "([test_dat['pred_x'], test_dat['pred_y']])\n", (31056, 31098), True, 'import numpy as np\n'), ((31112, 31138), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['values'], {}), '(values)\n', (31130, 31138), False, 'from scipy import stats\n'), ((31224, 31250), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (31234, 31250), True, 'import matplotlib.pyplot as plt\n'), ((31376, 31422), 'pop_finder.contour_classifier.cont_finder', 'contour_classifier.cont_finder', (['true_lab', 'cset'], {}), '(true_lab, cset)\n', (31406, 31422), False, 'from pop_finder import contour_classifier\n'), ((31510, 31521), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (31519, 31521), True, 'import matplotlib.pyplot as plt\n'), ((31828, 31992), 'pop_finder.contour_classifier.kfcv', 'contour_classifier.kfcv', ([], {'sample_data': 'sample_data1', 'gen_dat': 'infile_all_vcf', 'n_splits': '(2)', 'n_runs': '(2)', 'max_epochs': '(1)', 'nboots': '(10)', 'save_dir': '"""tests/test_inputs/kfcv"""'}), "(sample_data=sample_data1, gen_dat=infile_all_vcf,\n n_splits=2, n_runs=2, max_epochs=1, nboots=10, save_dir=\n 'tests/test_inputs/kfcv')\n", (31851, 31992), False, 'from pop_finder import 
contour_classifier\n'), ((32063, 32098), 'pandas.read_csv', 'pd.read_csv', (['sample_data1'], {'sep': '"""\t"""'}), "(sample_data1, sep='\\t')\n", (32074, 32098), True, 'import pandas as pd\n'), ((1894, 1958), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Path to infile does not exist"""'}), "(ValueError, match='Path to infile does not exist')\n", (1907, 1958), False, 'import pytest\n'), ((1968, 2030), 'pop_finder.pop_finder.read_data', 'pop_finder.read_data', ([], {'infile': '"""hello"""', 'sample_data': 'sample_data2'}), "(infile='hello', sample_data=sample_data2)\n", (1988, 2030), False, 'from pop_finder import pop_finder\n'), ((2040, 2131), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Infile must have extension \'zarr\', \'vcf\', or \'hdf5\'"""'}), '(ValueError, match=\n "Infile must have extension \'zarr\', \'vcf\', or \'hdf5\'")\n', (2053, 2131), False, 'import pytest\n'), ((2150, 2217), 'pop_finder.pop_finder.read_data', 'pop_finder.read_data', ([], {'infile': 'sample_data1', 'sample_data': 'sample_data2'}), '(infile=sample_data1, sample_data=sample_data2)\n', (2170, 2217), False, 'from pop_finder import pop_finder\n'), ((2227, 2296), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Path to sample_data does not exist"""'}), "(ValueError, match='Path to sample_data does not exist')\n", (2240, 2296), False, 'import pytest\n'), ((2329, 2382), 'pop_finder.pop_finder.read_data', 'pop_finder.read_data', (['infile_all'], {'sample_data': '"""hello"""'}), "(infile_all, sample_data='hello')\n", (2349, 2382), False, 'from pop_finder import pop_finder\n'), ((2392, 2468), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""sample_data does not have correct columns"""'}), "(ValueError, match='sample_data does not have correct columns')\n", (2405, 2468), False, 'import pytest\n'), ((2501, 2559), 'pop_finder.pop_finder.read_data', 'pop_finder.read_data', (['infile_all'], {'sample_data': 'sample_data4'}), 
'(infile_all, sample_data=sample_data4)\n', (2521, 2559), False, 'from pop_finder import pop_finder\n'), ((2569, 2665), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""sample ordering failed! Check that sample IDs match VCF."""'}), "(ValueError, match=\n 'sample ordering failed! Check that sample IDs match VCF.')\n", (2582, 2665), False, 'import pytest\n'), ((2692, 2739), 'pop_finder.pop_finder.read_data', 'pop_finder.read_data', (['infile_kfcv', 'sample_data3'], {}), '(infile_kfcv, sample_data3)\n', (2712, 2739), False, 'from pop_finder import pop_finder\n'), ((3892, 3943), 'shutil.rmtree', 'shutil.rmtree', (['"""tests/hyper_tune_test_out/best_mod"""'], {}), "('tests/hyper_tune_test_out/best_mod')\n", (3905, 3943), False, 'import shutil\n'), ((4007, 4064), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""train_prop is too high"""'}), "(ValueError, match='train_prop is too high')\n", (4020, 4064), False, 'import pytest\n'), ((4074, 4242), 'pop_finder.pop_finder.hyper_tune', 'pop_finder.hyper_tune', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'max_epochs': '(10)', 'save_dir': '"""tests/hyper_tune_test_out"""', 'mod_name': '"""hyper_tune"""', 'train_prop': '(0.99)'}), "(infile=infile_all, sample_data=sample_data2,\n max_epochs=10, save_dir='tests/hyper_tune_test_out', mod_name=\n 'hyper_tune', train_prop=0.99)\n", (4095, 4242), False, 'from pop_finder import pop_finder\n'), ((4378, 4434), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""infile does not exist"""'}), "(ValueError, match='infile does not exist')\n", (4391, 4434), False, 'import pytest\n'), ((4444, 4620), 'pop_finder.pop_finder.hyper_tune', 'pop_finder.hyper_tune', ([], {'infile': '"""tests/test_inputs/onlyAtl_500.vcf"""', 'sample_data': 'sample_data2', 'max_epochs': '(10)', 'save_dir': '"""tests/hyper_tune_test_out"""', 'mod_name': '"""hyper_tune"""'}), "(infile='tests/test_inputs/onlyAtl_500.vcf',\n sample_data=sample_data2, 
max_epochs=10, save_dir=\n 'tests/hyper_tune_test_out', mod_name='hyper_tune')\n", (4465, 4620), False, 'from pop_finder import pop_finder\n'), ((4725, 4786), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""sample_data does not exist"""'}), "(ValueError, match='sample_data does not exist')\n", (4738, 4786), False, 'import pytest\n'), ((4796, 4941), 'pop_finder.pop_finder.hyper_tune', 'pop_finder.hyper_tune', ([], {'infile': 'infile_all', 'sample_data': '"""hello.txt"""', 'max_epochs': '(10)', 'save_dir': '"""tests/hyper_tune_test_out"""', 'mod_name': '"""hyper_tune"""'}), "(infile=infile_all, sample_data='hello.txt',\n max_epochs=10, save_dir='tests/hyper_tune_test_out', mod_name='hyper_tune')\n", (4817, 4941), False, 'from pop_finder import pop_finder\n'), ((5052, 5115), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""max_trials should be integer"""'}), "(ValueError, match='max_trials should be integer')\n", (5065, 5115), False, 'import pytest\n'), ((5125, 5291), 'pop_finder.pop_finder.hyper_tune', 'pop_finder.hyper_tune', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'max_epochs': '(10)', 'max_trials': '(1.5)', 'save_dir': '"""tests/hyper_tune_test_out"""', 'mod_name': '"""hyper_tune"""'}), "(infile=infile_all, sample_data=sample_data2,\n max_epochs=10, max_trials=1.5, save_dir='tests/hyper_tune_test_out',\n mod_name='hyper_tune')\n", (5146, 5291), False, 'from pop_finder import pop_finder\n'), ((5414, 5481), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""runs_per_trial should be integer"""'}), "(ValueError, match='runs_per_trial should be integer')\n", (5427, 5481), False, 'import pytest\n'), ((5491, 5661), 'pop_finder.pop_finder.hyper_tune', 'pop_finder.hyper_tune', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'max_epochs': '(10)', 'runs_per_trial': '(1.2)', 'save_dir': '"""tests/hyper_tune_test_out"""', 'mod_name': '"""hyper_tune"""'}), "(infile=infile_all, 
sample_data=sample_data2,\n max_epochs=10, runs_per_trial=1.2, save_dir='tests/hyper_tune_test_out',\n mod_name='hyper_tune')\n", (5512, 5661), False, 'from pop_finder import pop_finder\n'), ((5780, 5843), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""max_epochs should be integer"""'}), "(ValueError, match='max_epochs should be integer')\n", (5793, 5843), False, 'import pytest\n'), ((5853, 6006), 'pop_finder.pop_finder.hyper_tune', 'pop_finder.hyper_tune', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'max_epochs': '"""10"""', 'save_dir': '"""tests/hyper_tune_test_out"""', 'mod_name': '"""hyper_tune"""'}), "(infile=infile_all, sample_data=sample_data2,\n max_epochs='10', save_dir='tests/hyper_tune_test_out', mod_name=\n 'hyper_tune')\n", (5874, 6006), False, 'from pop_finder import pop_finder\n'), ((6112, 6173), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""train_prop should be float"""'}), "(ValueError, match='train_prop should be float')\n", (6125, 6173), False, 'import pytest\n'), ((6183, 6348), 'pop_finder.pop_finder.hyper_tune', 'pop_finder.hyper_tune', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'max_epochs': '(10)', 'save_dir': '"""tests/hyper_tune_test_out"""', 'mod_name': '"""hyper_tune"""', 'train_prop': '(1)'}), "(infile=infile_all, sample_data=sample_data2,\n max_epochs=10, save_dir='tests/hyper_tune_test_out', mod_name=\n 'hyper_tune', train_prop=1)\n", (6204, 6348), False, 'from pop_finder import pop_finder\n'), ((6456, 6521), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""seed should be integer or None"""'}), "(ValueError, match='seed should be integer or None')\n", (6469, 6521), False, 'import pytest\n'), ((6531, 6708), 'pop_finder.pop_finder.hyper_tune', 'pop_finder.hyper_tune', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'max_epochs': '(10)', 'save_dir': '"""tests/hyper_tune_test_out"""', 'mod_name': '"""hyper_tune"""', 'train_prop': '(0.8)', 
'seed': '"""2"""'}), "(infile=infile_all, sample_data=sample_data2,\n max_epochs=10, save_dir='tests/hyper_tune_test_out', mod_name=\n 'hyper_tune', train_prop=0.8, seed='2')\n", (6552, 6708), False, 'from pop_finder import pop_finder\n'), ((6832, 6892), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""save_dir should be string"""'}), "(ValueError, match='save_dir should be string')\n", (6845, 6892), False, 'import pytest\n'), ((6902, 7038), 'pop_finder.pop_finder.hyper_tune', 'pop_finder.hyper_tune', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'max_epochs': '(10)', 'save_dir': '(2)', 'mod_name': '"""hyper_tune"""', 'train_prop': '(0.8)'}), "(infile=infile_all, sample_data=sample_data2,\n max_epochs=10, save_dir=2, mod_name='hyper_tune', train_prop=0.8)\n", (6923, 7038), False, 'from pop_finder import pop_finder\n'), ((7155, 7215), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""mod_name should be string"""'}), "(ValueError, match='mod_name should be string')\n", (7168, 7215), False, 'import pytest\n'), ((7225, 7380), 'pop_finder.pop_finder.hyper_tune', 'pop_finder.hyper_tune', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'max_epochs': '(10)', 'save_dir': '"""tests/hyper_tune_test_out"""', 'mod_name': '(2)', 'train_prop': '(0.8)'}), "(infile=infile_all, sample_data=sample_data2,\n max_epochs=10, save_dir='tests/hyper_tune_test_out', mod_name=2,\n train_prop=0.8)\n", (7246, 7380), False, 'from pop_finder import pop_finder\n'), ((8356, 8420), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""path to infile does not exist"""'}), "(ValueError, match='path to infile does not exist')\n", (8369, 8420), False, 'import pytest\n'), ((8430, 8616), 'pop_finder.pop_finder.kfcv', 'pop_finder.kfcv', ([], {'infile': '"""hello.txt"""', 'sample_data': 'sample_data2', 'n_splits': '(3)', 'n_reps': '(1)', 'patience': '(10)', 'max_epochs': '(10)', 'save_dir': '"""tests/kfcv_test_output"""', 'mod_path': 
'"""hyper_tune_test_out"""'}), "(infile='hello.txt', sample_data=sample_data2, n_splits=3,\n n_reps=1, patience=10, max_epochs=10, save_dir='tests/kfcv_test_output',\n mod_path='hyper_tune_test_out')\n", (8445, 8616), False, 'from pop_finder import pop_finder\n'), ((8758, 8822), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""path to sample_data incorrect"""'}), "(ValueError, match='path to sample_data incorrect')\n", (8771, 8822), False, 'import pytest\n'), ((8832, 9016), 'pop_finder.pop_finder.kfcv', 'pop_finder.kfcv', ([], {'infile': 'infile_all', 'sample_data': '"""hello.txt"""', 'n_splits': '(3)', 'n_reps': '(1)', 'patience': '(10)', 'max_epochs': '(10)', 'save_dir': '"""tests/kfcv_test_output"""', 'mod_path': '"""hyper_tune_test_out"""'}), "(infile=infile_all, sample_data='hello.txt', n_splits=3,\n n_reps=1, patience=10, max_epochs=10, save_dir='tests/kfcv_test_output',\n mod_path='hyper_tune_test_out')\n", (8847, 9016), False, 'from pop_finder import pop_finder\n'), ((9153, 9217), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""n_splits should be an integer"""'}), "(ValueError, match='n_splits should be an integer')\n", (9166, 9217), False, 'import pytest\n'), ((9227, 9414), 'pop_finder.pop_finder.kfcv', 'pop_finder.kfcv', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'n_splits': '(1.5)', 'n_reps': '(1)', 'patience': '(10)', 'max_epochs': '(10)', 'save_dir': '"""tests/kfcv_test_output"""', 'mod_path': '"""hyper_tune_test_out"""'}), "(infile=infile_all, sample_data=sample_data2, n_splits=1.5,\n n_reps=1, patience=10, max_epochs=10, save_dir='tests/kfcv_test_output',\n mod_path='hyper_tune_test_out')\n", (9242, 9414), False, 'from pop_finder import pop_finder\n'), ((9549, 9611), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""n_reps should be an integer"""'}), "(ValueError, match='n_reps should be an integer')\n", (9562, 9611), False, 'import pytest\n'), ((9621, 9809), 
'pop_finder.pop_finder.kfcv', 'pop_finder.kfcv', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'n_splits': '(3)', 'n_reps': '(1.5)', 'patience': '(10)', 'max_epochs': '(10)', 'save_dir': '"""tests/kfcv_test_output"""', 'mod_path': '"""hyper_tune_test_out"""'}), "(infile=infile_all, sample_data=sample_data2, n_splits=3,\n n_reps=1.5, patience=10, max_epochs=10, save_dir=\n 'tests/kfcv_test_output', mod_path='hyper_tune_test_out')\n", (9636, 9809), False, 'from pop_finder import pop_finder\n'), ((9945, 10008), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""ensemble should be a boolean"""'}), "(ValueError, match='ensemble should be a boolean')\n", (9958, 10008), False, 'import pytest\n'), ((10018, 10221), 'pop_finder.pop_finder.kfcv', 'pop_finder.kfcv', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'n_splits': '(3)', 'n_reps': '(1)', 'ensemble': '"""True"""', 'patience': '(10)', 'max_epochs': '(10)', 'save_dir': '"""tests/kfcv_test_output"""', 'mod_path': '"""hyper_tune_test_out"""'}), "(infile=infile_all, sample_data=sample_data2, n_splits=3,\n n_reps=1, ensemble='True', patience=10, max_epochs=10, save_dir=\n 'tests/kfcv_test_output', mod_path='hyper_tune_test_out')\n", (10033, 10221), False, 'from pop_finder import pop_finder\n'), ((10369, 10431), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""save_dir should be a string"""'}), "(ValueError, match='save_dir should be a string')\n", (10382, 10431), False, 'import pytest\n'), ((10441, 10604), 'pop_finder.pop_finder.kfcv', 'pop_finder.kfcv', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'n_splits': '(3)', 'n_reps': '(1)', 'patience': '(10)', 'max_epochs': '(10)', 'save_dir': '(2)', 'mod_path': '"""hyper_tune_test_out"""'}), "(infile=infile_all, sample_data=sample_data2, n_splits=3,\n n_reps=1, patience=10, max_epochs=10, save_dir=2, mod_path=\n 'hyper_tune_test_out')\n", (10456, 10604), False, 'from pop_finder import pop_finder\n'), 
((10731, 10797), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""n_splits must be greater than 1"""'}), "(ValueError, match='n_splits must be greater than 1')\n", (10744, 10797), False, 'import pytest\n'), ((10807, 10992), 'pop_finder.pop_finder.kfcv', 'pop_finder.kfcv', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'n_splits': '(1)', 'n_reps': '(1)', 'patience': '(10)', 'max_epochs': '(10)', 'save_dir': '"""tests/kfcv_test_output"""', 'mod_path': '"""hyper_tune_test_out"""'}), "(infile=infile_all, sample_data=sample_data2, n_splits=1,\n n_reps=1, patience=10, max_epochs=10, save_dir='tests/kfcv_test_output',\n mod_path='hyper_tune_test_out')\n", (10822, 10992), False, 'from pop_finder import pop_finder\n'), ((11152, 11241), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""n_splits cannot be greater than number of samples"""'}), "(ValueError, match=\n 'n_splits cannot be greater than number of samples')\n", (11165, 11241), False, 'import pytest\n'), ((11269, 11455), 'pop_finder.pop_finder.kfcv', 'pop_finder.kfcv', ([], {'infile': 'infile_all', 'sample_data': 'sample_data2', 'n_splits': '(10)', 'n_reps': '(1)', 'patience': '(10)', 'max_epochs': '(10)', 'save_dir': '"""tests/kfcv_test_output"""', 'mod_path': '"""hyper_tune_test_out"""'}), "(infile=infile_all, sample_data=sample_data2, n_splits=10,\n n_reps=1, patience=10, max_epochs=10, save_dir='tests/kfcv_test_output',\n mod_path='hyper_tune_test_out')\n", (11284, 11455), False, 'from pop_finder import pop_finder\n'), ((12291, 12359), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_train is not a pandas dataframe"""'}), "(ValueError, match='y_train is not a pandas dataframe')\n", (12304, 12359), False, 'import pytest\n'), ((12369, 12534), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': '(2)', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_dir': 
'"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=2, X_test=X_test, y_test=\n y_test, unknowns=unknowns, ukgen=ukgen, save_dir='tests/test_output',\n max_epochs=10)\n", (12390, 12534), False, 'from pop_finder import pop_finder\n'), ((12642, 12705), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_train exists, but is empty"""'}), "(ValueError, match='y_train exists, but is empty')\n", (12655, 12705), False, 'import pytest\n'), ((12715, 12892), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train_empty', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train_empty, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, save_dir=\n 'tests/test_output', max_epochs=10)\n", (12736, 12892), False, 'from pop_finder import pop_finder\n'), ((13000, 13067), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_test is not a pandas dataframe"""'}), "(ValueError, match='y_test is not a pandas dataframe')\n", (13013, 13067), False, 'import pytest\n'), ((13077, 13242), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': '(2)', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=2, unknowns=unknowns, ukgen=ukgen, save_dir='tests/test_output',\n max_epochs=10)\n", (13098, 13242), False, 'from pop_finder import pop_finder\n'), ((13351, 13413), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""y_test exists, but is empty"""'}), "(ValueError, match='y_test exists, but is empty')\n", (13364, 13413), False, 'import pytest\n'), ((13423, 13600), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 
'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test_empty', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test_empty, unknowns=unknowns, ukgen=ukgen, save_dir=\n 'tests/test_output', max_epochs=10)\n", (13444, 13600), False, 'from pop_finder import pop_finder\n'), ((13708, 13771), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""X_train is not a numpy array"""'}), "(ValueError, match='X_train is not a numpy array')\n", (13721, 13771), False, 'import pytest\n'), ((13781, 13946), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': '(2)', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=2, y_train=y_train, X_test=X_test, y_test=\n y_test, unknowns=unknowns, ukgen=ukgen, save_dir='tests/test_output',\n max_epochs=10)\n", (13802, 13946), False, 'from pop_finder import pop_finder\n'), ((14054, 14117), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""X_train exists, but is empty"""'}), "(ValueError, match='X_train exists, but is empty')\n", (14067, 14117), False, 'import pytest\n'), ((14127, 14304), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train_empty', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train_empty, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, save_dir=\n 'tests/test_output', max_epochs=10)\n", (14148, 14304), False, 'from pop_finder import pop_finder\n'), ((14412, 14474), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""X_test is not a numpy array"""'}), "(ValueError, match='X_test is not a numpy array')\n", (14425, 
14474), False, 'import pytest\n'), ((14484, 14650), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': '(2)', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=2, y_test=\n y_test, unknowns=unknowns, ukgen=ukgen, save_dir='tests/test_output',\n max_epochs=10)\n", (14505, 14650), False, 'from pop_finder import pop_finder\n'), ((14758, 14820), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""X_test exists, but is empty"""'}), "(ValueError, match='X_test exists, but is empty')\n", (14771, 14820), False, 'import pytest\n'), ((14830, 15007), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test_empty', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test_empty,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, save_dir=\n 'tests/test_output', max_epochs=10)\n", (14851, 15007), False, 'from pop_finder import pop_finder\n'), ((15115, 15176), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""ukgen is not a numpy array"""'}), "(ValueError, match='ukgen is not a numpy array')\n", (15128, 15176), False, 'import pytest\n'), ((15186, 15352), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': '(2)', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=2, save_dir='tests/test_output',\n max_epochs=10)\n", (15207, 15352), False, 'from pop_finder import pop_finder\n'), ((15461, 15522), 'pytest.raises', 'pytest.raises', (['ValueError'], 
{'match': '"""ukgen exists, but is empty"""'}), "(ValueError, match='ukgen exists, but is empty')\n", (15474, 15522), False, 'import pytest\n'), ((15532, 15709), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen_empty', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen_empty, save_dir=\n 'tests/test_output', max_epochs=10)\n", (15553, 15709), False, 'from pop_finder import pop_finder\n'), ((15817, 15884), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unknowns is not pandas dataframe"""'}), "(ValueError, match='unknowns is not pandas dataframe')\n", (15830, 15884), False, 'import pytest\n'), ((15894, 16067), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': '"""unknowns"""', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns='unknowns', ukgen=ukgen, save_dir=\n 'tests/test_output', max_epochs=10)\n", (15915, 16067), False, 'from pop_finder import pop_finder\n'), ((16175, 16239), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unknowns exists, but is empty"""'}), "(ValueError, match='unknowns exists, but is empty')\n", (16188, 16239), False, 'import pytest\n'), ((16249, 16426), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns_empty', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns_empty, ukgen=ukgen, save_dir=\n 
'tests/test_output', max_epochs=10)\n", (16270, 16426), False, 'from pop_finder import pop_finder\n'), ((16534, 16597), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""ensemble should be a boolean"""'}), "(ValueError, match='ensemble should be a boolean')\n", (16547, 16597), False, 'import pytest\n'), ((16607, 16794), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'ensemble': '"""True"""', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, ensemble='True',\n save_dir='tests/test_output', max_epochs=10)\n", (16628, 16794), False, 'from pop_finder import pop_finder\n'), ((16915, 16982), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""try_stacking should be a boolean"""'}), "(ValueError, match='try_stacking should be a boolean')\n", (16928, 16982), False, 'import pytest\n'), ((16992, 17183), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'try_stacking': '"""True"""', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, try_stacking='True',\n save_dir='tests/test_output', max_epochs=10)\n", (17013, 17183), False, 'from pop_finder import pop_finder\n'), ((17304, 17365), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""nbags should be an integer"""'}), "(ValueError, match='nbags should be an integer')\n", (17317, 17365), False, 'import pytest\n'), ((17375, 17571), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 
'unknowns': 'unknowns', 'ukgen': 'ukgen', 'ensemble': '(True)', 'nbags': '(1.5)', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, ensemble=True, nbags=1.5,\n save_dir='tests/test_output', max_epochs=10)\n", (17396, 17571), False, 'from pop_finder import pop_finder\n'), ((17704, 17767), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""train_prop should be a float"""'}), "(ValueError, match='train_prop should be a float')\n", (17717, 17767), False, 'import pytest\n'), ((17777, 17962), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'train_prop': '(1)', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, train_prop=1, save_dir=\n 'tests/test_output', max_epochs=10)\n", (17798, 17962), False, 'from pop_finder import pop_finder\n'), ((18082, 18144), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""predict should be a boolean"""'}), "(ValueError, match='predict should be a boolean')\n", (18095, 18144), False, 'import pytest\n'), ((18154, 18341), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'predict': '"""True"""', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, predict='True', save_dir\n ='tests/test_output', max_epochs=10)\n", (18175, 18341), False, 'from pop_finder import pop_finder\n'), ((18461, 18523), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""save_dir should be a string"""'}), "(ValueError, 
match='save_dir should be a string')\n", (18474, 18523), False, 'import pytest\n'), ((18533, 18681), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_dir': '(2)', 'max_epochs': '(10)'}), '(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, save_dir=2, max_epochs=10)\n', (18554, 18681), False, 'from pop_finder import pop_finder\n'), ((18794, 18861), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""save_weights should be a boolean"""'}), "(ValueError, match='save_weights should be a boolean')\n", (18807, 18861), False, 'import pytest\n'), ((18871, 19062), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_weights': '"""True"""', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, save_weights='True',\n save_dir='tests/test_output', max_epochs=10)\n", (18892, 19062), False, 'from pop_finder import pop_finder\n'), ((19183, 19247), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""patience should be an integer"""'}), "(ValueError, match='patience should be an integer')\n", (19196, 19247), False, 'import pytest\n'), ((19257, 19442), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'patience': '(5.6)', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, patience=5.6, save_dir=\n 'tests/test_output', max_epochs=10)\n", (19278, 19442), False, 
'from pop_finder import pop_finder\n'), ((19562, 19628), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""batch_size should be an integer"""'}), "(ValueError, match='batch_size should be an integer')\n", (19575, 19628), False, 'import pytest\n'), ((19638, 19825), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'batch_size': '(5.6)', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, batch_size=5.6, save_dir\n ='tests/test_output', max_epochs=10)\n", (19659, 19825), False, 'from pop_finder import pop_finder\n'), ((19945, 20011), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""max_epochs should be an integer"""'}), "(ValueError, match='max_epochs should be an integer')\n", (19958, 20011), False, 'import pytest\n'), ((20021, 20193), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'max_epochs': '(5.6)', 'save_dir': '"""tests/test_output"""'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, max_epochs=5.6, save_dir\n ='tests/test_output')\n", (20042, 20193), False, 'from pop_finder import pop_finder\n'), ((20301, 20368), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""plot_history should be a boolean"""'}), "(ValueError, match='plot_history should be a boolean')\n", (20314, 20368), False, 'import pytest\n'), ((20378, 20569), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'plot_history': '"""True"""', 'save_dir': 
'"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, plot_history='True',\n save_dir='tests/test_output', max_epochs=10)\n", (20399, 20569), False, 'from pop_finder import pop_finder\n'), ((20690, 20760), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""mod_path should be a string or None"""'}), "(ValueError, match='mod_path should be a string or None')\n", (20703, 20760), False, 'import pytest\n'), ((20793, 20976), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'mod_path': '(2)', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, mod_path=2, save_dir=\n 'tests/test_output', max_epochs=10)\n", (20814, 20976), False, 'from pop_finder import pop_finder\n'), ((21096, 21163), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unknowns is not pandas dataframe"""'}), "(ValueError, match='unknowns is not pandas dataframe')\n", (21109, 21163), False, 'import pytest\n'), ((21173, 21343), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': '"""hello"""', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns='hello', ukgen=ukgen, save_dir=\n 'tests/test_output', max_epochs=10)\n", (21194, 21343), False, 'from pop_finder import pop_finder\n'), ((21451, 21515), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unknowns exists, but is empty"""'}), "(ValueError, match='unknowns exists, but is empty')\n", (21464, 21515), False, 'import pytest\n'), ((21525, 21702), 
'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns_empty', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns_empty, ukgen=ukgen, save_dir=\n 'tests/test_output', max_epochs=10)\n", (21546, 21702), False, 'from pop_finder import pop_finder\n'), ((21810, 21867), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""train_prop is too high"""'}), "(ValueError, match='train_prop is too high')\n", (21823, 21867), False, 'import pytest\n'), ((21877, 22076), 'pop_finder.pop_finder.pop_finder', 'pop_finder.pop_finder', ([], {'X_train': 'X_train', 'y_train': 'y_train', 'X_test': 'X_test', 'y_test': 'y_test', 'unknowns': 'unknowns', 'ukgen': 'ukgen', 'save_dir': '"""tests/test_output"""', 'max_epochs': '(10)', 'train_prop': '(0.99)', 'seed': '(1234)'}), "(X_train=X_train, y_train=y_train, X_test=X_test,\n y_test=y_test, unknowns=unknowns, ukgen=ukgen, save_dir=\n 'tests/test_output', max_epochs=10, train_prop=0.99, seed=1234)\n", (21898, 22076), False, 'from pop_finder import pop_finder\n'), ((23158, 23222), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Path to infile does not exist"""'}), "(ValueError, match='Path to infile does not exist')\n", (23171, 23222), False, 'import pytest\n'), ((23232, 23350), 'pop_finder.pop_finder.run_neural_net', 'pop_finder.run_neural_net', ([], {'infile': '"""hello"""', 'sample_data': 'sample_data2', 'patience': '(10)', 'max_epochs': '(2)', 'save_dir': 'save_path'}), "(infile='hello', sample_data=sample_data2,\n patience=10, max_epochs=2, save_dir=save_path)\n", (23257, 23350), False, 'from pop_finder import pop_finder\n'), ((23427, 23496), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Path to sample_data does not exist"""'}), "(ValueError, match='Path to 
sample_data does not exist')\n", (23440, 23496), False, 'import pytest\n'), ((23506, 23615), 'pop_finder.pop_finder.run_neural_net', 'pop_finder.run_neural_net', (['infile_all'], {'sample_data': '"""hello"""', 'patience': '(10)', 'max_epochs': '(2)', 'save_dir': 'save_path'}), "(infile_all, sample_data='hello', patience=10,\n max_epochs=2, save_dir=save_path)\n", (23531, 23615), False, 'from pop_finder import pop_finder\n'), ((23692, 23765), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""save_allele_counts should be a boolean"""'}), "(ValueError, match='save_allele_counts should be a boolean')\n", (23705, 23765), False, 'import pytest\n'), ((23798, 23928), 'pop_finder.pop_finder.run_neural_net', 'pop_finder.run_neural_net', (['infile_all', 'sample_data2'], {'save_allele_counts': '"""True"""', 'patience': '(10)', 'max_epochs': '(2)', 'save_dir': 'save_path'}), "(infile_all, sample_data2, save_allele_counts=\n 'True', patience=10, max_epochs=2, save_dir=save_path)\n", (23823, 23928), False, 'from pop_finder import pop_finder\n'), ((24016, 24093), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""mod_path should either be a string or None"""'}), "(ValueError, match='mod_path should either be a string or None')\n", (24029, 24093), False, 'import pytest\n'), ((24126, 24240), 'pop_finder.pop_finder.run_neural_net', 'pop_finder.run_neural_net', (['infile_all', 'sample_data2'], {'mod_path': '(2)', 'patience': '(10)', 'max_epochs': '(2)', 'save_dir': 'save_path'}), '(infile_all, sample_data2, mod_path=2, patience=10,\n max_epochs=2, save_dir=save_path)\n', (24151, 24240), False, 'from pop_finder import pop_finder\n'), ((24329, 24395), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Path to mod_path does not exist"""'}), "(ValueError, match='Path to mod_path does not exist')\n", (24342, 24395), False, 'import pytest\n'), ((24405, 24525), 'pop_finder.pop_finder.run_neural_net', 'pop_finder.run_neural_net', (['infile_all', 
'sample_data2'], {'mod_path': '"""hello"""', 'patience': '(10)', 'max_epochs': '(2)', 'save_dir': 'save_path'}), "(infile_all, sample_data2, mod_path='hello',\n patience=10, max_epochs=2, save_dir=save_path)\n", (24430, 24525), False, 'from pop_finder import pop_finder\n'), ((24614, 24677), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""train_prop should be a float"""'}), "(ValueError, match='train_prop should be a float')\n", (24627, 24677), False, 'import pytest\n'), ((24687, 24804), 'pop_finder.pop_finder.run_neural_net', 'pop_finder.run_neural_net', (['infile_all', 'sample_data2'], {'patience': '(10)', 'max_epochs': '(2)', 'save_dir': 'save_path', 'train_prop': '(1)'}), '(infile_all, sample_data2, patience=10, max_epochs\n =2, save_dir=save_path, train_prop=1)\n', (24712, 24804), False, 'from pop_finder import pop_finder\n'), ((24892, 24949), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""train_prop is too high"""'}), "(ValueError, match='train_prop is too high')\n", (24905, 24949), False, 'import pytest\n'), ((24959, 25079), 'pop_finder.pop_finder.run_neural_net', 'pop_finder.run_neural_net', (['infile_all', 'sample_data2'], {'patience': '(10)', 'max_epochs': '(2)', 'save_dir': 'save_path', 'train_prop': '(0.99)'}), '(infile_all, sample_data2, patience=10, max_epochs\n =2, save_dir=save_path, train_prop=0.99)\n', (24984, 25079), False, 'from pop_finder import pop_finder\n'), ((25213, 25273), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""save_dir should be string"""'}), "(ValueError, match='save_dir should be string')\n", (25226, 25273), False, 'import pytest\n'), ((25283, 25317), 'pop_finder.pop_finder.assign_plot', 'pop_finder.assign_plot', ([], {'save_dir': '(2)'}), '(save_dir=2)\n', (25305, 25317), False, 'from pop_finder import pop_finder\n'), ((25327, 25388), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""ensemble should be boolean"""'}), "(ValueError, match='ensemble should be 
boolean')\n", (25340, 25388), False, 'import pytest\n'), ((25398, 25455), 'pop_finder.pop_finder.assign_plot', 'pop_finder.assign_plot', ([], {'save_dir': '"""hello"""', 'ensemble': '"""True"""'}), "(save_dir='hello', ensemble='True')\n", (25420, 25455), False, 'from pop_finder import pop_finder\n'), ((25465, 25527), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""col_scheme should be string"""'}), "(ValueError, match='col_scheme should be string')\n", (25478, 25527), False, 'import pytest\n'), ((25537, 25607), 'pop_finder.pop_finder.assign_plot', 'pop_finder.assign_plot', ([], {'save_dir': '"""hello"""', 'ensemble': '(False)', 'col_scheme': '(1)'}), "(save_dir='hello', ensemble=False, col_scheme=1)\n", (25559, 25607), False, 'from pop_finder import pop_finder\n'), ((25679, 25766), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""pop_assign_freqs.csv does not exist in save_dir"""'}), "(ValueError, match=\n 'pop_assign_freqs.csv does not exist in save_dir')\n", (25692, 25766), False, 'import pytest\n'), ((25793, 25848), 'pop_finder.pop_finder.assign_plot', 'pop_finder.assign_plot', ([], {'save_dir': '"""hello"""', 'ensemble': '(True)'}), "(save_dir='hello', ensemble=True)\n", (25815, 25848), False, 'from pop_finder import pop_finder\n'), ((25858, 25934), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""pop_assign.csv does not exist in save_dir"""'}), "(ValueError, match='pop_assign.csv does not exist in save_dir')\n", (25871, 25934), False, 'import pytest\n'), ((25967, 26023), 'pop_finder.pop_finder.assign_plot', 'pop_finder.assign_plot', ([], {'save_dir': '"""hello"""', 'ensemble': '(False)'}), "(save_dir='hello', ensemble=False)\n", (25989, 26023), False, 'from pop_finder import pop_finder\n'), ((26345, 26411), 'os.remove', 'os.remove', (['"""tests/test_inputs/kfcv_test_output/structure_plot.png"""'], {}), "('tests/test_inputs/kfcv_test_output/structure_plot.png')\n", (26354, 26411), False, 'import os\n'), ((26775, 
26850), 'os.remove', 'os.remove', (['"""tests/test_inputs/kfcv_ensemble_test_output/structure_plot.png"""'], {}), "('tests/test_inputs/kfcv_ensemble_test_output/structure_plot.png')\n", (26784, 26850), False, 'import os\n'), ((26902, 26974), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Path to ensemble_preds does not exist"""'}), "(ValueError, match='Path to ensemble_preds does not exist')\n", (26915, 26974), False, 'import pytest\n'), ((27007, 27069), 'pop_finder.pop_finder.structure_plot', 'pop_finder.structure_plot', ([], {'save_dir': '"""incorrect"""', 'ensemble': '(True)'}), "(save_dir='incorrect', ensemble=True)\n", (27032, 27069), False, 'from pop_finder import pop_finder\n'), ((27080, 27143), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Path to preds does not exist"""'}), "(ValueError, match='Path to preds does not exist')\n", (27093, 27143), False, 'import pytest\n'), ((27176, 27239), 'pop_finder.pop_finder.structure_plot', 'pop_finder.structure_plot', ([], {'save_dir': '"""incorrect"""', 'ensemble': '(False)'}), "(save_dir='incorrect', ensemble=False)\n", (27201, 27239), False, 'from pop_finder import pop_finder\n'), ((27284, 27348), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""col_scheme should be a string"""'}), "(ValueError, match='col_scheme should be a string')\n", (27297, 27348), False, 'import pytest\n'), ((27381, 27487), 'pop_finder.pop_finder.structure_plot', 'pop_finder.structure_plot', ([], {'save_dir': '"""tests/test_inputs/kfcv_test_output"""', 'ensemble': '(False)', 'col_scheme': '(2)'}), "(save_dir='tests/test_inputs/kfcv_test_output',\n ensemble=False, col_scheme=2)\n", (27406, 27487), False, 'from pop_finder import pop_finder\n'), ((27561, 27619), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""save_dir does not exist"""'}), "(ValueError, match='save_dir does not exist')\n", (27574, 27619), False, 'import pytest\n'), ((27629, 27719), 
'pop_finder.contour_classifier.contour_classifier', 'contour_classifier.contour_classifier', ([], {'sample_data': 'sample_data1', 'save_dir': '"""incorrect"""'}), "(sample_data=sample_data1, save_dir=\n 'incorrect')\n", (27666, 27719), False, 'from pop_finder import contour_classifier\n'), ((27747, 27811), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""path to sample_data incorrect"""'}), "(ValueError, match='path to sample_data incorrect')\n", (27760, 27811), False, 'import pytest\n'), ((27821, 27927), 'pop_finder.contour_classifier.contour_classifier', 'contour_classifier.contour_classifier', ([], {'sample_data': '"""incorrect"""', 'save_dir': '"""tests/test_inputs/test_out"""'}), "(sample_data='incorrect', save_dir=\n 'tests/test_inputs/test_out')\n", (27858, 27927), False, 'from pop_finder import contour_classifier\n'), ((27955, 28020), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""path to genetic data incorrect"""'}), "(ValueError, match='path to genetic data incorrect')\n", (27968, 28020), False, 'import pytest\n'), ((28030, 28176), 'pop_finder.contour_classifier.contour_classifier', 'contour_classifier.contour_classifier', ([], {'sample_data': 'sample_data1', 'run_locator': '(True)', 'gen_dat': '"""incorrect"""', 'save_dir': '"""tests/test_inputs/test_out"""'}), "(sample_data=sample_data1, run_locator\n =True, gen_dat='incorrect', save_dir='tests/test_inputs/test_out')\n", (28067, 28176), False, 'from pop_finder import contour_classifier\n'), ((28241, 28296), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Cannot use hdf5 file"""'}), "(ValueError, match='Cannot use hdf5 file')\n", (28254, 28296), False, 'import pytest\n'), ((28306, 28451), 'pop_finder.contour_classifier.contour_classifier', 'contour_classifier.contour_classifier', ([], {'sample_data': 'sample_data1', 'run_locator': '(True)', 'gen_dat': 'infile_all', 'save_dir': '"""tests/test_inputs/test_out"""'}), "(sample_data=sample_data1, 
run_locator\n =True, gen_dat=infile_all, save_dir='tests/test_inputs/test_out')\n", (28343, 28451), False, 'from pop_finder import contour_classifier\n'), ((28516, 28561), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""bootstraps"""'}), "(ValueError, match='bootstraps')\n", (28529, 28561), False, 'import pytest\n'), ((28571, 28702), 'pop_finder.contour_classifier.contour_classifier', 'contour_classifier.contour_classifier', ([], {'sample_data': 'sample_data1', 'nboots': '(25)', 'save_dir': '"""tests/test_inputs/test_out"""', 'multi_iter': '(1)'}), "(sample_data=sample_data1, nboots=25,\n save_dir='tests/test_inputs/test_out', multi_iter=1)\n", (28608, 28702), False, 'from pop_finder import contour_classifier\n'), ((28768, 28813), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""bootstraps"""'}), "(ValueError, match='bootstraps')\n", (28781, 28813), False, 'import pytest\n'), ((28823, 28954), 'pop_finder.contour_classifier.contour_classifier', 'contour_classifier.contour_classifier', ([], {'sample_data': 'sample_data1', 'nboots': '(25)', 'save_dir': '"""tests/test_inputs/test_out"""', 'multi_iter': '(1)'}), "(sample_data=sample_data1, nboots=25,\n save_dir='tests/test_inputs/test_out', multi_iter=1)\n", (28860, 28954), False, 'from pop_finder import contour_classifier\n'), ((29020, 29105), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Something went wrong with the prediction data"""'}), "(ValueError, match='Something went wrong with the prediction data'\n )\n", (29033, 29105), False, 'import pytest\n'), ((29132, 29239), 'pop_finder.contour_classifier.contour_classifier', 'contour_classifier.contour_classifier', ([], {'sample_data': 'sample_data3', 'save_dir': '"""tests/test_inputs/test_out"""'}), "(sample_data=sample_data3, save_dir=\n 'tests/test_inputs/test_out')\n", (29169, 29239), False, 'from pop_finder import contour_classifier\n'), ((29279, 29379), 'pytest.raises', 'pytest.raises', (['ValueError'], 
{'match': '"""sample_data file should have columns x, y, pop, and sampleID"""'}), "(ValueError, match=\n 'sample_data file should have columns x, y, pop, and sampleID')\n", (29292, 29379), False, 'import pytest\n'), ((29406, 29513), 'pop_finder.contour_classifier.contour_classifier', 'contour_classifier.contour_classifier', ([], {'sample_data': 'sample_data4', 'save_dir': '"""tests/test_inputs/test_out"""'}), "(sample_data=sample_data4, save_dir=\n 'tests/test_inputs/test_out')\n", (29443, 29513), False, 'from pop_finder import contour_classifier\n'), ((29553, 29622), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': '"""Too few points to generate contours"""'}), "(Exception, match='Too few points to generate contours')\n", (29566, 29622), False, 'import pytest\n'), ((29655, 29833), 'pop_finder.contour_classifier.contour_classifier', 'contour_classifier.contour_classifier', ([], {'sample_data': 'sample_data2', 'run_locator': '(True)', 'gen_dat': 'infile_all_vcf', 'nboots': '(1)', 'max_epochs': '(1)', 'save_dir': '"""tests/test_inputs/test_out"""'}), "(sample_data=sample_data2, run_locator\n =True, gen_dat=infile_all_vcf, nboots=1, max_epochs=1, save_dir=\n 'tests/test_inputs/test_out')\n", (29692, 29833), False, 'from pop_finder import contour_classifier\n'), ((31204, 31213), 'numpy.max', 'np.max', (['Z'], {}), '(Z)\n', (31210, 31213), True, 'import numpy as np\n'), ((31344, 31365), 'numpy.sort', 'np.sort', (['(-cset.levels)'], {}), '(-cset.levels)\n', (31351, 31365), True, 'import numpy as np\n'), ((31559, 31623), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""path to sample_data incorrect"""'}), "(ValueError, match='path to sample_data incorrect')\n", (31572, 31623), False, 'import pytest\n'), ((31633, 31744), 'pop_finder.contour_classifier.kfcv', 'contour_classifier.kfcv', ([], {'sample_data': '"""incorrect"""', 'gen_dat': 'infile_all_vcf', 'save_dir': '"""tests/test_inputs/kfcv"""'}), "(sample_data='incorrect', 
gen_dat=infile_all_vcf,\n save_dir='tests/test_inputs/kfcv')\n", (31656, 31744), False, 'from pop_finder import contour_classifier\n')] |
# Compatibility Python 2/3
from __future__ import division, print_function, absolute_import
from builtins import range
# ----------------------------------------------------------------------------------------------------------------------
import opto
from dotmap import DotMap
import matplotlib.pyplot as plt
import numpy as np
import opto.data as rdata
import opto.utils as rutils
from opto.functions import *
import logging
# --- Logging setup: root logger at DEBUG, mirrored to a file under NAMEFILE/ ---
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
NAMEFILE = 'PAREGO'  # output folder name / run label
rutils.create_folder(nameFolder=NAMEFILE)
# To log file
fh = logging.FileHandler(NAMEFILE + '/logs.log')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# --- Optimization: run PAREGO on the MOP2 multi-objective task for 50 evaluations ---
task = MOP2()
stopCriteria = opto.opto.classes.StopCriteria(maxEvals=50)
p = DotMap()
p.verbosity = 1
p.visualize = 1
opt = opto.PAREGO(parameters=p, task=task, stopCriteria=stopCriteria)
opt.optimize()
logs = opt.get_logs()
logs.save(NAMEFILE + '/optimization.log')
# NOTE(review): dead store -- `logs` is immediately overwritten by the load below
# (presumably a round-trip check of save/load; confirm before removing).
logs = []
logs = rdata.load(NAMEFILE + '/optimization.log')
# --- Post-processing: Pareto front and hypervolume of the evaluated points ---
fx = logs.get_objectives()
x = logs.get_parameters()
PF_fx, PF_x = opto.opto.utils.paretoFront(fx, parameters=x)  # Compute PF
# print(PF_x)
H = opto.opto.utils.HyperVolume(fx, referencePoint=task.get_hypervolume_reference_point())  # Hypervolume
print('Hypervolume: %f' % (H))
print('Elapsed Time: %f [s]' % (logs.get_final_time()))
# Objective-space scatter with the Pareto front overlaid (2-objective tasks only)
if task.get_n_objectives() == 2:
    plt.figure()
    plt.ioff()
    plt.scatter(fx[0], fx[1])
    opto.opto.plot.paretoFront(PF_fx, drawConnectingLines=False)
    plt.xlabel('Obj.Func. 1')
    plt.ylabel('Obj.Func. 2')
# Only works for 2D functions
if task.get_n_parameters() == 2:
    plt.figure()
    # all evaluated points in blue, Pareto-optimal points in red
    plt.scatter(np.array(x[0]).squeeze(), np.array(x[1]).squeeze(), color='blue', clip_on=False, s=50)
    plt.scatter(np.array(PF_x[0]).squeeze(), np.array(PF_x[1]).squeeze(), color='red', clip_on=False, s=80)
    plt.xlabel('Variable 1')
    plt.ylabel('Variable 2')
    plt.xlim(task.get_bounds().to_list(0))
    plt.ylim(task.get_bounds().to_list(1))
plt.show()
| [
"opto.opto.plot.paretoFront",
"matplotlib.pyplot.show",
"logging.FileHandler",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.scatter",
"opto.PAREGO",
"opto.opto.classes.StopCriteria",
"dotmap.DotMap",
"opto.utils.create_folder",
"matplotlib.pyplot.figure",
"numpy.array",
"opto.data.load",
"op... | [((438, 457), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (455, 457), False, 'import logging\n'), ((510, 551), 'opto.utils.create_folder', 'rutils.create_folder', ([], {'nameFolder': 'NAMEFILE'}), '(nameFolder=NAMEFILE)\n', (530, 551), True, 'import opto.utils as rutils\n'), ((572, 615), 'logging.FileHandler', 'logging.FileHandler', (["(NAMEFILE + '/logs.log')"], {}), "(NAMEFILE + '/logs.log')\n", (591, 615), False, 'import logging\n'), ((695, 738), 'opto.opto.classes.StopCriteria', 'opto.opto.classes.StopCriteria', ([], {'maxEvals': '(50)'}), '(maxEvals=50)\n', (725, 738), False, 'import opto\n'), ((744, 752), 'dotmap.DotMap', 'DotMap', ([], {}), '()\n', (750, 752), False, 'from dotmap import DotMap\n'), ((791, 854), 'opto.PAREGO', 'opto.PAREGO', ([], {'parameters': 'p', 'task': 'task', 'stopCriteria': 'stopCriteria'}), '(parameters=p, task=task, stopCriteria=stopCriteria)\n', (802, 854), False, 'import opto\n'), ((952, 994), 'opto.data.load', 'rdata.load', (["(NAMEFILE + '/optimization.log')"], {}), "(NAMEFILE + '/optimization.log')\n", (962, 994), True, 'import opto.data as rdata\n'), ((1064, 1109), 'opto.opto.utils.paretoFront', 'opto.opto.utils.paretoFront', (['fx'], {'parameters': 'x'}), '(fx, parameters=x)\n', (1091, 1109), False, 'import opto\n'), ((1370, 1382), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1380, 1382), True, 'import matplotlib.pyplot as plt\n'), ((1387, 1397), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (1395, 1397), True, 'import matplotlib.pyplot as plt\n'), ((1402, 1427), 'matplotlib.pyplot.scatter', 'plt.scatter', (['fx[0]', 'fx[1]'], {}), '(fx[0], fx[1])\n', (1413, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1432, 1492), 'opto.opto.plot.paretoFront', 'opto.opto.plot.paretoFront', (['PF_fx'], {'drawConnectingLines': '(False)'}), '(PF_fx, drawConnectingLines=False)\n', (1458, 1492), False, 'import opto\n'), ((1497, 1522), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Obj.Func. 1"""'], {}), "('Obj.Func. 1')\n", (1507, 1522), True, 'import matplotlib.pyplot as plt\n'), ((1527, 1552), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Obj.Func. 2"""'], {}), "('Obj.Func. 2')\n", (1537, 1552), True, 'import matplotlib.pyplot as plt\n'), ((1621, 1633), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1631, 1633), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1873), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Variable 1"""'], {}), "('Variable 1')\n", (1859, 1873), True, 'import matplotlib.pyplot as plt\n'), ((1878, 1902), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Variable 2"""'], {}), "('Variable 2')\n", (1888, 1902), True, 'import matplotlib.pyplot as plt\n'), ((1993, 2003), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2001, 2003), True, 'import matplotlib.pyplot as plt\n'), ((1650, 1664), 'numpy.array', 'np.array', (['x[0]'], {}), '(x[0])\n', (1658, 1664), True, 'import numpy as np\n'), ((1676, 1690), 'numpy.array', 'np.array', (['x[1]'], {}), '(x[1])\n', (1684, 1690), True, 'import numpy as np\n'), ((1753, 1770), 'numpy.array', 'np.array', (['PF_x[0]'], {}), '(PF_x[0])\n', (1761, 1770), True, 'import numpy as np\n'), ((1782, 1799), 'numpy.array', 'np.array', (['PF_x[1]'], {}), '(PF_x[1])\n', (1790, 1799), True, 'import numpy as np\n')] |
# This code demonstrates subtle crime I with Compressed sensing
# Run the *fast* experiment to see the images & NRMSE valus on top of them.
# Run the *long* experiment (10 slices x 3 samlpling mask realizations each) to get statistics.
# Then run the script CS_DL_knee_prep_NRMSE_figure.py to produce the statistics graphs
#############################################################
# calibration run - this code (1_knee_calib_CS_lada.py)
##########################################################################################
import os
import h5py
import numpy as np
import sigpy as sp
from sigpy import mri as mr
from subtle_data_crimes.functions.sampling_funcs import gen_2D_var_dens_mask
#################################################################################
## Experiment set-up
#################################################################################
R = 4  # acceleration (undersampling) factor
pad_ratio_vec = np.array([1, 2])  # zero-padding ratios to compare
sampling_type_vec = np.array([1, 2])  # 0 = random, 1 = weak var-dens, 2 = strong var-dens
sampling_flag = '2D'
num_slices = 1  # number of slices to process per pad ratio
im_type_str = 'full_im'  # Options: 'full_im' / 'blocks' (blocks are used for training Deep Learning models, not for CS & DictL).
data_type = 'pathology_1'
# data_type = 'pathology_2'
if data_type == 'pathology_1':
    pathology_slice = 22
# NOTE(review): pathology_slice is only assigned for 'pathology_1'; choosing
# 'pathology_2' would raise a NameError in the loop below -- confirm intended.
lamda = 1e-3  # regularization weight passed to the L1-wavelet CS recon
gold_dict = {}  # a python dictionary that will contain the gold standard recons
CS_recs_dict = {}  # a python dictionary that will contain the reconstructions obtained with Compressed Sensing
# #################################################################################
# ## Experiments
# #################################################################################
# For each zero-padding ratio: load the pathology scan, reconstruct the gold
# standard (inverse FFT of the fully-sampled k-space) and a Compressed Sensing
# (L1-wavelet) recon for every variable-density sampling scheme, and stash the
# rotated magnitude images in gold_dict / CS_recs_dict keyed by
# (pad_ratio, samp_type).
for pad_i, pad_ratio in enumerate(pad_ratio_vec):
    print(f'##################### pad ratio {pad_ratio} ################################')

    t = 0  # counts loaded scans. each scan contains multiple slices.
    ns = 0  # counts loaded slices

    # integer label for whole-number padding ratios (appears unused below;
    # kept for parity with related scripts)
    if (pad_ratio == 1) | (pad_ratio == 2):
        pad_ratio_str = int(pad_ratio)

    # # update the next field and make sure that it's the same one as defined in Fig4_pathology_example/data_prep.py
    FatSat_processed_data_folder = "/mikQNAP/NYU_knee_data/efrat/public_repo_check/zpad_FatSat_data/"

    data_path = FatSat_processed_data_folder + data_type + "/pad_" + str(
        int(100 * pad_ratio)) + "/" + im_type_str + "/"

    files_list = os.listdir(data_path)

    while ns < num_slices:
        print(' === loading h5 file {} === '.format(t))

        # Load k-space data
        filename_h5 = data_path + files_list[t]
        f = h5py.File(filename_h5, 'r')
        t += 1  # update the number of LOADED scans. Each scan contains multiple slices

        kspace_preprocessed_multislice = f["kspace"]
        im_RSS_multislice = f[
            "reconstruction"]  # these are the RSS images produced from the zero-padded k-space - see fig. 1 in the paper
        n_slices_in_scan = kspace_preprocessed_multislice.shape[0]

        print(f'pad_ratio {pad_ratio} t={t}')

        for s_i in range(n_slices_in_scan):
            if s_i == pathology_slice:
                print(f'slice {s_i}')
                kspace_slice = kspace_preprocessed_multislice[s_i, :, :].squeeze()
                im_RSS = im_RSS_multislice[s_i, :, :].squeeze()
                ns += 1  # number of slices
                print(f'ns={ns}')

                imSize = im_RSS.shape
                kspace_slice = np.expand_dims(kspace_slice, axis=0)  # restore coil dimension (for Sigpy data format)
                _, NX_padded, NY_padded = kspace_slice.shape  # get size. Notice: the first one is the coils dimension
                virtual_sens_maps = np.ones_like(
                    kspace_slice)  # sens maps are all ones because we have a "single-coil" magnitude image.

                # ------- gold standard rec -----------------
                rec_gold = sp.ifft(kspace_slice)
                rec_gold = rec_gold[0, :, :].squeeze()  # remove artificial coil dim
                rec_gold_rotated = np.abs(np.rot90(rec_gold, 2))

                # check NaN values
                # bug fix: the message previously formatted an undefined name `n`
                # (NameError if the assert ever fired); report the scan counter `t`.
                assert np.isnan(rec_gold).any() == False, 'there are NaN values in rec_gold! scan {} slice {}'.format(t, s_i)

                img_shape = np.array([NX_padded, NY_padded])

                # ----- Compressed Sensing recon ----------
                for j in range(sampling_type_vec.shape[0]):
                    if sampling_type_vec[j] == 0:  # random uniform
                        samp_type = 'random'
                    elif sampling_type_vec[j] == 1:  # weak variable-density
                        samp_type = 'weak'
                    elif sampling_type_vec[j] == 2:  # strong variable-density
                        samp_type = 'strong'

                    data_filename = f'{data_type}_R{R}_{samp_type}_VD'

                    # calib is assumed to be 12 for NX=640; scale it with the image size
                    calib_x = int(12 * im_RSS.shape[0] / 640)
                    calib_y = int(12 * im_RSS.shape[1] / 640)
                    calib = np.array([calib_x, calib_y])
                    mask, pdf, poly_degree = gen_2D_var_dens_mask(R, imSize, samp_type, calib=calib)

                    mask_expanded = np.expand_dims(mask,
                                                   axis=0)  # add the empty coils dimension, for compatibility with Sigpy's dimension convention
                    kspace_sampled = np.multiply(kspace_slice, mask_expanded)

                    rec = mr.app.L1WaveletRecon(kspace_sampled, virtual_sens_maps, lamda=lamda, show_pbar=False).run()
                    rec_CS_rotated = np.abs(np.rot90(rec, 2))

                    gold_dict[pad_ratio, samp_type] = rec_gold_rotated
                    CS_recs_dict[pad_ratio, samp_type] = rec_CS_rotated
                    # (large commented-out debugging / zoomed-figure plotting
                    # snippets removed for clarity)
# save the recons
results_dir = data_type + f'_results_R{R}/'
# create the output folder on the first run
if not os.path.exists(results_dir):
    os.makedirs(results_dir)
# gold-standard recons, keyed by (pad_ratio, samp_type)
gold_filename = results_dir + '/gold_dict.npy'
np.save(gold_filename, gold_dict)
# Compressed Sensing recons, same keys
CS_rec_filename = results_dir + '/CS_dict.npy'
np.save(CS_rec_filename, CS_recs_dict)
| [
"h5py.File",
"numpy.save",
"numpy.ones_like",
"os.makedirs",
"sigpy.ifft",
"numpy.multiply",
"os.path.exists",
"numpy.expand_dims",
"numpy.isnan",
"sigpy.mri.app.L1WaveletRecon",
"subtle_data_crimes.functions.sampling_funcs.gen_2D_var_dens_mask",
"numpy.rot90",
"numpy.array",
"os.listdir"
... | [((935, 951), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (943, 951), True, 'import numpy as np\n'), ((975, 991), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (983, 991), True, 'import numpy as np\n'), ((10189, 10222), 'numpy.save', 'np.save', (['gold_filename', 'gold_dict'], {}), '(gold_filename, gold_dict)\n', (10196, 10222), True, 'import numpy as np\n'), ((10272, 10310), 'numpy.save', 'np.save', (['CS_rec_filename', 'CS_recs_dict'], {}), '(CS_rec_filename, CS_recs_dict)\n', (10279, 10310), True, 'import numpy as np\n'), ((2490, 2511), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (2500, 2511), False, 'import os\n'), ((10079, 10106), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (10093, 10106), False, 'import os\n'), ((10113, 10137), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (10124, 10137), False, 'import os\n'), ((2766, 2793), 'h5py.File', 'h5py.File', (['filename_h5', '"""r"""'], {}), "(filename_h5, 'r')\n", (2775, 2793), False, 'import h5py\n'), ((3652, 3688), 'numpy.expand_dims', 'np.expand_dims', (['kspace_slice'], {'axis': '(0)'}), '(kspace_slice, axis=0)\n', (3666, 3688), True, 'import numpy as np\n'), ((3898, 3924), 'numpy.ones_like', 'np.ones_like', (['kspace_slice'], {}), '(kspace_slice)\n', (3910, 3924), True, 'import numpy as np\n'), ((4117, 4138), 'sigpy.ifft', 'sp.ifft', (['kspace_slice'], {}), '(kspace_slice)\n', (4124, 4138), True, 'import sigpy as sp\n'), ((4824, 4856), 'numpy.array', 'np.array', (['[NX_padded, NY_padded]'], {}), '([NX_padded, NY_padded])\n', (4832, 4856), True, 'import numpy as np\n'), ((4268, 4289), 'numpy.rot90', 'np.rot90', (['rec_gold', '(2)'], {}), '(rec_gold, 2)\n', (4276, 4289), True, 'import numpy as np\n'), ((5639, 5667), 'numpy.array', 'np.array', (['[calib_x, calib_y]'], {}), '([calib_x, calib_y])\n', (5647, 5667), True, 'import numpy as np\n'), ((5716, 5771), 
'subtle_data_crimes.functions.sampling_funcs.gen_2D_var_dens_mask', 'gen_2D_var_dens_mask', (['R', 'imSize', 'samp_type'], {'calib': 'calib'}), '(R, imSize, samp_type, calib=calib)\n', (5736, 5771), False, 'from subtle_data_crimes.functions.sampling_funcs import gen_2D_var_dens_mask\n'), ((5811, 5839), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(0)'}), '(mask, axis=0)\n', (5825, 5839), True, 'import numpy as np\n'), ((6016, 6056), 'numpy.multiply', 'np.multiply', (['kspace_slice', 'mask_expanded'], {}), '(kspace_slice, mask_expanded)\n', (6027, 6056), True, 'import numpy as np\n'), ((6224, 6240), 'numpy.rot90', 'np.rot90', (['rec', '(2)'], {}), '(rec, 2)\n', (6232, 6240), True, 'import numpy as np\n'), ((4571, 4589), 'numpy.isnan', 'np.isnan', (['rec_gold'], {}), '(rec_gold)\n', (4579, 4589), True, 'import numpy as np\n'), ((6086, 6176), 'sigpy.mri.app.L1WaveletRecon', 'mr.app.L1WaveletRecon', (['kspace_sampled', 'virtual_sens_maps'], {'lamda': 'lamda', 'show_pbar': '(False)'}), '(kspace_sampled, virtual_sens_maps, lamda=lamda,\n show_pbar=False)\n', (6107, 6176), True, 'from sigpy import mri as mr\n')] |
import numpy as np
import torch as th
from tpp.processes.hawkes import neg_log_likelihood_old as nll_old
from tpp.processes.hawkes import neg_log_likelihood as nll_new
from tpp.utils.keras_preprocessing.sequence import pad_sequences
def test_nll():
    """Check that the batched Hawkes NLL matches the per-sequence implementation."""
    num_sequences = 10
    alpha = 0.7
    mu = 0.1
    padding_value = -1.
    window = 100
    # Random sequence lengths, then sorted random event times for each sequence.
    lengths = [np.random.randint(low=1, high=10) for _ in range(num_sequences)]
    sequences = [th.sort(th.rand(size=[n])).values for n in lengths]
    # Reference: evaluate every sequence independently with the old routine.
    reference = th.stack(
        [nll_old(mu=mu, alpha=alpha, points=seq, window=window)
         for seq in sequences],
        dim=0)
    # Batched: pad the sequences to a rectangle and mask out the padding.
    padded = th.from_numpy(
        pad_sequences(sequences, padding="post", dtype=np.float32,
                      value=padding_value))
    mask = (padded != padding_value).type(padded.dtype)
    batched = nll_new(mu=mu, alpha=alpha,
                      sequences_padded=padded, sequence_mask=mask,
                      window=window)
    assert np.allclose(reference, batched)
# Allow running this test directly as a script, without a pytest runner.
if __name__ == "__main__":
    test_nll()
| [
"torch.stack",
"tpp.utils.keras_preprocessing.sequence.pad_sequences",
"numpy.allclose",
"tpp.processes.hawkes.neg_log_likelihood_old",
"numpy.random.randint",
"torch.rand",
"tpp.processes.hawkes.neg_log_likelihood",
"torch.from_numpy"
] | [((609, 631), 'torch.stack', 'th.stack', (['nll_1'], {'dim': '(0)'}), '(nll_1, dim=0)\n', (617, 631), True, 'import torch as th\n'), ((656, 728), 'tpp.utils.keras_preprocessing.sequence.pad_sequences', 'pad_sequences', (['my_points'], {'padding': '"""post"""', 'dtype': 'np.float32', 'value': 'pad_id'}), "(my_points, padding='post', dtype=np.float32, value=pad_id)\n", (669, 728), False, 'from tpp.utils.keras_preprocessing.sequence import pad_sequences\n'), ((761, 792), 'torch.from_numpy', 'th.from_numpy', (['my_points_padded'], {}), '(my_points_padded)\n', (774, 792), True, 'import torch as th\n'), ((878, 991), 'tpp.processes.hawkes.neg_log_likelihood', 'nll_new', ([], {'mu': 'my_mu', 'alpha': 'my_alpha', 'sequences_padded': 'my_points_padded', 'sequence_mask': 'my_mask', 'window': 'my_window'}), '(mu=my_mu, alpha=my_alpha, sequences_padded=my_points_padded,\n sequence_mask=my_mask, window=my_window)\n', (885, 991), True, 'from tpp.processes.hawkes import neg_log_likelihood as nll_new\n'), ((1025, 1050), 'numpy.allclose', 'np.allclose', (['nll_1', 'nll_2'], {}), '(nll_1, nll_2)\n', (1036, 1050), True, 'import numpy as np\n'), ((357, 390), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)'}), '(low=1, high=10)\n', (374, 390), True, 'import numpy as np\n'), ((502, 563), 'tpp.processes.hawkes.neg_log_likelihood_old', 'nll_old', ([], {'mu': 'my_mu', 'alpha': 'my_alpha', 'points': 'p', 'window': 'my_window'}), '(mu=my_mu, alpha=my_alpha, points=p, window=my_window)\n', (509, 563), True, 'from tpp.processes.hawkes import neg_log_likelihood_old as nll_old\n'), ((439, 456), 'torch.rand', 'th.rand', ([], {'size': '[s]'}), '(size=[s])\n', (446, 456), True, 'import torch as th\n')] |
import numpy as np
def is_sklearn_linear_classifier(obj):
    """
    Duck-typed check for a fitted sklearn linear classifier with a binary
    outcome: the object must expose exactly two classes plus the linear
    parameters ('coef_' and 'intercept_').
    :param obj: object
    """
    if not hasattr(obj, 'classes_') or len(obj.classes_) != 2:
        return False
    return hasattr(obj, 'coef_') and hasattr(obj, 'intercept_')
def parse_classifier_args(*args, **kwargs):
    """
    helper function to parse coefficients and intercept from linear classifier arguments

    *args and **kwargs can contain either:
        - sklearn classifiers with 'coef_' and 'intercept_' fields (keyword: 'clf', 'classifier')
        - vector of coefficients (keyword: 'coefficients'); lists, tuples and
          np.ndarrays are accepted
        - intercept: set to 0 by default (keyword: 'intercept')

    returns:
        w - np.array containing coefficients of linear classifier (finite, flattened)
        t - float containing intercept of linear classifier (finite, float)

    raises:
        ValueError if fails to parse classifier arguments
    """
    w, t = None, None

    if 'clf' in kwargs:
        assert is_sklearn_linear_classifier(kwargs['clf'])
        w = kwargs['clf'].coef_
        t = kwargs['clf'].intercept_

    elif 'classifier' in kwargs:
        assert is_sklearn_linear_classifier(kwargs['classifier'])
        w = kwargs['classifier'].coef_
        t = kwargs['classifier'].intercept_

    elif 'coefficients' in kwargs:
        w = kwargs.get('coefficients')
        t = kwargs.get('intercept', 0.0)

    elif len(args) == 1:
        if is_sklearn_linear_classifier(args[0]):
            w = args[0].coef_
            t = args[0].intercept_
        elif isinstance(args[0], (list, tuple, np.ndarray)):
            # a bare coefficient vector implies intercept 0
            w = np.array(args[0]).flatten()
            t = 0.0
        else:
            # bug fix: previously this case fell through with w = None and
            # crashed later with a confusing TypeError instead of the
            # documented ValueError
            raise ValueError('failed to match classifier arguments')

    elif len(args) == 2:
        w = args[0]
        t = float(args[1])

    else:
        raise ValueError('failed to match classifier arguments')

    w = np.array(w).flatten()
    t = float(t)

    assert np.isfinite(w).all()
    assert np.isfinite(t)

    return w, t
| [
"numpy.array",
"numpy.isfinite"
] | [((2083, 2097), 'numpy.isfinite', 'np.isfinite', (['t'], {}), '(t)\n', (2094, 2097), True, 'import numpy as np\n'), ((1998, 2009), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (2006, 2009), True, 'import numpy as np\n'), ((2050, 2064), 'numpy.isfinite', 'np.isfinite', (['w'], {}), '(w)\n', (2061, 2064), True, 'import numpy as np\n'), ((1780, 1797), 'numpy.array', 'np.array', (['args[0]'], {}), '(args[0])\n', (1788, 1797), True, 'import numpy as np\n')] |
import numpy as np
def generate_features(draw_graphs, raw_data, axes, sampling_freq, scale_axes):
    """Scale an interleaved 1D signal, axis by axis.

    ``raw_data`` arrives flat and interleaved; it is reshaped to a
    (samples, axes) matrix, every axis column is multiplied by ``scale_axes``,
    and the scaled columns are concatenated column-after-column into the
    returned feature list.  ``draw_graphs`` and ``sampling_freq`` are part of
    the expected signature but are not used in this implementation.
    """
    n_axes = len(axes)
    # reshape the flat interleaved buffer into one column per axis
    samples = raw_data.reshape(int(len(raw_data) / n_axes), n_axes)
    features = []
    graphs = []
    for axis_idx in range(n_axes):
        # scale the whole column for this axis in one vectorized step
        scaled_column = samples[:, axis_idx].astype(float) * scale_axes
        features.extend(scaled_column)
    return {'features': features, 'graphs': graphs}
| [
"numpy.array"
] | [((511, 522), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (519, 522), True, 'import numpy as np\n')] |
import os
from random import randint, seed
import torch
import numpy as np
import cv2
'''
Code adapted from https://github.com/MathiasGruber/PConv-Keras/blob/master/libs/util.py
'''
class MaskGenerator:
    # Produces binary masks for inpainting training, either drawn procedurally
    # with OpenCV or loaded (and augmented) from a directory of mask images.

    def __init__(self, channels=1, rand_seed=None, filepath=None, channels_first=True):
        """Convenience functions for generating masks to be used for inpainting training

        Note: height/width are set later via sample(); until then they are None
        and procedural generation cannot run.

        Keyword Arguments:
            channels {int} -- Channels to output (default: {1})
            rand_seed {int or None} -- Random seed (default: {None})
            filepath {str or None} -- Load masks from filepath. If None, generate masks with OpenCV (default: {None})
            channels_first {bool} -- If True, generated masks are returned as (C, H, W) instead of (H, W, C)
        """
        self.height = None
        self.width = None
        self.channels = channels
        self.filepath = filepath
        self.channels_first = channels_first

        # If filepath supplied, load the list of masks within the directory
        self.mask_files = []
        if self.filepath:
            filenames = [f for f in os.listdir(self.filepath)]
            # keep only common image file types
            self.mask_files = [f for f in filenames if
                               any(filetype in f.lower() for filetype in ['.jpeg', '.png', '.jpg'])]
            print(">> Found {} masks in {}".format(len(self.mask_files), self.filepath))

        # Seed for reproducibility
        # NOTE(review): a falsy seed (0) is silently ignored by this check
        if rand_seed:
            seed(rand_seed)

    def _generate_mask(self):
        """Generates a random irregular mask with lines, circles and elipses.

        Draws random white (value 1) shapes on a black canvas, then inverts,
        so holes end up as 0 and kept pixels as 1.  Returns a torch tensor,
        channels-first when self.channels_first is set.
        """
        img = np.zeros((self.height, self.width, self.channels), np.uint8)

        # Set size scale -- upper bound for stroke thickness / radius
        size = int((self.width + self.height) * 0.03)
        if self.width < 64 or self.height < 64:
            raise Exception("Width and Height of mask must be at least 64!")

        # Draw random lines
        for _ in range(randint(1, 20)):
            x1, x2 = randint(1, self.width), randint(1, self.width)
            y1, y2 = randint(1, self.height), randint(1, self.height)
            thickness = randint(3, size)
            cv2.line(img, (x1, y1), (x2, y2), (1, 1, 1), thickness)

        # Draw random circles (filled, thickness -1)
        for _ in range(randint(1, 20)):
            x1, y1 = randint(1, self.width), randint(1, self.height)
            radius = randint(3, size)
            cv2.circle(img, (x1, y1), radius, (1, 1, 1), -1)

        # Draw random ellipses
        for _ in range(randint(1, 20)):
            x1, y1 = randint(1, self.width), randint(1, self.height)
            s1, s2 = randint(1, self.width), randint(1, self.height)
            a1, a2, a3 = randint(3, 180), randint(3, 180), randint(3, 180)
            thickness = randint(3, size)
            cv2.ellipse(img, (x1, y1), (s1, s2), a1, a2, a3, (1, 1, 1), thickness)

        # invert: drawn shapes become holes (0), background stays 1
        img_1 = 1 - img
        if self.channels_first:
            img_1 = np.moveaxis(img_1, -1, 0)  # (H, W, C) -> (C, H, W)
        img_tensor = torch.tensor(img_1)
        return img_tensor

    def _load_mask(self, rotation=True, dilation=True, cropping=True):
        """Loads a random mask image from disk, and optionally augments it.

        Returns a binary uint8 (H, W[, C]) array; pixels > 1 map to 1.
        """
        # Read image -- pick one mask file uniformly at random
        mask = cv2.imread(os.path.join(self.filepath, np.random.choice(self.mask_files, 1, replace=False)[0]))

        # Random rotation (about the image center, scaled 1.5x)
        if rotation:
            rand = np.random.randint(-180, 180)
            M = cv2.getRotationMatrix2D((mask.shape[1] / 2, mask.shape[0] / 2), rand, 1.5)
            mask = cv2.warpAffine(mask, M, (mask.shape[1], mask.shape[0]))

        # Random dilation
        # NOTE(review): despite the flag name this calls cv2.erode, which
        # shrinks the white region -- confirm whether dilation was intended.
        if dilation:
            rand = np.random.randint(5, 47)
            kernel = np.ones((rand, rand), np.uint8)
            mask = cv2.erode(mask, kernel, iterations=1)

        # Random cropping to the requested (self.height, self.width) window
        if cropping:
            x = np.random.randint(0, mask.shape[1] - self.width)
            y = np.random.randint(0, mask.shape[0] - self.height)
            mask = mask[y:y + self.height, x:x + self.width]

        # binarize: anything above 1 becomes 1
        return (mask > 1).astype(np.uint8)

    def sample(self, height=None, width=None, random_seed=None):
        """Retrieve a random mask.

        height/width update the generator's target size when given; the most
        recently supplied values are reused otherwise (they must have been set
        at least once before procedural generation works).
        """
        if height is not None:
            self.height = height
        if width is not None:
            self.width = width
        if random_seed:
            seed(random_seed)
        # prefer masks loaded from disk when a mask directory was supplied
        if self.filepath and len(self.mask_files) > 0:
            return self._load_mask()
        else:
            return self._generate_mask()
"cv2.line",
"os.listdir",
"numpy.moveaxis",
"cv2.circle",
"random.randint",
"numpy.zeros",
"numpy.ones",
"cv2.warpAffine",
"cv2.ellipse",
"random.seed",
"numpy.random.randint",
"numpy.random.choice",
"cv2.erode",
"cv2.getRotationMatrix2D",
"torch.tensor"
] | [((1585, 1645), 'numpy.zeros', 'np.zeros', (['(self.height, self.width, self.channels)', 'np.uint8'], {}), '((self.height, self.width, self.channels), np.uint8)\n', (1593, 1645), True, 'import numpy as np\n'), ((2940, 2959), 'torch.tensor', 'torch.tensor', (['img_1'], {}), '(img_1)\n', (2952, 2959), False, 'import torch\n'), ((1443, 1458), 'random.seed', 'seed', (['rand_seed'], {}), '(rand_seed)\n', (1447, 1458), False, 'from random import randint, seed\n'), ((1903, 1917), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (1910, 1917), False, 'from random import randint, seed\n'), ((2082, 2098), 'random.randint', 'randint', (['(3)', 'size'], {}), '(3, size)\n', (2089, 2098), False, 'from random import randint, seed\n'), ((2111, 2166), 'cv2.line', 'cv2.line', (['img', '(x1, y1)', '(x2, y2)', '(1, 1, 1)', 'thickness'], {}), '(img, (x1, y1), (x2, y2), (1, 1, 1), thickness)\n', (2119, 2166), False, 'import cv2\n'), ((2221, 2235), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (2228, 2235), False, 'from random import randint, seed\n'), ((2328, 2344), 'random.randint', 'randint', (['(3)', 'size'], {}), '(3, size)\n', (2335, 2344), False, 'from random import randint, seed\n'), ((2357, 2405), 'cv2.circle', 'cv2.circle', (['img', '(x1, y1)', 'radius', '(1, 1, 1)', '(-1)'], {}), '(img, (x1, y1), radius, (1, 1, 1), -1)\n', (2367, 2405), False, 'import cv2\n'), ((2461, 2475), 'random.randint', 'randint', (['(1)', '(20)'], {}), '(1, 20)\n', (2468, 2475), False, 'from random import randint, seed\n'), ((2715, 2731), 'random.randint', 'randint', (['(3)', 'size'], {}), '(3, size)\n', (2722, 2731), False, 'from random import randint, seed\n'), ((2744, 2814), 'cv2.ellipse', 'cv2.ellipse', (['img', '(x1, y1)', '(s1, s2)', 'a1', 'a2', 'a3', '(1, 1, 1)', 'thickness'], {}), '(img, (x1, y1), (s1, s2), a1, a2, a3, (1, 1, 1), thickness)\n', (2755, 2814), False, 'import cv2\n'), ((2892, 2917), 'numpy.moveaxis', 'np.moveaxis', (['img_1', '(-1)', '(0)'], 
{}), '(img_1, -1, 0)\n', (2903, 2917), True, 'import numpy as np\n'), ((3324, 3352), 'numpy.random.randint', 'np.random.randint', (['(-180)', '(180)'], {}), '(-180, 180)\n', (3341, 3352), True, 'import numpy as np\n'), ((3369, 3443), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(mask.shape[1] / 2, mask.shape[0] / 2)', 'rand', '(1.5)'], {}), '((mask.shape[1] / 2, mask.shape[0] / 2), rand, 1.5)\n', (3392, 3443), False, 'import cv2\n'), ((3463, 3518), 'cv2.warpAffine', 'cv2.warpAffine', (['mask', 'M', '(mask.shape[1], mask.shape[0])'], {}), '(mask, M, (mask.shape[1], mask.shape[0]))\n', (3477, 3518), False, 'import cv2\n'), ((3586, 3610), 'numpy.random.randint', 'np.random.randint', (['(5)', '(47)'], {}), '(5, 47)\n', (3603, 3610), True, 'import numpy as np\n'), ((3632, 3663), 'numpy.ones', 'np.ones', (['(rand, rand)', 'np.uint8'], {}), '((rand, rand), np.uint8)\n', (3639, 3663), True, 'import numpy as np\n'), ((3683, 3720), 'cv2.erode', 'cv2.erode', (['mask', 'kernel'], {'iterations': '(1)'}), '(mask, kernel, iterations=1)\n', (3692, 3720), False, 'import cv2\n'), ((3785, 3833), 'numpy.random.randint', 'np.random.randint', (['(0)', '(mask.shape[1] - self.width)'], {}), '(0, mask.shape[1] - self.width)\n', (3802, 3833), True, 'import numpy as np\n'), ((3850, 3899), 'numpy.random.randint', 'np.random.randint', (['(0)', '(mask.shape[0] - self.height)'], {}), '(0, mask.shape[0] - self.height)\n', (3867, 3899), True, 'import numpy as np\n'), ((4270, 4287), 'random.seed', 'seed', (['random_seed'], {}), '(random_seed)\n', (4274, 4287), False, 'from random import randint, seed\n'), ((1941, 1963), 'random.randint', 'randint', (['(1)', 'self.width'], {}), '(1, self.width)\n', (1948, 1963), False, 'from random import randint, seed\n'), ((1965, 1987), 'random.randint', 'randint', (['(1)', 'self.width'], {}), '(1, self.width)\n', (1972, 1987), False, 'from random import randint, seed\n'), ((2009, 2032), 'random.randint', 'randint', (['(1)', 'self.height'], {}), '(1, 
self.height)\n', (2016, 2032), False, 'from random import randint, seed\n'), ((2034, 2057), 'random.randint', 'randint', (['(1)', 'self.height'], {}), '(1, self.height)\n', (2041, 2057), False, 'from random import randint, seed\n'), ((2259, 2281), 'random.randint', 'randint', (['(1)', 'self.width'], {}), '(1, self.width)\n', (2266, 2281), False, 'from random import randint, seed\n'), ((2283, 2306), 'random.randint', 'randint', (['(1)', 'self.height'], {}), '(1, self.height)\n', (2290, 2306), False, 'from random import randint, seed\n'), ((2499, 2521), 'random.randint', 'randint', (['(1)', 'self.width'], {}), '(1, self.width)\n', (2506, 2521), False, 'from random import randint, seed\n'), ((2523, 2546), 'random.randint', 'randint', (['(1)', 'self.height'], {}), '(1, self.height)\n', (2530, 2546), False, 'from random import randint, seed\n'), ((2568, 2590), 'random.randint', 'randint', (['(1)', 'self.width'], {}), '(1, self.width)\n', (2575, 2590), False, 'from random import randint, seed\n'), ((2592, 2615), 'random.randint', 'randint', (['(1)', 'self.height'], {}), '(1, self.height)\n', (2599, 2615), False, 'from random import randint, seed\n'), ((2641, 2656), 'random.randint', 'randint', (['(3)', '(180)'], {}), '(3, 180)\n', (2648, 2656), False, 'from random import randint, seed\n'), ((2658, 2673), 'random.randint', 'randint', (['(3)', '(180)'], {}), '(3, 180)\n', (2665, 2673), False, 'from random import randint, seed\n'), ((2675, 2690), 'random.randint', 'randint', (['(3)', '(180)'], {}), '(3, 180)\n', (2682, 2690), False, 'from random import randint, seed\n'), ((1097, 1122), 'os.listdir', 'os.listdir', (['self.filepath'], {}), '(self.filepath)\n', (1107, 1122), False, 'import os\n'), ((3200, 3251), 'numpy.random.choice', 'np.random.choice', (['self.mask_files', '(1)'], {'replace': '(False)'}), '(self.mask_files, 1, replace=False)\n', (3216, 3251), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 28 10:58:32 2020
@author: seba
"""
# Prueba con serial
# Recordar instalar la libreria serial
#pip install pyserial
#from serial import Serial
#python -m serial.tools.list_ports # Hace una lista de los puertos
import serial
import time
import numpy as np
import sys
import math
import statistics as st
# Serial device node the PC is attached to.
puerto='/dev/ttyUSB0'
# Receiver port names used in NovAtel commands:
COM_base='com1'   # port of the base receiver the PC-side commands target
COM_rover='com1'  # rover port connected to the PC
COM_rb='com2'     # rover port connected to the radio link to the base
# 1 = print extra diagnostics (see DEBUGUEANDO), 0 = quiet.
debug=0
# ################################################## Declaracion de funciones
def timer_print(tiempo):
    """Block for *tiempo* seconds, refreshing an in-place countdown once per second.

    The original used the Python-2-only ``print(...),`` trailing-comma idiom to
    suppress the newline; under the print *function* that comma is a no-op and
    every refresh lands on a new line.  ``sys.stdout.write`` gives the same
    in-place behaviour on both Python 2 and 3.
    """
    k = 0
    while k < tiempo:
        k = k + 1
        time.sleep(1)
        # \r returns to column 0 so the next write overwrites the line.
        sys.stdout.write('\r                              ')
        sys.stdout.write('\rquedan: ' + str(tiempo - k) + ' s')
        sys.stdout.flush()
    sys.stdout.write('\r\n')
def DEBUGUEANDO(encabezado,dato):
    """Print a debug line (header concatenated with payload) only when the
    module-level ``debug`` flag equals 1."""
    if debug != 1:
        return
    print(encabezado+dato)
def Config_puerto():
    """Open the module-level serial device (``puerto``) at 9600 baud with a
    1 s read timeout and return the open ``serial.Serial`` handle.

    Prints an error message when the port did not open.
    """
    ser = serial.Serial()
    ser.baudrate = 9600
    ser.port = puerto
    ser.timeout = 1
    ser.open()
    time.sleep(1)  # give the device a moment after opening
    if not ser.isOpen():
        print('Error, no se abrio el puerto serial')
    return ser
def Ver_inbuffer(ser):
    """Drain the serial input buffer and summarise recognised replies.

    Returns a (3, 1) numpy array of flags:
      [0] -> 1 when an '<OK' acknowledgement was seen
      [1] -> 1/2 when a '[COM1]'/'[COM2]' banner was seen
      [2] -> -1 when a blank line was seen
    Unrecognised lines are echoed only when the module ``debug`` flag is set.
    """
    flags = np.zeros((3, 1))
    while ser.in_waiting > 0:
        linea = ser.read_until()
        if linea == '<OK\r\n':
            flags[0] = 1
        elif linea in ('[COM1]\r\n', '[COM1]'):
            flags[1] = 1
        elif linea in ('[COM2]\r\n', '[COM2]'):
            flags[1] = 2
        elif linea == '\r\n':
            flags[2] = -1
        elif debug:
            print('Dato_rx: '+linea)
    if flags[0] == 1:
        print('comando enviado correctamente al puerto COM'+str(int(flags[1])))
    return flags
def check_ok(banderas):
    """Return True exactly when flag 0 (the OK acknowledgement) is set."""
    return True if banderas[0] == 1 else False
def Env_comando(ser,comando,check=True):
    # Send *comando* terminated with a bare carriage return (see manual
    # p.190/650) and, when *check* is True, retry once per second until
    # Ver_inbuffer reports an OK acknowledgement.
    # Returns (command string as sent, flag array from Ver_inbuffer).
    # WARNING: with check=True and a receiver that never answers OK, this
    # loops forever.
    while True:
        comando2=comando+'\r'
        ser.write(comando2)
        DEBUGUEANDO('comando enviado: ',comando2)
        time.sleep(1)
        banderas=Ver_inbuffer(ser)
        aux=check_ok(banderas)
        if check==False or aux==True:
            return comando2,banderas
    # NOTE(review): unreachable -- the `while True` above only exits via the
    # return inside it.
    return comando2,banderas
def Config_puente(ser):
    """Turn the rover's radio port (COM_rb) into a one-way bridge toward
    COM_rover.

    Wiring assumed by the original comments: PC on COM1, radio on COM2 on
    both receivers.  Command syntax (manual p.190/650):
    INTERFACEMODE [port] rxtype txtype [responses].
    Only the radio->rover direction is bridged; the original notes that a
    bidirectional bridge would prevent resetting the receiver.
    """
    destino = 't' + COM_rover
    Env_comando(ser, 'interfacemode ' + COM_rb + ' ' + destino + ' none')
def send_base(ser,comando,check=True):
    """Forward *comando* to the base receiver by wrapping it in the SEND
    command over the radio port (manual p.190/650); *check* is passed
    through to Env_comando."""
    envuelto = 'send ' + COM_rb + ' "' + comando + '"'
    resultado = Env_comando(ser, envuelto, check)
    return resultado
def conv_latddmm2d(numero):
    """Convert an NMEA ddmm.mmmm coordinate to decimal degrees."""
    grados = math.trunc(numero / 100)
    minutos = numero - grados * 100
    return grados + minutos / 60
def GPSQI(argument):
    """Translate a GPGGA 'GPS quality indicator' value (manual p.320/650)
    into its human-readable description; unknown values map to "nada"."""
    textos = [
        " fix not available or invalid",
        " GPS fix",
        " C / A differential GPS, OmniSTAR HP, OmniSTAR XP, OmniSTAR VBS, or CDGPS",
        " no hay info de este indicador",
        " RTK fixed ambiguity solution (RT2), see also Table 90 on page 530",
        " RTK floating ambiguity solution (RT20),OmniSTAR HP or OmniSTAR XP",
        " Dead reckoning mode",
        " Manual input mode (fixed position)",
        " Simulator mode",
        " WAAS",
    ]
    descripciones = dict(enumerate(textos))
    return descripciones.get(argument, "nada")
def resetear(ser,disp):
    """Factory-reset *disp* ('base' or 'rover') and wait up to 30 s for a
    COM banner (flag index 1 from Ver_inbuffer) confirming the reboot.

    Returns True on confirmation, False on timeout.
    """
    Ver_inbuffer(ser)  # drain any stale input first
    if disp=='base':
        send_base(ser,'freset command',False)
        print('reseteando base...')
    if disp=='rover':
        Env_comando(ser,'freset command',False)
        print('reseteando rover...')
    limite = time.time() + 30
    while time.time() < limite:
        banderas = Ver_inbuffer(ser)
        if banderas[1] != 0:
            print('reseteo exitoso de '+ disp)
            return True
    print('Error: no se puede confirmar el reseteo')
    return False
def config_base(ser):
    # Survey-in routine: wait for the base to stabilise, average N valid
    # position fixes, then fix the base at the median position and start
    # broadcasting RTCM corrections over the bridged radio link.
    send_base(ser,'log gpggartk ontime 0.5') # log the base so it reports its position
    Config_puente(ser)
    print('Esperando estabilizacion... (60segs)')
    k=0
    while k<60:
        k=k+1
        time.sleep(1)
        Lectura=ser.read_until()
        Lectura=Lectura.split(',')
        if Lectura=="": # in case there is no data
            Lectura='None'
        print('\rquedan: ' + str(60-k) + ' s    Lectura actual '+ str(Lectura)),
        sys.stdout.flush()
    print('\nComienza la lectura de datos')
    t0=time.time() # current time
    t1=t0
    N=10
    timeout=100
    # Historial rows: 0=lat, 1=lon, 2=height, 3=satellite count, 4=precision
    Historial=np.zeros((5,N))
    k=-1
    vacio=0
    while t1<t0+timeout and k<N-1:
        time.sleep(0.5)
        t1=time.time()
        if ser.in_waiting>0:
            k=1+k
            Lectura=ser.read_until()
            Lectura=Lectura.split(',')
            print(Lectura)
            if Lectura[6]=='0': # quality indicator 0: no fix available
                k=k-1
                vacio=vacio+1
                print('Lectura invalida')
            else:
                if k<=(N-1) and Lectura[6]=='1':
                    print('Lectura valida')
                    # GPGGA field layout: see manual p.320/650
                    Historial[0][k]=-conv_latddmm2d(float(Lectura[2])) #lat
                    Historial[1][k]=-conv_latddmm2d(float(Lectura[4])) #lon
                    Historial[2][k]=float(Lectura[9]) # height
                    Historial[3][k]=float(Lectura[7]) # number of satellites
                    Historial[4][k]=float(Lectura[8]) # precision
                else:
                    print('Error: k= '+str(k)+'; GPS Quality indicator='+ GPSQI(float(Lectura[6])))
    if t1>=t0+timeout:
        print('Error: TimeOut, verificar conexiones')
        print('Reseteando rover y Base...')
        resetear(ser,'rover')
        resetear(ser,'base')
    else:
        # Use medians so a stray outlier fix does not bias the fixed position.
        Lat=st.median(Historial[0,:])
        Long=st.median(Historial[1,:])
        Height=st.median(Historial[2,:])
        print('posicion fijada en: ' + str(Lat) + ' Lat '+str(Long)+' Long '+str(Height)+' altura ')
        print('desvios: '+str(st.stdev(Historial[0,:]))+' std lat '+str(st.stdev(Historial[1,:]))+' std long '+str(st.stdev(Historial[2,:]))+' std Height ')
        # Reset both receivers, then configure the base as an RTK reference
        # broadcasting corrections from the surveyed position.
        resetear(ser,'rover')
        resetear(ser,'base')
        timer_print(30)
        send_base(ser,'fix position '+str(Lat)+' '+str(Long)+' '+str(Height))
        send_base(ser,'log rtcm3 ontime 10') # base station parameters
        send_base(ser,'log rtcm22 ontime 10 1') # extended base station parameters
        send_base(ser,'log rtcm1819 ontime 1') # raw measurements
        send_base(ser,'log rtcm31 ontime 2') # GLONASS diferential correction
        send_base(ser,'log rtcm32 ontime 2') # GLONASS base station parameters
        send_base(ser,'log rtcm1 ontime 5') # differential GPS correction
        send_base(ser,'interfacemode novatel rtcm on',False)
    # Ver_inbuffer(ser)
    #Lat=-31.5424768
    #Long=-68.5441024
    #Height=640.0
    return
# ############################################### Inicio del programa
if __name__=='__main__':
    # Endless configuration loop: reset both receivers, survey the base,
    # switch the rover to RTK-correction input, and watch the GPGGA quality
    # indicator for up to 2 minutes.  On success the inner deadline keeps
    # being pushed forward; otherwise the whole procedure restarts.
    while True:
        # Clear any previous state
        print('$#####################INICIANDO')
        ser=Config_puerto()
        print('reseteando los sistemas...')
        resetear(ser,'rover')
        resetear(ser,'base')
        timer_print(30)
        config_base(ser)
        Env_comando(ser,'interfacemode '+COM_rb+' rtcm none') # configure RX side
        Env_comando(ser,'log gpggartk ontime 0.1') # log position fixes continuously
        t0=time.time() # current time
        t1=t0
        while t1<t0+60*2:
            if ser.in_waiting>0:
                Lectura=ser.read_until()
                Lectura=Lectura.split(',')
                print(Lectura)
                # Quality 4/5 = RTK fixed/floating ambiguity solution
                if float(Lectura[6])==4 or float(Lectura[6])==5:
                    print('RTK configurado correctamente; GPS Quality indicator='+ GPSQI(float(Lectura[6])))
                    t0=time.time()# keep extending the deadline so we stay in the loop
            t1=time.time()
        print('$#################### Ocurrio algun error, no se pudo configurar el GPS con RTK, reiniciando...')
def pirulo2():
    # NOTE(review): appears to be scratch/dead code.  It references ser, Lat,
    # Long and Height, none of which exist at module scope (they are locals
    # of config_base), so calling this as-is would raise NameError.
    resetear(ser,'base')
    Config_puente(ser)
    send_base(ser,'fix position '+str(Lat)+' '+str(Long)+' '+str(Height))
    send_base(ser,'log rtcm3 ontime 10') # base station parameters
    send_base(ser,'log rtcm22 ontime 10 1') # extended base station parameters
    send_base(ser,'log rtcm1819 ontime 1') # raw measurements
    send_base(ser,'log rtcm31 ontime 2') # GLONASS diferential correction
    send_base(ser,'log rtcm32 ontime 2') # GLONASS base station parameters
    send_base(ser,'log rtcm1 ontime 5') # differential GPS correction
    send_base(ser,'interfacemode '+COM_base +' novatel rtcm on',False)
    Ver_inbuffer(ser)
    # Second variant: same logs but addressed explicitly to COM_base.
    send_base(ser,'fix position '+str(Lat)+' '+str(Long)+' '+str(Height))
    send_base(ser,'log '+COM_base +' rtcm3 ontime 10') # base station parameters
    send_base(ser,'log '+COM_base +' rtcm22 ontime 10 1') # extended base station parameters
    send_base(ser,'log '+COM_base +' rtcm1819 ontime 1') # raw measurements
    send_base(ser,'log '+COM_base +' rtcm31 ontime 2') # GLONASS diferential correction
    send_base(ser,'log '+COM_base +' rtcm32 ontime 2') # GLONASS base station parameters
    send_base(ser,'log '+COM_base +' rtcm1 ontime 5') # differential GPS correction
    # send_base(ser,'interfacemode '+COM_base +' none rtcm on',False)
send_base(ser,'interfacemode '+COM_base +' novatel rtcm on',False) | [
"serial.Serial",
"statistics.median",
"statistics.stdev",
"numpy.zeros",
"time.time",
"time.sleep",
"sys.stdout.flush",
"math.trunc"
] | [((904, 919), 'serial.Serial', 'serial.Serial', ([], {}), '()\n', (917, 919), False, 'import serial\n'), ((999, 1012), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1009, 1012), False, 'import time\n'), ((1146, 1162), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (1154, 1162), True, 'import numpy as np\n'), ((3141, 3165), 'math.trunc', 'math.trunc', (['(numero / 100)'], {}), '(numero / 100)\n', (3151, 3165), False, 'import math\n'), ((4887, 4898), 'time.time', 'time.time', ([], {}), '()\n', (4896, 4898), False, 'import time\n'), ((5746, 5757), 'time.time', 'time.time', ([], {}), '()\n', (5755, 5757), False, 'import time\n'), ((5823, 5839), 'numpy.zeros', 'np.zeros', (['(5, N)'], {}), '((5, N))\n', (5831, 5839), True, 'import numpy as np\n'), ((608, 621), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (618, 621), False, 'import time\n'), ((734, 752), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (750, 752), False, 'import sys\n'), ((2116, 2129), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2126, 2129), False, 'import time\n'), ((5083, 5094), 'time.time', 'time.time', ([], {}), '()\n', (5092, 5094), False, 'import time\n'), ((5417, 5430), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5427, 5430), False, 'import time\n'), ((5667, 5685), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5683, 5685), False, 'import sys\n'), ((5903, 5918), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (5913, 5918), False, 'import time\n'), ((5930, 5941), 'time.time', 'time.time', ([], {}), '()\n', (5939, 5941), False, 'import time\n'), ((7137, 7163), 'statistics.median', 'st.median', (['Historial[0, :]'], {}), '(Historial[0, :])\n', (7146, 7163), True, 'import statistics as st\n'), ((7176, 7202), 'statistics.median', 'st.median', (['Historial[1, :]'], {}), '(Historial[1, :])\n', (7185, 7202), True, 'import statistics as st\n'), ((7217, 7243), 'statistics.median', 'st.median', (['Historial[2, :]'], {}), 
'(Historial[2, :])\n', (7226, 7243), True, 'import statistics as st\n'), ((8941, 8952), 'time.time', 'time.time', ([], {}), '()\n', (8950, 8952), False, 'import time\n'), ((9414, 9425), 'time.time', 'time.time', ([], {}), '()\n', (9423, 9425), False, 'import time\n'), ((9355, 9366), 'time.time', 'time.time', ([], {}), '()\n', (9364, 9366), False, 'import time\n'), ((7459, 7484), 'statistics.stdev', 'st.stdev', (['Historial[2, :]'], {}), '(Historial[2, :])\n', (7467, 7484), True, 'import statistics as st\n'), ((7416, 7441), 'statistics.stdev', 'st.stdev', (['Historial[1, :]'], {}), '(Historial[1, :])\n', (7424, 7441), True, 'import statistics as st\n'), ((7374, 7399), 'statistics.stdev', 'st.stdev', (['Historial[0, :]'], {}), '(Historial[0, :])\n', (7382, 7399), True, 'import statistics as st\n')] |
# -*- coding: utf-8 -*-
# https://gist.github.com/mikalv/3947ccf21366669ac06a01f39d7cff05
# http://cv-tricks.com/tensorflow-tutorial/save-restore-tensorflow-models-quick-complete-tutorial/
import tensorflow as tf
import numpy as np
import os, sys
import re
import collections
#set hyperparameters
# Character window fed to the LSTM per training sample.
max_len = 40
# Stride between consecutive training windows.
step = 10
# LSTM hidden-state size.
num_units = 128
learning_rate = 0.001
batch_size = 200
epoch = 50
# Softmax sampling temperature (<1 sharpens, >1 flattens the distribution).
temperature = 0.8
# Checkpoint directory; created on first run.
SAVE_PATH = '/home/frankzl/trash'
if not os.path.exists(SAVE_PATH):
    os.mkdir(SAVE_PATH)
def tokens(text):
    """Return all lower-case word tokens of *text*, with digit runs removed
    before tokenisation."""
    sin_digitos = re.sub(r'[0-9]+', '', text)
    return re.findall(r'\w+', sin_digitos.lower())
# Corpus vocabulary: word tokens of the reference text and their frequencies,
# used by the spell-correction helpers below.
WORDS = tokens(open('RilkeBig.txt').read())
WORD_COUNTS = collections.Counter(WORDS)
def edits0(word):
    """Return the set of strings zero edits away, i.e. the word itself."""
    return set([word])
def edits1(word):
    """Return the set of all strings exactly one edit (deletion,
    transposition, replacement or insertion) away from *word*."""
    alphabet = 'abcdefghijklmnopqrstuvwxyzäüö'
    # All ways of splitting the word into a left/right pair.
    pairs = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    candidates = set()
    for left, right in pairs:
        if right:
            candidates.add(left + right[1:])  # deletion
            if len(right) > 1:
                candidates.add(left + right[1] + right[0] + right[2:])  # transposition
        for ch in alphabet:
            if right:
                candidates.add(left + ch + right[1:])  # replacement
            candidates.add(left + ch + right)  # insertion
    return candidates
def edits2(word):
    """Return all strings that are two edits away from *word*."""
    result = set()
    for once in edits1(word):
        result.update(edits1(once))
    return result
def known(words):
    """Keep only the candidates present in the corpus vocabulary."""
    return set(filter(WORD_COUNTS.__contains__, words))
def correct(word):
    """Return the most frequent known word within edit distance 2 of *word*,
    falling back to *word* itself.  Candidate sets are computed lazily so
    the expensive edits2 pass only runs when needed."""
    candidates = known(edits0(word))
    if not candidates:
        candidates = known(edits1(word))
    if not candidates:
        candidates = known(edits2(word))
    if not candidates:
        candidates = [word]
    return max(candidates, key=WORD_COUNTS.get)
def correct_match(match):
    """Spell-correct the word in a regex *match*, preserving its
    upper/lower/title casing."""
    word = match.group()
    if word.isupper():
        recase = str.upper
    elif word.islower():
        recase = str.lower
    elif word.istitle():
        recase = str.title
    else:
        recase = str
    return recase(correct(word.lower()))
def correct_text_generic(text):
    """Spell-correct every ASCII-alphabetic word in *text*."""
    pattern = re.compile('[a-zA-Z]+')
    return pattern.sub(correct_match, text)
def read_data(file_name):
    '''
    Read a text file and return its contents lower-cased.

    Fix: the original used ``open(file_name, 'r').read()`` and never closed
    the handle; a ``with`` block closes it deterministically.
    '''
    with open(file_name, 'r') as handle:
        text = handle.read()
    return text.lower()
def featurize(text):
    '''
    Turn *text* into one-hot training arrays using the module-level
    max_len (window size) and step (stride).

    Returns (train_data, target_data, unique_chars, len_unique_chars)
    where train_data[i] one-hot encodes a max_len character window and
    target_data[i] one-hot encodes the character that follows it.
    Note: character ordering comes from list(set(text)) and is therefore
    not stable across interpreter runs.
    '''
    vocab = list(set(text))
    vocab_size = len(vocab)
    windows = []
    next_chars = []
    for start in range(0, len(text) - max_len, step):
        windows.append(text[start:start + max_len])
        next_chars.append(text[start + max_len])
    train_data = np.zeros((len(windows), max_len, vocab_size))
    target_data = np.zeros((len(windows), vocab_size))
    for row, window in enumerate(windows):
        for col, ch in enumerate(window):
            train_data[row, col, vocab.index(ch)] = 1
        target_data[row, vocab.index(next_chars[row])] = 1
    return train_data, target_data, vocab, vocab_size
def rnn(x, weight, bias, len_unique_chars):
    '''
    Build the LSTM prediction graph: split the one-hot input window into
    max_len per-time-step tensors, run a static LSTM, and project the final
    output to vocabulary logits via *weight* and *bias*.

    NOTE: uses tf.contrib, which only exists in TensorFlow 1.x.
    '''
    x = tf.transpose(x, [1, 0, 2])
    x = tf.reshape(x, [-1, len_unique_chars])
    x = tf.split(x, max_len, 0)
    cell = tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0)
    outputs, states = tf.contrib.rnn.static_rnn(cell, x, dtype=tf.float32)
    prediction = tf.matmul(outputs[-1], weight) + bias
    return prediction
def sample(predicted):
    '''
    Draw a one-hot sample from the *predicted* scores after scaling them by
    the module-level temperature and normalising to a probability
    distribution.  Returns the (1, n) multinomial draw.
    '''
    scaled = np.exp(predicted / temperature)
    distribution = scaled / np.sum(scaled)
    return np.random.multinomial(1, distribution, 1)
def run(train_data, target_data, unique_chars, len_unique_chars):
    '''
    Main training / generation driver (TensorFlow 1.x graph mode).

    Builds placeholders and the RNN graph, restores a checkpoint from
    SAVE_PATH when one exists (otherwise initialises fresh variables),
    trains for `epoch` epochs, prints a 500-character sample per epoch,
    then enters an interactive seed loop before saving the session.
    '''
    x = tf.placeholder("float", [None, max_len, len_unique_chars], name ="Input")
    y = tf.placeholder("float", [None, len_unique_chars], name = "Output")
    weight = tf.Variable(tf.random_normal([num_units, len_unique_chars]))
    bias = tf.Variable(tf.random_normal([len_unique_chars]))
    prediction = rnn(x, weight, bias, len_unique_chars)
    softmax = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y)
    cost = tf.reduce_mean(softmax)
    optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(cost)
    import glob
    if glob.glob(SAVE_PATH + '*.meta'):
        # A checkpoint exists: throw away the graph built above and restore
        # the saved one, rebinding tensors/ops by name/collection.
        tf.reset_default_graph()
        imported_meta = tf.train.import_meta_graph(glob.glob(SAVE_PATH + '*.meta')[0])
        sess=tf.Session()
        imported_meta.restore(sess, tf.train.latest_checkpoint(SAVE_PATH))
        print (" restoring an old model and training it further ")
        x = sess.graph.get_tensor_by_name("Input:0")
        y = sess.graph.get_tensor_by_name("Output:0")
        prediction = tf.get_collection("prediction")[0]
        optimizer = tf.get_collection("optimizer")[0]
    else:
        init_op = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init_op)
        print("Building model from scratch!")
    saver = tf.train.Saver(max_to_keep=4, keep_checkpoint_every_n_hours=1, save_relative_paths=True)
    num_batches = int(len(train_data)/batch_size)
    for i in range(epoch):
        print ("----------- Epoch {0}/{1} -----------".format(i+1, epoch))
        count = 0
        for _ in range(num_batches):
            train_batch, target_batch = train_data[count:count+batch_size], target_data[count:count+batch_size]
            count += batch_size
            sess.run([optimizer] ,feed_dict={x:train_batch, y:target_batch})
        tf.add_to_collection("optimizer", optimizer)
        #get on of training set as seed
        seed = train_batch[:1:]
        #to print the seed 40 characters
        seed_chars = ''
        for each in seed[0]:
            seed_chars += unique_chars[np.where(each == max(each))[0][0]]
        print ("Seed:", seed_chars)
        #predict next 500 characters
        for i in range(500):
            if i > 0:
                # Slide the window: drop the oldest character, append the
                # one just sampled.
                remove_fist_char = seed[:,1:,:]
                seed = np.append(remove_fist_char, np.reshape(probabilities, [1, 1, len_unique_chars]), axis=1)
            predicted = sess.run([prediction], feed_dict = {x:seed})
            tf.add_to_collection("prediction", prediction)
            predicted = np.asarray(predicted[0]).astype('float64')[0]
            probabilities = sample(predicted)
            predicted_chars = unique_chars[np.argmax(probabilities)]
            seed_chars += predicted_chars
        print ('Result:', seed_chars)
        print ('Corrected:', correct_text_generic(seed_chars))
    ui = True
    while ui == True:
        # Interactive generation loop.
        # NOTE(review): `seed` still holds the one-hot window from the last
        # training epoch; the typed seed_chars is only used for display and
        # spell-corrected output, not to re-seed the network input.
        seed_chars = input("Enter a seed: ")
        for i in range(280):
            if i > 0:
                remove_fist_char = seed[:, 1:, :]
                seed = np.append(remove_fist_char, np.reshape(probabilities, [1, 1, len_unique_chars]), axis=1)
            predicted = sess.run([prediction], feed_dict={x: seed})
            predicted = np.asarray(predicted[0]).astype('float64')[0]
            probabilities = sample(predicted)
            predicted_chars = unique_chars[np.argmax(probabilities)]
            seed_chars += predicted_chars
            # print 'Result:', seed_chars
        print ('Corrected:', correct_text_generic(seed_chars))
        action = input("Do you want to try another seed? (yes=y, no=n)?: ")
        if action != "y":
            ui = False
    save_path = saver.save(sess, SAVE_PATH, global_step=10)
    print("Model saved in file: %s" % save_path)
    sess.close()
    tf.reset_default_graph()
if __name__ == "__main__":
    # Load the training corpus, strip digits, featurize into one-hot
    # windows, and run training/generation.
    text = read_data('RilkeLyrik.txt')
    text = re.sub(r'[0-9]+', '', text)
    train_data, target_data, unique_chars, len_unique_chars = featurize(text)
    tf.reset_default_graph()
    run(train_data, target_data, unique_chars, len_unique_chars)
| [
"os.mkdir",
"numpy.sum",
"numpy.argmax",
"tensorflow.get_collection",
"tensorflow.reset_default_graph",
"numpy.random.multinomial",
"tensorflow.reshape",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.matmul",
"tensorflow.contrib.rnn.static_rnn",
"tensorflow.train.latest_checkpoint",
"numpy.... | [((708, 734), 'collections.Counter', 'collections.Counter', (['WORDS'], {}), '(WORDS)\n', (727, 734), False, 'import collections\n'), ((450, 475), 'os.path.exists', 'os.path.exists', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (464, 475), False, 'import os, sys\n'), ((481, 500), 'os.mkdir', 'os.mkdir', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (489, 500), False, 'import os, sys\n'), ((577, 603), 're.sub', 're.sub', (['"""[0-9]+"""', '""""""', 'text'], {}), "('[0-9]+', '', text)\n", (583, 603), False, 'import re\n'), ((2403, 2443), 're.sub', 're.sub', (['"""[a-zA-Z]+"""', 'correct_match', 'text'], {}), "('[a-zA-Z]+', correct_match, text)\n", (2409, 2443), False, 'import re\n'), ((3447, 3473), 'tensorflow.transpose', 'tf.transpose', (['x', '[1, 0, 2]'], {}), '(x, [1, 0, 2])\n', (3459, 3473), True, 'import tensorflow as tf\n'), ((3482, 3519), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, len_unique_chars]'], {}), '(x, [-1, len_unique_chars])\n', (3492, 3519), True, 'import tensorflow as tf\n'), ((3528, 3551), 'tensorflow.split', 'tf.split', (['x', 'max_len', '(0)'], {}), '(x, max_len, 0)\n', (3536, 3551), True, 'import tensorflow as tf\n'), ((3564, 3620), 'tensorflow.contrib.rnn.BasicLSTMCell', 'tf.contrib.rnn.BasicLSTMCell', (['num_units'], {'forget_bias': '(1.0)'}), '(num_units, forget_bias=1.0)\n', (3592, 3620), True, 'import tensorflow as tf\n'), ((3643, 3695), 'tensorflow.contrib.rnn.static_rnn', 'tf.contrib.rnn.static_rnn', (['cell', 'x'], {'dtype': 'tf.float32'}), '(cell, x, dtype=tf.float32)\n', (3668, 3695), True, 'import tensorflow as tf\n'), ((3898, 3929), 'numpy.exp', 'np.exp', (['(predicted / temperature)'], {}), '(predicted / temperature)\n', (3904, 3929), True, 'import numpy as np\n'), ((4002, 4040), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'predicted', '(1)'], {}), '(1, predicted, 1)\n', (4023, 4040), True, 'import numpy as np\n'), ((4180, 4252), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, max_len, 
len_unique_chars]'], {'name': '"""Input"""'}), "('float', [None, max_len, len_unique_chars], name='Input')\n", (4194, 4252), True, 'import tensorflow as tf\n'), ((4262, 4326), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, len_unique_chars]'], {'name': '"""Output"""'}), "('float', [None, len_unique_chars], name='Output')\n", (4276, 4326), True, 'import tensorflow as tf\n'), ((4535, 4603), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'prediction', 'labels': 'y'}), '(logits=prediction, labels=y)\n', (4574, 4603), True, 'import tensorflow as tf\n'), ((4615, 4638), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['softmax'], {}), '(softmax)\n', (4629, 4638), True, 'import tensorflow as tf\n'), ((4750, 4781), 'glob.glob', 'glob.glob', (["(SAVE_PATH + '*.meta')"], {}), "(SAVE_PATH + '*.meta')\n", (4759, 4781), False, 'import glob\n'), ((5466, 5558), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(4)', 'keep_checkpoint_every_n_hours': '(1)', 'save_relative_paths': '(True)'}), '(max_to_keep=4, keep_checkpoint_every_n_hours=1,\n save_relative_paths=True)\n', (5480, 5558), True, 'import tensorflow as tf\n'), ((7974, 7998), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (7996, 7998), True, 'import tensorflow as tf\n'), ((8077, 8103), 're.sub', 're.sub', (['"""[0-9]+"""', '""""""', 'text'], {}), "('[0-9]+', '', text)\n", (8083, 8103), False, 'import re\n'), ((8187, 8211), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (8209, 8211), True, 'import tensorflow as tf\n'), ((3713, 3743), 'tensorflow.matmul', 'tf.matmul', (['outputs[-1]', 'weight'], {}), '(outputs[-1], weight)\n', (3722, 3743), True, 'import tensorflow as tf\n'), ((3960, 3981), 'numpy.sum', 'np.sum', (['exp_predicted'], {}), '(exp_predicted)\n', (3966, 3981), True, 'import numpy as np\n'), ((4354, 4401), 'tensorflow.random_normal', 'tf.random_normal', 
(['[num_units, len_unique_chars]'], {}), '([num_units, len_unique_chars])\n', (4370, 4401), True, 'import tensorflow as tf\n'), ((4426, 4462), 'tensorflow.random_normal', 'tf.random_normal', (['[len_unique_chars]'], {}), '([len_unique_chars])\n', (4442, 4462), True, 'import tensorflow as tf\n'), ((4791, 4815), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4813, 4815), True, 'import tensorflow as tf\n'), ((4916, 4928), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4926, 4928), True, 'import tensorflow as tf\n'), ((5319, 5352), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5350, 5352), True, 'import tensorflow as tf\n'), ((5368, 5380), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5378, 5380), True, 'import tensorflow as tf\n'), ((4655, 4709), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4680, 4709), True, 'import tensorflow as tf\n'), ((4965, 5002), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['SAVE_PATH'], {}), '(SAVE_PATH)\n', (4991, 5002), True, 'import tensorflow as tf\n'), ((5200, 5231), 'tensorflow.get_collection', 'tf.get_collection', (['"""prediction"""'], {}), "('prediction')\n", (5217, 5231), True, 'import tensorflow as tf\n'), ((5255, 5285), 'tensorflow.get_collection', 'tf.get_collection', (['"""optimizer"""'], {}), "('optimizer')\n", (5272, 5285), True, 'import tensorflow as tf\n'), ((5996, 6040), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""optimizer"""', 'optimizer'], {}), "('optimizer', optimizer)\n", (6016, 6040), True, 'import tensorflow as tf\n'), ((6653, 6699), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""prediction"""', 'prediction'], {}), "('prediction', prediction)\n", (6673, 6699), True, 'import tensorflow as tf\n'), ((4867, 4898), 'glob.glob', 'glob.glob', (["(SAVE_PATH + '*.meta')"], 
{}), "(SAVE_PATH + '*.meta')\n", (4876, 4898), False, 'import glob\n'), ((6859, 6883), 'numpy.argmax', 'np.argmax', (['probabilities'], {}), '(probabilities)\n', (6868, 6883), True, 'import numpy as np\n'), ((7549, 7573), 'numpy.argmax', 'np.argmax', (['probabilities'], {}), '(probabilities)\n', (7558, 7573), True, 'import numpy as np\n'), ((6511, 6562), 'numpy.reshape', 'np.reshape', (['probabilities', '[1, 1, len_unique_chars]'], {}), '(probabilities, [1, 1, len_unique_chars])\n', (6521, 6562), True, 'import numpy as np\n'), ((7261, 7312), 'numpy.reshape', 'np.reshape', (['probabilities', '[1, 1, len_unique_chars]'], {}), '(probabilities, [1, 1, len_unique_chars])\n', (7271, 7312), True, 'import numpy as np\n'), ((6724, 6748), 'numpy.asarray', 'np.asarray', (['predicted[0]'], {}), '(predicted[0])\n', (6734, 6748), True, 'import numpy as np\n'), ((7414, 7438), 'numpy.asarray', 'np.asarray', (['predicted[0]'], {}), '(predicted[0])\n', (7424, 7438), True, 'import numpy as np\n')] |
# coding=utf-8
# date: 2018/12/24, 15:27
# name: smz
import numpy as np
import tensorflow as tf
from LinearModel.modules.model import TumorModel
from LinearModel.configuration.options import opts
def TumorModelTrain():
    """Train TumorModel on the numpy dataset saved under ../data.

    Loads features and binary labels, batches them sequentially, logs the
    loss each step, writes TensorBoard summaries, and checkpoints the model
    every 5 epochs.  Hyper-parameters come from the project `opts` dict.
    """
    data_X = np.load("../data/train_data_X.npy")
    data_Y = np.load("../data/train_data_Y.npy")  # e.g. [0., 0., 1., 1., ...]
    data_Y = np.expand_dims(data_Y, axis=1)  # (N,) -> (N, 1) to match the label placeholder
    tumor_model = TumorModel(opts)
    tumor_model.build()
    num_samples = len(data_X)
    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        for epoch in range(opts["epochs"]):
            start_pointer = 0
            while start_pointer < num_samples:
                batch_X = data_X[start_pointer:start_pointer + opts["batch_size"]]
                batch_Y = data_Y[start_pointer:start_pointer + opts["batch_size"]]
                feed_dict = {tumor_model.inputs: batch_X, tumor_model.labels: batch_Y}
                loss_value, global_step_value, _, merge_string = sess.run(
                    fetches=[tumor_model.loss, tumor_model.global_step,
                             tumor_model.train_step, tumor_model.merge_op],
                    feed_dict=feed_dict)
                print("epoch:%d, step:%d, loss:%.6f" % (epoch, global_step_value, loss_value))
                # BUG FIX: the original did `start_pointer += start_pointer +
                # batch_size`, which grows the pointer geometrically and skips
                # most of the training data; advance by exactly one batch.
                start_pointer += opts["batch_size"]
                tumor_model.writer.add_summary(merge_string, global_step=global_step_value)
            if (epoch + 1) % 5 == 0:
                tumor_model.saver.save(sess, opts["checkpoints_dir"] + "tumor_model", global_step=tumor_model.global_step)
if __name__ == "__main__":
TumorModelTrain() | [
"numpy.load",
"tensorflow.global_variables_initializer",
"LinearModel.modules.model.TumorModel",
"tensorflow.Session",
"numpy.expand_dims"
] | [((235, 270), 'numpy.load', 'np.load', (['"""../data/train_data_X.npy"""'], {}), "('../data/train_data_X.npy')\n", (242, 270), True, 'import numpy as np\n'), ((284, 319), 'numpy.load', 'np.load', (['"""../data/train_data_Y.npy"""'], {}), "('../data/train_data_Y.npy')\n", (291, 319), True, 'import numpy as np\n'), ((359, 389), 'numpy.expand_dims', 'np.expand_dims', (['data_Y'], {'axis': '(1)'}), '(data_Y, axis=1)\n', (373, 389), True, 'import numpy as np\n'), ((409, 425), 'LinearModel.modules.model.TumorModel', 'TumorModel', (['opts'], {}), '(opts)\n', (419, 425), False, 'from LinearModel.modules.model import TumorModel\n'), ((490, 502), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (500, 502), True, 'import tensorflow as tf\n'), ((527, 560), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (558, 560), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on 2020.05.19
@author: <NAME>, <NAME>, <NAME>, <NAME>
Code based on:
"""
import numpy as np
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
class RandomForest:
    """Thin wrapper around scikit-learn random forests for either task type.

    Parameters
    ----------
    model_configs : dict
        Hyper-parameters: 'max_features', 'n_estimators', 'min_samples_leaf',
        'class_weight' (used for classification only) and 'random_seed'.
    task_type : str
        'Classification' or 'Regression'.
    logger : logging.Logger, optional
        Used to report configuration errors.
    """

    def __init__(self, model_configs, task_type='Regression', logger=None):
        self.model_configs = model_configs
        self.max_features = model_configs['max_features']
        self.n_estimators = model_configs['n_estimators']
        self.min_samples_leaf = model_configs['min_samples_leaf']
        self.class_weight = model_configs['class_weight']
        self.random_seed = model_configs['random_seed']
        self.task_type = task_type
        np.random.seed(seed=self.random_seed)
        self.logger = logger
        self.setup_model()

    def setup_model(self):
        """Instantiate self.model for the configured task type.

        Raises
        ------
        ValueError
            If task_type is neither 'Classification' nor 'Regression'.
        """
        if self.task_type == 'Classification':
            self.model = RandomForestClassifier(n_estimators=self.n_estimators,
                                                max_features=self.max_features,
                                                min_samples_leaf=self.min_samples_leaf,
                                                n_jobs=8,
                                                class_weight=self.class_weight,
                                                random_state=self.random_seed,
                                                oob_score=False,
                                                verbose=1)
        elif self.task_type == 'Regression':
            self.model = RandomForestRegressor(n_estimators=self.n_estimators,
                                               max_features=self.max_features,
                                               min_samples_leaf=self.min_samples_leaf,
                                               n_jobs=8,
                                               random_state=self.random_seed,
                                               oob_score=False,
                                               verbose=1)
        else:
            # BUG FIX: the original did `raise self.logger.error(...)`;
            # logger.error() returns None and `raise None` is a TypeError
            # (and an AttributeError when logger is None).  Log when
            # possible, then raise a real exception.
            message = "Task type Error!"
            if self.logger is not None:
                self.logger.error(message)
            raise ValueError(message)

    def fit_model(self, train_loader):
        """Fit on (X, y) packed as train_loader[0], train_loader[1]."""
        self.model.fit(train_loader[0], train_loader[1])

    def predict(self, test_loader):
        """Return class probabilities for classification, raw predictions
        for regression, for the features in test_loader[0]."""
        if self.task_type == 'Classification':
            return self.model.predict_proba(test_loader[0])
        return self.model.predict(test_loader[0])
"sklearn.ensemble.RandomForestClassifier",
"numpy.random.seed",
"sklearn.ensemble.RandomForestRegressor"
] | [((726, 763), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'self.random_seed'}), '(seed=self.random_seed)\n', (740, 763), True, 'import numpy as np\n'), ((921, 1161), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'self.n_estimators', 'max_features': 'self.max_features', 'min_samples_leaf': 'self.min_samples_leaf', 'n_jobs': '(8)', 'class_weight': 'self.class_weight', 'random_state': 'self.random_seed', 'oob_score': '(False)', 'verbose': '(1)'}), '(n_estimators=self.n_estimators, max_features=self.\n max_features, min_samples_leaf=self.min_samples_leaf, n_jobs=8,\n class_weight=self.class_weight, random_state=self.random_seed,\n oob_score=False, verbose=1)\n', (943, 1161), False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n'), ((1556, 1759), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': 'self.n_estimators', 'max_features': 'self.max_features', 'min_samples_leaf': 'self.min_samples_leaf', 'n_jobs': '(8)', 'random_state': 'self.random_seed', 'oob_score': '(False)', 'verbose': '(1)'}), '(n_estimators=self.n_estimators, max_features=self.\n max_features, min_samples_leaf=self.min_samples_leaf, n_jobs=8,\n random_state=self.random_seed, oob_score=False, verbose=1)\n', (1577, 1759), False, 'from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n')] |
"""
Geographical measurement and analysis
Provides Point, Line, and Polygon classes, and their Multipart equivalents,
with methods for simple measurements such as distance, area, and direction.
"""
from __future__ import division
import math
import itertools
import numbers
import numpy as np
from coordstring import CoordString
from .decorators import cache_decorator
from ._geojson import GeoJSONOutMixin
from ._shp import ShapefileOutMixin
from .table import Table
from .utilities import _reproject, _flatten, _as_nested_lists
from .rtree import RTree
from .quadtree import QuadTree
from . import vectorgeo as _cvectorgeo
from . import dateline as _cdateline
from . import intersection as _cintersection
from . import convexhull as _cconvexhull
from . import contains as _ccontains
from . import table
from .. import geodesy
from ..crs import Cartesian, CartesianCRS, GeographicalCRS
from ..crs import SphericalEarth
from ..errors import GeometryError, CRSError
class Geometry(object):
    """ Abstract base class for all geometry types.

    Stores the coordinate reference system, a free-form ``properties``
    dict of geometry-level metadata, and a private cache dict used by
    memoizing decorators on subclasses.
    """
    def __init__(self, properties=None, crs=Cartesian):
        self.crs = crs
        if properties is None:
            self.properties = {}
        elif isinstance(properties, dict):
            self.properties = properties
        else:
            raise TypeError("properties must be a dictionary")
        self._cache = {}      # storage for cached derived values (bbox, etc.)
        self._geotype = None  # overridden by concrete subclasses
        return
class Rotatable(object):
    """ Mixin adding 2D rotation to geometries that implement
    ``apply_transform``. """

    def rotate(self, thetad, origin=(0, 0)):
        """ Rotate rank 2 geometry.

        Parameters
        ----------
        thetad : float
            degrees of rotation
        origin : tuple of two floats, optional
            pivot for rotation (default (0, 0))

        Returns
        -------
        Geometry
            rotated copy produced by ``apply_transform``
        """
        ct = math.cos(thetad*math.pi / 180.0)
        st = math.sin(thetad*math.pi / 180.0)
        ox, oy = origin
        # affine matrix: translate pivot to origin, rotate, translate back
        rows = [[ct, -st, -ox*ct + oy*st + ox],
                [st,  ct, -ox*st - oy*ct + oy]]
        return self.apply_transform(np.array(rows, dtype=np.float64))
class Point(Geometry, Rotatable, GeoJSONOutMixin, ShapefileOutMixin):
    """ Point object instantiated with:

    Parameters
    ----------
    coords : 2-tuple or 3-tuple
    properties : dict, optional
        geometry specific metadata (default None)
    crs : karta.crs.CRS, optional
        coordinate system for geometry (default Cartesian)
    """
    def __init__(self, coords, properties=None, **kwargs):
        if len(coords) not in (2, 3):
            raise TypeError("Point coordinates must have length 2 or 3")
        super(Point, self).__init__(properties=properties, **kwargs)
        self._vertex = tuple(coords)
        self._geotype = "Point"
        return

    def __getitem__(self, idx):
        return self._vertex[idx]

    def __repr__(self):
        return 'Point({0}, {1})'.format(self.x, self.y)

    def __eq__(self, other):
        try:
            return (tuple(self._vertex) == tuple(other._vertex)) and \
                   (self.properties == other.properties) and \
                   (self.crs == other.crs)
        except AttributeError:
            # *other* is not Point-like
            return False

    def __neq__(self, other):
        # NOTE: Python invokes __ne__, not __neq__, for the != operator;
        # on Python 3 the default __ne__ already negates __eq__, which
        # matches this implementation, so != behaves as intended.
        return not (self == other)

    def __hash__(self):
        ht = hash(self._geotype)
        hd = hash(self._vertex)
        hc = hash(str(self.crs))
        return ht + (hd << 1) + hd + hc

    def __add__(self, other):
        # combining two Points produces a two-vertex Multipoint in self's CRS
        return Multipoint([self._vertex, other.vertex(self.crs)], crs=self.crs)

    @property
    def __geo_interface__(self):
        p = self.properties.copy()
        p["_karta_proj4"] = self.crs.get_proj4()
        return {"type": "Feature",
                "geometry": self.geomdict,
                "properties": p}

    @property
    def geomdict(self):
        return {"type" : "Point", "coordinates" : self._vertex}

    @property
    def x(self):
        return self._vertex[0]

    @property
    def y(self):
        return self._vertex[1]

    @property
    def z(self):
        return self._vertex[2]

    def vertex(self, crs=None):
        """ Return the Point vertex as a tuple. """
        if crs is None or crs==self.crs:
            return self._vertex
        else:
            # NOTE(review): reprojection returns only (x, y); the
            # z-coordinate of a rank 3 point is dropped -- confirm intended
            return _reproject((self.x, self.y), self.crs, crs)

    def azimuth(self, other, projected=True):
        """ Returns the compass azimuth from self to other in degrees, measured
        clockwise with north at 0.

        Parameters
        ----------
        other : Point
            second point defining direction
        projected : bool, optional
            If True and self.crs is a ProjectedCRS, return the azimuth in the
            projected cooridnate system. If False, return the geodetic azimuth.
            if self.crs is a GeographicalCRS, result is always geodetic and this
            option has no effect.

        Returns
        -------
        float

        Notes
        -----
        - Return value is NaN if points are coincident
        - If CRS is geographical, returns azimuth as defined by the CRS
          instance.
        """
        az = np.nan
        if projected and not isinstance(self.crs, GeographicalCRS):
            x0, y0 = self.vertex()[:2]
            x1, y1 = other.vertex(self.crs)[:2]
            if (x0 != x1) or (y0 != y1):
                # convert math angle (CCW from +x) to compass bearing
                az = 90.0 - math.atan2(y1-y0, x1-x0)*180.0/math.pi
                az = (az+180) % 360 - 180
        else:
            lon0, lat0 = self.crs.project(self.x, self.y, inverse=True)
            lon1, lat1 = other.crs.project(other.x, other.y, inverse=True)
            # bug fix: condition previously read ``lat0 == lat1``, which
            # returned NaN for distinct points on the same meridian and a
            # spurious azimuth for coincident points
            if (lon0 != lon1) or (lat0 != lat1):
                az, _, _ = self.crs.inverse(lon0, lat0, lon1, lat1)
        return az

    def apply_transform(self, M):
        """ Apply an affine transform given by matrix *M* to data and return a
        new Point.

        Parameters
        ----------
        M : ndarray
            2x3 or 3x4 affine transformation matrix, representing a
            two-dimensional or a three-dimensional transformation,
            respectively.

        Returns
        -------
        Geometry

        Notes
        -----
        The output coordinates are computed as::

            xnew        x
                 = M *  y
            ynew        1

        or::

            xnew        x
            ynew = M *  y
            znew        z
                        1
        """
        if M.shape == (2, 3):
            # promote a 2D affine matrix to its 3D equivalent (z unchanged)
            N = np.zeros((3, 4), dtype=np.float64)
            N[:2,:2] = M[:,:2]
            N[:2,3] = M[:,2]
            N[2, 2] = 1.0
            M = N
        elif M.shape != (3, 4):
            raise ValueError("invalid affine matrix size: {0}".format(M.shape))
        if len(self._vertex) == 2:
            # pad rank 2 vertices with z = 0
            old_vertex = (self.x, self.y, 0)
        else:
            old_vertex = self._vertex
        x = old_vertex[0]*M[0,0] + old_vertex[1]*M[0,1] + old_vertex[2]*M[0,2] + M[0,3]
        y = old_vertex[0]*M[1,0] + old_vertex[1]*M[1,1] + old_vertex[2]*M[1,2] + M[1,3]
        if len(self._vertex) == 2:
            return Point((x, y), properties=self.properties, crs=self.crs)
        else:
            z = old_vertex[0]*M[2,0] + old_vertex[1]*M[2,1] + old_vertex[2]*M[2,2] + M[2,3]
            return Point((x, y, z), properties=self.properties, crs=self.crs)

    def walk(self, distance, azimuth, projected=True):
        """ Returns the point reached when moving in a given direction for
        a given distance from a specified starting location.

        Parameters
        ----------
        distance : float
            distance to shift point
        azimuth: float
            shift azimuth (clockwise with north at 0)
        projected: bool, optional
            If True and self.crs is a ProjectedCRS, return the compute new
            position using the projected cooridnate system. If False, return
            the geodetically correct new position. if self.crs is a
            GeographicalCRS, result is always geodetic and this option has no
            effect.
        """
        if projected and not isinstance(self.crs, GeographicalCRS):
            # convert compass bearing to math angle (CCW from +x axis);
            # a dead, immediately-overwritten assignment to maz was removed
            maz = (90-azimuth) * math.pi/180
            x = self.x + distance*math.cos(maz)
            y = self.y + distance*math.sin(maz)
        else:
            x1, y1 = self.crs.project(self.x, self.y, inverse=True)
            x2, y2, _ = self.crs.forward(x1, y1, azimuth, distance, radians=False)
            x, y = self.crs.project(x2, y2)
        return Point((x, y), properties=self.properties, crs=self.crs)

    def distance(self, other, projected=True):
        """ Returns a distance to another Point. Distances is computed in one
        of three ways:

        1. If *self* is in a geographical coordinate system, *other* is inverse
        projected to geographical coordinates if necessary, and the geodetic
        distance is computed on the ellipsoid of *self*.

        2. If *projected* is `True`, *other* is projected to the same
        coordinate system as *self* and flat-earth distance is computed.

        3. If *projected is `False`, both *self* and *other* are inverse
        projected to geographical coordinates, and the geodetic distance is
        computed on the ellipsoid of *self*.

        If the coordinate system is geographical and a third (z) coordinate
        exists, it is assumed to have the same units as the real-world
        horizontal coordinates (e.g. meters).

        Parameters
        ----------
        other : Point
            point to compute distance to
        projected : bool, optional
            If True and self.crs is a ProjectedCRS, return the flat distance in
            the projected coordinate system. If False, return the ellipsoidal
            distance. if self.crs is a GeographicalCRS, result is always
            ellipsoidal/geodetic and this switch is ignored.

        Returns
        -------
        float

        Notes
        -----
        - If CRS is Geographical, returns distance as computed by the CRS
          instance (e.g. ellipsoidal or spherical).
        """
        if isinstance(self.crs, GeographicalCRS):
            lon0, lat0 = self.x, self.y
            lon1, lat1 = other.crs.project(other.x, other.y, inverse=True)
            _, _, dist = self.crs.inverse(lon0, lat0, lon1, lat1)
        elif projected:
            x0, y0 = self._vertex[:2]
            x1, y1 = other.vertex(self.crs)[:2]
            dist = math.sqrt((x1-x0)*(x1-x0) + (y1-y0)*(y1-y0))
        else:
            lon0, lat0 = self.crs.project(self.x, self.y, inverse=True)
            lon1, lat1 = other.crs.project(other.x, other.y, inverse=True)
            _, _, dist = self.crs.inverse(lon0, lat0, lon1, lat1)
        if 3 == len(self._vertex) == len(other._vertex):
            # fold the vertical offset into the horizontal distance
            dz = self.z - other.z
            dist = math.sqrt(dist*dist + dz*dz)
        return dist

    def shift(self, shift_vector):
        """ Shift point in space.

        Parameters
        ----------
        shift_vector : iterable
            (xshift, yshift) pair; the z-coordinate, if any, is unchanged

        Returns
        -------
        Point
            a shifted copy (the original point is not modified)
        """
        if len(shift_vector) != 2:
            raise ValueError("shift vector must be of the form (xshift, yshift)")
        if len(self._vertex) == 2:
            vertex = (self.x+shift_vector[0], self.y+shift_vector[1])
        else:
            vertex = (self.x+shift_vector[0], self.y+shift_vector[1], self.z)
        return Point(vertex, properties=self.properties, crs=self.crs)
class MultiVertexBase(Geometry):
    """ Shared construction, comparison, and indexing logic for geometries
    backed by a CoordString of vertices (Line, Polygon, ...). """

    def __init__(self, vertices, ring=False, **kwargs):
        super(MultiVertexBase, self).__init__(**kwargs)
        if isinstance(vertices, CoordString):
            self._vertices = vertices
            return
        if hasattr(vertices, "__next__"):
            # materialize generators so the input can be inspected twice
            vertices = list(vertices)
        if len(vertices) == 0:
            self._vertices = CoordString([], ring=ring)
        elif not isinstance(vertices[0], Point):
            self._vertices = CoordString(_flatten(vertices), ring=ring)
        else:
            # list of Points: take their raw vertices, and inherit their
            # CRS unless one was given explicitly
            self._vertices = CoordString([point._vertex for point in vertices], ring=ring)
            if "crs" not in kwargs:
                self.crs = vertices[0].crs
        return

    def __eq__(self, other):
        try:
            return (self._geotype == other._geotype) and \
                   (len(self._vertices) == len(other._vertices)) and \
                   np.all(np.equal(self._vertices, other._vertices)) and \
                   (self.properties == other.properties) and \
                   (self.crs == other.crs)
        except (AttributeError, TypeError, ValueError):
            return False

    def __ne__(self, other):
        # bug fix: this method was previously named __neq__ (never invoked
        # by the ``!=`` operator) and returned ``~(self == other)``, a
        # bitwise negation that yields a truthy int for either boolean
        return not (self == other)

    # keep the old misspelled name for any code that called it directly
    __neq__ = __ne__

    def __hash__(self):
        ht = hash(self._geotype)
        hd = hash(self._vertices.asarray().tostring())
        hc = hash(str(self.crs))
        return ht + (hd << 1) + hd + hc

    def __getitem__(self, key):
        if isinstance(key, (int, np.int64)):
            return Point(self._vertices[key], properties=self.properties, crs=self.crs)
        elif isinstance(key, slice):
            # slicing returns a geometry of the same type over a vertex subset
            start, stop, stride = key.indices(len(self._vertices))
            verts = self._vertices.slice(start, stop, stride)
            return type(self)(verts, properties=self.properties, crs=self.crs)
        else:
            raise KeyError('index must be integer or slice object')

    def __iter__(self):
        return (self[i] for i in range(len(self)))

    def __len__(self):
        return len(self._vertices)
class MultiVertexMixin(object):
    """ Operations shared by all multi-vertex geometries: coordinate
    access, bounding boxes, affine transforms, and per-vertex distance
    queries. Expects ``self._vertices`` (CoordString) and ``self.crs``. """

    def coords(self, crs=None):
        """ Return horizontal coordinate lists.

        Parameters
        ----------
        crs : karta.CRS, optional
            coordinate system of output vertices
        """
        x, y = self._vertices.vectors()[:2]
        if crs is not None and (crs != self.crs):
            x, y = _reproject((x, y), self.crs, crs)
        return x, y

    def vertices(self, crs=None, drop_z=False):
        """ Return vertices as an array.

        Parameters
        ----------
        crs : karta.CRS, optional
            coordinate system of output vertices
        drop_z : bool, optional
            whether to discard third dimension for rank 3 geometries
        """
        if (crs is None) or (crs is self.crs):
            if not drop_z:
                return self._vertices.asarray()
            else:
                return self._vertices.asarray()[:,:2]
        else:
            # reproject x, y and carry the z column through unchanged
            v = self._vertices.vectors(drop_z=drop_z)
            vt = _reproject(v[:2], self.crs, crs)
            if len(v) == 3:
                vt.append(v[2])
            return np.vstack(vt).T

    @cache_decorator("bbox")
    def bbox(self, crs=None):
        """ Return the bounding box.

        Parameters
        ----------
        crs : karta.CRS
            coordinate system of output bounding box

        Returns
        -------
        tuple
            (xmin, ymin, xmax, ymax)

        Notes
        -----
        - If CRS is Geographical, returns bbox computed using a spherical
          approximation.
        """
        if crs is not None and (crs != self.crs):
            cs = CoordString(list(zip(
                    *self.crs.transform(crs, *self._vertices.vectors(drop_z=True)))))
        else:
            cs = self._vertices
            crs = self.crs
        if isinstance(crs, GeographicalCRS):
            # dateline-aware computation for geographical coordinates
            return _cdateline.bbox(cs)
        else:
            return _cvectorgeo.bbox(cs)

    @cache_decorator("extent")
    def extent(self, crs=None):
        """ Calculate geometry extent.

        Parameters
        ----------
        crs : karta.CRS
            coordinate system of output

        Returns
        -------
        tuple
            (xmin, xmax, ymin, ymax)
        """
        # same values as bbox, reordered to matplotlib-style extent
        bb = self.bbox(crs=crs)
        return bb[0], bb[2], bb[1], bb[3]

    def _bbox_overlap(self, other):
        """ Whether bounding box overlaps with that of another Geometry. """
        reg0 = self.bbox()
        reg1 = other.bbox(crs=self.crs)
        return (reg0[0] <= reg1[2] and reg1[0] <= reg0[2] and
                reg0[1] <= reg1[3] and reg1[1] <= reg0[3])

    def apply_transform(self, M):
        """ Apply an affine transform given by matrix *M* to data and return a
        new geometry.

        Parameters
        ----------
        M : ndarray
            2x3 or 3x4 affine transformation matrix, representing a
            two-dimensional or a three-dimensional transformation,
            respectively.

        Returns
        -------
        Geometry

        Notes
        -----
        The output coordinates are computed as::

            xnew        x
                 = M *  y
            ynew        1

        or::

            xnew        x
            ynew = M *  y
            znew        z
                        1
        """
        if len(self._vertices) == 0:
            raise ValueError("cannot transform zero length geometry")
        if M.shape == (2, 3):
            # promote a 2D affine matrix to its 3D equivalent (z unchanged)
            N = np.zeros((3, 4), dtype=np.float64)
            N[:2,:2] = M[:,:2]
            N[:2,3] = M[:,2]
            N[2, 2] = 1.0
            M = N
        elif M.shape != (3, 4):
            raise ValueError("invalid affine matrix size: {0}".format(M.shape))
        xy = self.vertices()
        if xy.shape[1] == 2:
            rank = 2
            # bug fix: homogeneous coordinates for rank 2 input need both a
            # zero z-column and a ones column; previously only the zero
            # column was appended, so the (3,4) x (3,n) product raised a
            # ValueError for every 2D transform
            xy = np.hstack([xy, np.zeros((len(xy), 1), dtype=np.float64),
                                np.ones((len(xy), 1), dtype=np.float64)])
        else:
            rank = 3
            xy = np.hstack([xy, np.ones((len(xy), 1), dtype=np.float64)])
        new_xy = np.dot(M, xy.T).T
        if rank == 2:
            # discard the synthetic z column
            new_xy = new_xy[:,:2]
        if hasattr(self, "data"):
            return type(self)(new_xy, properties=self.properties, data=self.data, crs=self.crs)
        else:
            return type(self)(new_xy, properties=self.properties, crs=self.crs)

    def shift(self, shift_vector):
        """ Shift geometry in space.

        Parameters
        ----------
        shift_vector : iterable
            (xshift, yshift) pair defining the shift

        Returns
        -------
        Geometry
            shifted copy (the original geometry is not modified)
        """
        if len(shift_vector) != 2:
            raise ValueError("shift vector must be of the form (xshift, yshift)")
        trans_mat = np.array([[1, 0, shift_vector[0]],
                              [0, 1, shift_vector[1]]], dtype=np.float64)
        return self.apply_transform(trans_mat)

    def _subset(self, idxs):
        """ Return a subset defined by indices. """
        vertices = [self._vertices[i] for i in idxs]
        if hasattr(self, "data"):
            # carry the matching attribute-table rows along
            data = Table(data=[self.data._data[i] for i in idxs], fields=self.data.fields)
            return type(self)(vertices, properties=self.properties, data=data, crs=self.crs)
        else:
            return type(self)(vertices, properties=self.properties, crs=self.crs)

    def flat_distances_to(self, pt):
        """ Return the "flat Earth" distance from each vertex to a point. """
        A = self._vertices.asarray()
        P = np.tile(np.array(pt._vertex), (A.shape[0], 1))
        d = np.sqrt(np.sum((A-P)**2, 1))
        return d

    def distances_to(self, pt):
        """ Return the distance from each vertex to a point. """
        d = [pt.distance(a) for a in self]
        return np.array(d)

    def nearest_vertex_to(self, point):
        """ Returns the index of the vertex that is nearest to a point. If two
        points are equidistant, only one will be returned.

        Parameters
        ----------
        point : Point
            target point

        Returns
        -------
        int
        """
        distances = self.distances_to(point)
        idx = np.argmin(distances)
        return idx

    def any_within_poly(self, poly):
        """ Return whether any vertices are inside *poly* """
        for pt in self:
            if poly.contains(pt):
                return True
        return False

    def convex_hull(self):
        """ Return a Polygon representing the convex hull.

        Notes
        -----
        - If CRS is Geographical, returns hull computed using a spherical
          approximation. Failure may occur if the vertices are not in euclidian
          position.
        """
        if isinstance(self.crs, GeographicalCRS):
            indices = _cconvexhull.convexhull_sph(self._vertices)
        else:
            indices = _cconvexhull.convexhull(self._vertices)
        return Polygon([self._vertices[i] for i in indices], crs=self.crs)
class ConnectedMultiVertexMixin(MultiVertexMixin):
    """ Operations for geometries whose vertices form connected segments
    (lines and polygon boundaries): length, segment iteration,
    intersections, and nearest-point queries. """

    @cache_decorator("bbox")
    def bbox(self, crs=None):
        """ Dateline-aware bbox for geometries consisting of connected
        vertices.

        Parameters
        ----------
        crs : karta.CRS
            coordinate system of output bounding box

        Returns
        -------
        tuple
            (xmin, ymin, xmax, ymax)
        """
        if crs is not None and (crs != self.crs):
            cs = CoordString(list(zip(
                    *self.crs.transform(crs, *self._vertices.vectors(drop_z=True)))))
        else:
            cs = self._vertices
            crs = self.crs
        if isinstance(crs, GeographicalCRS):
            bbox = _cdateline.bbox(cs)
        else:
            bbox = super(ConnectedMultiVertexMixin, self).bbox(crs=crs)
        return bbox

    @property
    @cache_decorator("length")
    def length(self):
        """ Returns the length of the line/boundary.

        Notes
        -----
        - If CRS is Geographical, returns length computed using distance
          provided by the CRS instance.
        """
        if isinstance(self.crs, GeographicalCRS):
            # sum geodetic segment lengths vertex to vertex
            lon, lat = self.vertices()[0][:2]
            d = 0.0
            for xy in self.vertices()[1:]:
                d += self.crs.inverse(lon, lat, xy[0], xy[1])[2]
                lon = xy[0]
                lat = xy[1]
            return d
        else:
            return _cvectorgeo.length(self._vertices)

    @property
    def segments(self):
        """ Returns an generator of adjacent line segments. """
        return (self._subset((i,i+1)) for i in range(len(self)-1))

    @property
    def segment_tuples(self):
        """ Returns an generator of adjacent line segments as coordinate tuples. """
        return ((self._vertices[i], self._vertices[i+1])
                for i in range(len(self._vertices)-1))

    def intersects(self, other):
        """ Return whether an intersection exists with another geometry.

        Parameters
        ----------
        other : Geometry
            another geometry with multiple connected vertices

        Notes
        -----
        - If CRS is Geographical, uses a spherical approximation.
        """
        # cheap bbox rejection before the segment-level test
        if _cintersection.bboxes_overlap(self.bbox(), other.bbox(self.crs)):
            if isinstance(self.crs, GeographicalCRS):
                return _cintersection.intersects_sph(self._vertices, other._vertices)
            else:
                return _cintersection.intersects(self._vertices, other._vertices)
        else:
            return False

    def intersections(self, other, keep_duplicates=False):
        """ Return the intersections with another geometry as a Multipoint.

        Parameters
        ----------
        other : Geometry
            another geometry with multipl connected vertices
        keep_duplicates : bool, optional
            whether to retain duplicate intersections [default False]

        Notes
        -----
        - If CRS is Geographical, uses a spherical approximation.
        """
        if isinstance(self.crs, CartesianCRS):
            interx = _cintersection.all_intersections(self._vertices, other._vertices)
            if not keep_duplicates:
                interx = list(set(interx))
            return Multipoint(interx, crs=self.crs)
        else:
            # pairwise great-circle intersection of every segment pair
            interx = (geodesy.intersection_spherical(a, b)
                      for a in self.segment_tuples
                      for b in other.segment_tuples)
            if not keep_duplicates:
                interx = list(set(interx))
            return Multipoint(interx, crs=self.crs)

    def _nearest_to_point(self, point):
        """ Return a tuple of the shortest distance on the geometry boundary to
        a point, and the vertex at that location.

        If necessary, project coordinates to the local coordinate system.

        Parameters
        ----------
        point : Point

        Notes
        -----
        - If CRS is Geographical, uses distance defined by the CRS instance.
        """
        ptvertex = point.vertex(crs=self.crs)
        segments = zip(self._vertices.slice(0, -1).asarray(),
                       self._vertices.slice(1, 0).asarray())
        if isinstance(self.crs, CartesianCRS):
            # cleanup: a dead ``func = _cvectorgeo.pt_nearest_planar``
            # assignment, immediately shadowed by the def below, was removed
            def func(seg):
                return _cvectorgeo.pt_nearest_planar(ptvertex[0], ptvertex[1],
                        seg[0][0], seg[0][1], seg[1][0], seg[1][1])
        else:
            fwd = self.crs.forward
            inv = self.crs.inverse
            def func(seg):
                return _cvectorgeo.pt_nearest_proj(fwd, inv, ptvertex,
                        seg[0], seg[1], tol=0.01)
        point_dist = map(func, segments)
        min_point = None
        min_dist = -1.0
        # i == 0 seeds the running minimum (loop var renamed from *point*,
        # which shadowed the parameter)
        for i, (vert, dist) in enumerate(point_dist):
            if dist < min_dist or (i == 0):
                min_point = vert
                min_dist = dist
        return min_dist, min_point

    def shortest_distance_to(self, pt):
        """ Return the shortest distance from any position on the geometry
        boundary to a point.

        Parameters
        ----------
        point : Point

        Notes
        -----
        - If CRS is Geographical, uses distance defined by the CRS instance.
        """
        return self._nearest_to_point(pt)[0]

    def nearest_on_boundary(self, point):
        """ Returns the position on the geometry boundary that is nearest to
        a point. If two points are equidistant, only one will be returned.

        Parameters
        ----------
        point : Point

        Notes
        -----
        - If CRS is Geographical, uses distance defined by the CRS instance.
        """
        _, minpt = self._nearest_to_point(point)
        return Point(minpt, crs=self.crs)

    def within_distance(self, point, distance):
        """ Test whether a point is within *distance* geometry.

        Parameters
        ----------
        point : Point
        distance : float

        Notes
        -----
        - If CRS is Geographical, uses distance defined by the CRS instance.
        """
        # bug fix: this previously used all(), which required the point to
        # be near *every* segment; nearness to any one segment means the
        # point is within *distance* of the geometry
        return any(distance >= seg.shortest_distance_to(point)
                   for seg in self.segments)

    def crosses_dateline(self):
        """ Return a boolean that indicates whether any segment crosses the
        dateline.
        """
        if not isinstance(self.crs, GeographicalCRS):
            raise CRSError("Dateline detection only defined for geographical "
                           "coordinates")
        def _seg_crosses_dateline(seg):
            a, b = seg[0], seg[1]
            # bug fix: ``sign`` was an unresolved name in this module's
            # visible imports; np.sign provides the intended comparison
            return (np.sign(a.x) != np.sign(b.x)) and (abs(a.x-b.x) > 180.0)
        return any(_seg_crosses_dateline(seg) for seg in self.segments)
class Line(MultiVertexBase, ConnectedMultiVertexMixin, Rotatable, GeoJSONOutMixin, ShapefileOutMixin):
    """ Line composed of connected vertices.

    Parameters
    ----------
    coords : list of 2-tuples or 3-tuples
        vertex coordinates
    properties : dict, optional
        geometry specific metadata
    crs : karta.CRS, optional
        (default Cartesian)
    """
    def __init__(self, vertices, **kwargs):
        """ Partial init function that creates a metadata attribute.
        """
        super(Line, self).__init__(vertices, **kwargs)
        self._geotype = "Line"
        return
    def __add__(self, other):
        # concatenating two lines produces a two-part Multiline in self's CRS
        return Multiline([self._vertices, other.vertices(self.crs)],
                         crs=self.crs)
    @property
    def __geo_interface__(self):
        # GeoJSON Feature mapping with the proj.4 string in the properties
        p = self.properties.copy()
        p["_karta_proj4"] = self.crs.get_proj4()
        return {"type": "Feature",
                "geometry": self.geomdict,
                "properties": p}
    @property
    def geomdict(self):
        # GeoJSON-style geometry mapping for this line
        return {"type" : "LineString",
                "bbox" : self.bbox(),
                "coordinates" : _as_nested_lists(self._vertices)}
    def extend(self, other):
        """ Combine two lines, provided that that the data formats are similar.
        """
        if len(self._vertices[0]) != len(other._vertices[0]):
            raise ValueError("Rank mismatch ({0} != "
                             "{1})".format(self._vertices.shape[1],
                                           other._vertices.shape[1]))
        if self._geotype != other._geotype:
            raise TypeError("Geometry mismatch ({0} != "
                            "{1})".format(self._geotype, other._geotype))
        # NOTE(review): np.vstack returns an ndarray, while self._vertices is
        # otherwise a CoordString -- confirm downstream methods that call
        # CoordString-specific APIs (e.g. .slice) tolerate this
        self._vertices = np.vstack([self._vertices, other._vertices])
        # invalidate cached values (bbox, length) that depend on the vertices
        self._cache = {}
        return self
    def cumulength(self):
        """ Returns the cumulative length by vertex.

        Notes
        -----
        - If CRS is Geographical, uses distance defined by the CRS instance.
        """
        d = [0.0]
        pta = self[0]
        for ptb in self[1:]:
            d_ = pta.distance(ptb)
            d.append(d_ + d[-1])
            pta = ptb
        return d
    def to_points(self, dx):
        """ Return equally spaced Point instances along line.

        Parameters
        ----------
        dx : float
            spacing of points
        """
        # *remainder* carries the distance already covered within the
        # current step when a vertex interrupts it
        remainder = 0
        pt0 = self[0]
        vertices = [pt0.vertex()]
        for seg in self.segments:
            pos = 0
            az = seg[0].azimuth(seg[1])
            while pos < seg.length:
                distance_to_endpt = pt0.distance(seg[1])
                if distance_to_endpt >= dx:
                    # a full step fits within this segment
                    pt1 = pt0.walk(dx - remainder, az)
                    pos += dx - remainder
                    vertices.append(pt1.vertex())
                    remainder = 0
                    pt0 = pt1
                else:
                    # step spills into the next segment; bank the progress
                    remainder = distance_to_endpt
                    pos = seg.length
                    pt0 = seg[1]
        return Multipoint(vertices, crs=self.crs)
    def to_npoints(self, n):
        """ Return *n* equally spaced Point instances along line.

        Parameters
        ----------
        n : int
            number of points to return
        """
        segments = self.segments
        Ltotal = self.cumulength()[-1]
        # n-1 equal steps between the first and last vertex
        step = Ltotal / float(n-1)
        step_remaining = step
        vertices = [self[0].vertex()]
        x = 0.0
        pos = self[0]
        seg = next(segments)
        seg_remaining = seg.displacement()
        while x < Ltotal-1e-8:
            direction = seg[0].azimuth(seg[1])
            if step_remaining <= seg_remaining:
                # the next sample lies within the current segment
                pos = pos.walk(step_remaining, direction)
                x += step_remaining
                seg_remaining -= step_remaining
                step_remaining = step
                vertices.append(pos.vertex())
                # shrink the segment so azimuth is measured from *pos*
                seg._vertices[0] = np.array(pos._vertex, dtype=np.float64)
            else:
                # exhaust this segment and continue with the next one
                pos = seg[1]
                x += seg_remaining
                step_remaining -= seg_remaining
                seg = next(segments, seg)
                seg_remaining = seg.displacement()
        if len(vertices) == n-1:
            # guard against floating point undershoot of the final vertex
            vertices.append(seg[-1].vertex())
        return Multipoint(vertices, crs=self.crs)
    def displacement(self):
        """ Returns the distance between the first and last vertex.

        Notes
        -----
        - If CRS is Geographical, uses distance defined by the CRS instance.
        """
        return self[0].distance(self[-1])
    def to_polygon(self):
        """ Returns a polygon. """
        return Polygon(self._vertices, properties=self.properties, crs=self.crs)
class Polygon(MultiVertexBase, Rotatable, ConnectedMultiVertexMixin, GeoJSONOutMixin, ShapefileOutMixin):
    """ Polygon, composed of a closed sequence of vertices.

    Parameters
    ----------
    coords : list of 2-tuples or 3-tuples
        vertex coordinates
    subs : list of Polygon instances, optional
        sub-polygons [default None]
    properties : dict, optional
        geometry specific metadata
    crs : karta.CRS, optional
        (default Cartesian)
    """
    def __init__(self, vertices, subs=None, **kwargs):
        """ Partial init function that creates a metadata attribute.
        """
        super(Polygon, self).__init__(vertices, ring=True, **kwargs)
        self._geotype = "Polygon"
        if subs is not None:
            self.subs = list(subs)
        else:
            self.subs = []
        return
    def __getitem__(self, key):
        if isinstance(key, slice):
            start, stop, stride = key.indices(len(self._vertices))
            # a partial slice of a ring is open, so return a Line rather
            # than a (closed) Polygon
            if len(self) != ((stop - start) // stride):
                return Line(self._vertices.slice(start, stop, stride),
                            properties=self.properties,
                            crs=self.crs)
        return super(Polygon, self).__getitem__(key)
    def __add__(self, other):
        return Multipolygon([[self._vertices], [other.vertices(self.crs)]],
                            crs=self.crs)
    @property
    def __geo_interface__(self):
        # GeoJSON Feature mapping with the proj.4 string in the properties
        p = self.properties.copy()
        p["_karta_proj4"] = self.crs.get_proj4()
        return {"type": "Feature",
                "geometry": self.geomdict,
                "properties": p}
    @property
    def vertices_ring(self):
        """ Return vertices as a closed ring """
        # inefficient implementation
        vertices = [xy for xy in self._vertices]
        vertices.append(vertices[0])
        return CoordString(vertices)
    @property
    def geomdict(self):
        # exterior ring first, then each sub-polygon ring (GeoJSON layout)
        coords = [_as_nested_lists(self.vertices_ring)]
        for geom in self.subs:
            coords.append(_as_nested_lists(geom.vertices_ring))
        return {"type" : "Polygon",
                "bbox" : self.bbox(),
                "coordinates" : coords}
    def _subset(self, idxs):
        """ Return a subset defined by index in *idxs*. """
        # subsets of a ring are open, hence a Line
        vertices = [self._vertices[i] for i in idxs]
        subset = Line(vertices, properties=self.properties, crs=self.crs)
        return subset
    def isclockwise(self):
        """ Return whether polygon winds clockwise around its interior. """
        # shoelace-style signed sum; positive indicates clockwise winding
        s = sum((seg[1][0] - seg[0][0]) * (seg[1][1] + seg[0][1])
                for seg in self.segment_tuples)
        return s > 0
    def ispolar(self, pole=None):
        """ Return True if polygon contains one pole. If the polygon contains
        neither or both poles, returns False.

        Parameters
        ----------
        pole : Point, optional
            (default point on a sphere at 0 longitude, 90 latitude)
        """
        if not isinstance(self.crs, GeographicalCRS):
            raise CRSError("ispolar defined only for geographical CRS")
        if pole is None:
            pole = Point((0, 90), crs=SphericalEarth)
        # accumulate signed longitude swept by the boundary; a polar ring
        # winds a full 360 degrees, a non-polar ring sums to ~0
        lon0 = geodesy.reduce_deg(self[-1]._vertex[0])
        sum_angle = 0.0
        for vertex in self._vertices:
            lon1 = geodesy.reduce_deg(vertex[0])
            if _cdateline.crosses_dateline(lon0, lon1):
                sum_angle += 360.0 + lon1 - lon0
            else:
                sum_angle += lon1 - lon0
            lon0 = lon1
        return True if abs(sum_angle) > 1e-4 else False
    @property
    def segments(self):
        """ Returns a generator of adjacent line segments.
        """
        # appends the closing segment from the last vertex back to the first
        L = len(self._vertices)
        return itertools.chain((self._subset((i,i+1)) for i in range(len(self)-1)),
                               (self._subset((L-1,0)),))
    @property
    def segment_tuples(self):
        """ Returns a generator of adjacent line segments as coordinate
        tuples. """
        # i-1 indexing makes the first pair (last_vertex, first_vertex),
        # closing the ring
        return ((self._vertices[i-1], self._vertices[i])
                for i in range(len(self._vertices)))
    @property
    def length(self):
        # length is deliberately undefined for a closed ring; use perimeter
        raise AttributeError("%s instance has no attribute 'length'" % type(self))
    @property
    def perimeter(self):
        """ Return the perimeter of the polygon. If there are sub-polygons,
        their perimeters are added recursively.

        Notes
        -----
        - If CRS is Geographical, uses distance defined by the CRS instance.
        """
        return sum(seg.length for seg in self.segments) + \
                sum([p.perimeter for p in self.subs])
    @property
    def area(self):
        """ Return the two-dimensional area of the polygon, excluding
        sub-polygons.

        Notes
        -----
        - If CRS is Geographical, uses either a spherical or an ellipsoidal
          calculation.
        """
        if isinstance(self.crs, GeographicalCRS):
            major_axis = self.crs.ellipsoid.a
            minor_axis = self.crs.ellipsoid.b
            area = 0.0
            if major_axis == minor_axis: # Sphere
                for seg in self.segment_tuples:
                    x1, y1 = seg[0]
                    x2, y2 = seg[1]
                    area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)
            else:
                for seg in self.segment_tuples:
                    x1, y1 = seg[0]
                    x2, y2 = seg[1]
                    area += geodesy.ellipsoidal_area(major_axis, minor_axis,
                                                     x1, y1, x2, y2)
        else:
            # Cartesian coordinate systems
            # shoelace formula, shifted by x0 for numerical stability
            x, y = self.coords()
            x0 = np.min(x)
            area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])
            area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))
        # holes (sub-polygons) are subtracted from the exterior area
        return abs(area) - sum(sub.area for sub in self.subs)
    @property
    def centroid(self):
        """ Return Polygon centroid as a Point, ignoring sub-polygons. """
        # standard signed-area centroid formula for a simple polygon
        x, y = self.coords()
        A = 0.5 * sum(x[i]*y[i+1] - x[i+1]*y[i] for i in range(-1, len(self)-1))
        cx = sum((x[i] + x[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])
                    for i in range(-1, len(self)-1)) / (6*A)
        cy = sum((y[i] + y[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])
                    for i in range(-1, len(self)-1)) / (6*A)
        return Point((cx, cy), properties=self.properties, crs=self.crs)
    def contains(self, point):
        """ Returns True if point is inside or on the boundary of the polygon,
        and False otherwise. Uses a crossing number scheme.

        Notes
        -----
        - When the polygon is polar in a geographical coordinate system, a less
          efficient algorithm is used. For better performance, consider
          projecting to an appropriate coordinate system such as NSIDCNorth or
          NSIDCSouth beforehand.
        - Otherwise, a planar algorithm is used.
        """
        x, y = point.vertex(crs=self.crs)[:2]
        if isinstance(self.crs, GeographicalCRS) and self.ispolar():
            return _ccontains.contains_proj(x, y, self._vertices, self.crs) \
                    and not any(p.contains(point) for p in self.subs)
        else:
            return _ccontains.contains(x, y, self._vertices) and \
                    not any(p.contains(point) for p in self.subs)
    def to_line(self):
        """ Returns a self-closing polyline. Discards sub-polygons. """
        # NOTE(review): closing the ring relies on CoordString.__add__
        # accepting a single vertex -- confirm its semantics
        v = self._vertices + self._vertices[0]
        return Line(v, properties=self.properties, crs=self.crs)
class Multipart(Geometry):
    """ Base for objects consisting of multiple singular types.

    Maintains a Table (``self.data``) of per-part attributes alongside the
    vertex container set by subclasses.
    """
    def __init__(self, inputs, data=None, **kwargs):
        super(Multipart, self).__init__(**kwargs)
        if isinstance(data, Table):
            self.data = data
        elif data is None or len(data) == 0:
            # no per-part data: build an empty table sized to the vertices
            # (subclasses assign self._vertices before calling this)
            self.data = Table(size=len(self._vertices))
        else:
            # validate that every data column matches the number of parts
            for k, v in data.items():
                if len(v) != len(inputs):
                    raise ValueError("length of `data` member '{k}' ({n}) not "
                                     "equal to length of inputs ({m})".format(
                                        k=k, n=len(v), m=len(inputs)))
            self.data = Table(data)
        return

    @property
    def d(self):
        # Indexer view over the per-part attribute table
        return table.Indexer(self.data)

    def __eq__(self, other):
        try:
            return (self._geotype == other._geotype) and \
                   (len(self._vertices) == len(other._vertices)) and \
                   np.all(np.equal(self._vertices, other._vertices)) and \
                   (self.data == other.data) and \
                   (self.properties == other.properties) and \
                   (self.crs == other.crs)
        except (AttributeError, TypeError):
            return False

    def __ne__(self, other):
        # bug fix: previously named __neq__ (never invoked by ``!=``) and
        # implemented as ``~(self == other)``, a bitwise negation that is
        # truthy for both True and False operands
        return not (self == other)

    # keep the old misspelled name for any code that called it directly
    __neq__ = __ne__

    def __hash__(self):
        ht = hash(self._geotype)
        hd = hash(self._vertices.asarray().tostring())
        hc = hash(str(self.crs))
        return ht + (hd << 1) + hd + hc

    def __contains__(self, other):
        # membership by equality against each constituent part
        return any(other == part for part in self)

    def __len__(self):
        return len(self._vertices)
class Multipoint(Multipart, Rotatable, MultiVertexMixin, GeoJSONOutMixin, ShapefileOutMixin):
""" Point cloud with associated attributes.
Parameters
----------
inputs : list
list of 2-tuples, 3-tuples, or Points defining vertices
data : list, dict, Table object, or None
point-specific data [default None]
properties : dict or None
geometry specific data [default None]
crs : karta.crs.CRS subclass
[default Cartesian]
"""
    def __init__(self, inputs, build_index=True, **kwargs):
        # Accepts a generator, a CoordString, a list of Points, or a list
        # of coordinate tuples.
        if hasattr(inputs, "__next__") and not isinstance(inputs, CoordString):
            inputs = list(inputs)
        if isinstance(inputs, CoordString):
            self._vertices = inputs
        elif len(inputs) == 0:
            self._vertices = CoordString([])
        elif isinstance(inputs[0], Point):
            # list of Points: adopt their CRS (unless given explicitly) and
            # fold their per-point properties into the attribute table
            crs = kwargs.get("crs", inputs[0].crs)
            self._vertices = CoordString([point.vertex(crs=crs) for point in inputs])
            # NOTE(review): merge_properties is not among this module's
            # visible imports -- confirm it is defined elsewhere in the file
            data = merge_properties([point.properties for point in inputs])
            kwargs["data"] = Table(kwargs.get("data", {})).updated(data)
            kwargs.setdefault("crs", inputs[0].crs)
        else:
            self._vertices = CoordString(inputs)
        super(Multipoint, self).__init__(inputs, **kwargs)
        if build_index:
            # spatial index used for fast point queries
            self.quadtree = QuadTree(self._vertices, leaf_capacity=50)
        self._geotype = "Multipoint"
        return
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
p = self.d[key]
p.update(self.properties)
return Point(self._vertices[key], properties=p, crs=self.crs)
elif isinstance(key, slice):
start, stop, stride = key.indices(len(self._vertices))
return Multipoint(self._vertices.slice(start, stop, stride),
properties=self.properties,
data=self.d[key], crs=self.crs)
else:
raise KeyError(type(key))
def __setitem__(self, key, value):
if isinstance(key, numbers.Integral):
if hasattr(value, "vertex"):
self._vertices[key] = np.array(value.vertex(self.crs), dtype=np.float64)
row = []
for field in self.data.fields:
row.append(value.properties.get(field, None))
self.data[key] = tuple(row)
else:
if len(value) == len(self._vertices[0]):
verts = self._vertices.asarray()
verts[key] = value
self._vertices = CoordString(verts)
self.data[key] = (None for _ in self.data.fields)
@property
def geomdict(self):
return {"type" : "MultiPoint",
"bbox" : self.bbox(),
"coordinates" : _as_nested_lists(self._vertices)}
@property
def __geo_interface__(self):
p = self.properties.copy()
p["_karta_proj4"] = self.crs.get_proj4()
return {"type": "Feature",
"geometry": self.geomdict,
"properties": p}
@classmethod
def merge(cls, *items, **kwargs):
""" Merge multiple Point and Multipoint instances.
Parameters
----------
*items : Point/Multipolygon instances
crs : CRS object, optional
Returns
-------
Multipoint
"""
if len(items) == 0:
raise ValueError("must provide at least one geometry")
crs = kwargs.get("crs", items[0].crs)
vertices, mappings = [], []
for item in items:
t = getattr(item, "_geotype", None)
if t == "Multipoint":
vertices.append(item.vertices(crs=crs))
mappings.append(item.data)
elif t == "Point":
vertices.append(np.array(item.vertex(crs=crs))[np.newaxis, :])
mappings.append(item.properties)
rankmin = min(arr.shape[1] for arr in vertices)
rankmax = max(arr.shape[1] for arr in vertices)
if rankmin != rankmax:
for i, v in enumerate(vertices):
vertices[i] = v[:,:rankmin]
data = table.merge(mappings)
return Multipoint(np.vstack(vertices), data=data, crs=crs)
def within_radius(self, point, radius):
""" Return subset of Multipoint within a radius. Items on the border
are excluded.
Parameters
----------
point : Point
point to to center filter at
radius : float
maximum distance from *point*
Returns
-------
Multipoint
"""
if hasattr(self, "quadtree"):
candidate_indices = self.quadtree.search_within(point.x-radius,
point.y-radius,
point.x+radius,
point.y+radius)
confirmed_indices = []
for i in candidate_indices:
if point.distance(self[i]) < radius:
confirmed_indices.append(i)
confirmed_indices.sort()
else:
confirmed_indices = [i for i,d in enumerate(self.distances_to(point))
if d < radius]
return self._subset(confirmed_indices)
def within_bbox(self, bbox):
""" Return Multipoint subset that is within a square bounding box
given by (xmin, xymin, xmax, ymax).
"""
if hasattr(self, "quadtree"):
indices = self.quadtree.search_within(*bbox)
indices.sort()
else:
indices = [i for (i, pt) in enumerate(self)
if (bbox[0] < pt.x < bbox[2]) and (bbox[1] < pt.y < bbox[3])]
return self._subset(indices)
def within_polygon(self, poly):
""" Return Multipoint subset that is within a polygon.
"""
if hasattr(self, "quadtree"):
bbox = poly.bbox(crs=self.crs)
candidate_indices = self.quadtree.search_within(*bbox)
confirmed_indices = []
for i in candidate_indices:
if poly.contains(self[i]):
confirmed_indices.append(i)
confirmed_indices.sort()
else:
confirmed_indices = [i for (i, point) in enumerate(self)
if poly.contains(point)]
return self._subset(confirmed_indices)
class MultiVertexMultipartMixin(object):
    """ Mix-in class for multipart classes for which it is reasonable to ask
    whether member geometries are within or touching a multi-vertex geometry.
    E.g.
    - "which members touch this Line/Polygon?"
    - "which members are contained by this Polygon?"
    """
    @cache_decorator("bbox")
    def bbox(self, crs=None):
        # Union of the member bounding boxes; result is memoized per CRS
        bbs = [part.bbox(crs=crs) for part in self]
        xmin = min([bb[0] for bb in bbs])
        ymin = min([bb[1] for bb in bbs])
        xmax = max([bb[2] for bb in bbs])
        ymax = max([bb[3] for bb in bbs])
        return (xmin, ymin, xmax, ymax)
    @cache_decorator("extent")
    def extent(self, crs=None):
        # Same data as bbox() but ordered (xmin, xmax, ymin, ymax)
        bb = self.bbox(crs=crs)
        return bb[0], bb[2], bb[1], bb[3]
    def apply_transform(self, M):
        """ Apply an affine transform given by matrix *M* to data and return a
        new Point.
        Parameters
        ----------
        M : ndarray
            2x3 or 3x4 affine transformation matrix, representing a
            two-dimensional or a three-dimensional transformation,
            respectively.
        Returns
        -------
        Geometry
        Notes
        -----
        The output coordinates are computed as::
            xnew            x
                  =  M  *   y
            ynew            1
        or::
            xnew            x
            ynew  =  M  *   y
            znew            z
                            1
        """
        if M.shape == (2, 3):
            # Promote a 2D affine matrix to its 3D equivalent so member
            # geometries only need to handle the 3x4 case
            N = np.zeros((3, 4), dtype=np.float64)
            N[:2,:2] = M[:,:2]
            N[:2,3] = M[:,2]
            N[2, 2] = 1.0
            M = N
        elif M.shape != (3, 4):
            raise ValueError("invalid affine matrix size: {0}".format(M.shape))
        parts = []
        for part in self:
            parts.append(part.apply_transform(M))
        return type(self)(parts, properties=self.properties, data=self.data, crs=self.crs)
    def within_bbox(self, bbox, max_results=-1):
        """ Return Multipart geometry representing member geometries that are
        contained by a bounding box.
        Parameters
        ----------
        bbox : tuple
            (xmin, ymin, xmax, ymax)
        """
        # max_results=-1 means unlimited (forwarded to the r-tree)
        indices = self.rtree.search_within(bbox, max_results=max_results)
        return type(self)([self[i] for i in indices])
    def touching_bbox(self, bbox, max_results=-1):
        """ Return Multipart geometry representing member geometries that touch
        a bounding box.
        Parameters
        ----------
        bbox : tuple
            (xmin, ymin, xmax, ymax)
        """
        indices = self.rtree.search_overlapping(bbox, max_results=max_results)
        return type(self)([self[i] for i in indices])
    def touching(self, geom):
        """ Return a Multipart geometry representing member geometries that
        touch a Line or Polygon.
        Touching is defined as intersecting a Line or Polygon boundary, or
        being contained within a Polygon.
        Parameters
        ----------
        geom : Line or Polygon
        Returns
        -------
        Multipart geometry of the same type as *self*
        """
        # Coarse candidate set from the r-tree, refined by exact tests
        indices = self.rtree.search_overlapping(geom.bbox(self.crs))
        results = []
        if isinstance(geom, Line):
            for i in indices:
                test_geom = self[i]
                if geom.intersects(test_geom):
                    results.append(test_geom)
        elif isinstance(geom, Polygon):
            for i in indices:
                test_geom = self[i]
                # A member either crosses the boundary or lies inside; a
                # single contained vertex plus no intersection means inside
                pt = test_geom[0]
                if geom.contains(pt) or geom.intersects(test_geom):
                    results.append(test_geom)
        else:
            raise TypeError("argument must be Line or Polygon")
        return type(self)(results)
    def within(self, geom):
        """ Return a Multipart geometry representing member geometries
        contained within a Polygon.
        Parameters
        ----------
        geom : Polygon
        """
        if not isinstance(geom, Polygon):
            raise TypeError("argument must be Polygon")
        indices = self.rtree.search_overlapping(geom.bbox(crs=self.crs))
        results = []
        for i in indices:
            test_geom = self[i]
            # Contained: first vertex inside and no boundary crossing
            pt = test_geom[0]
            if geom.contains(pt) and not geom.intersects(test_geom):
                results.append(test_geom)
        return type(self)(results)
class Multiline(Multipart, MultiVertexMultipartMixin, GeoJSONOutMixin,
                ShapefileOutMixin):
    """ Collection of lines with associated attributes.
    Parameters
    ----------
    inputs : list
        list of lists of 2-tuples, 3-tuples, or Lines defining lines
    data : list, dict, Table object, or None
        point-specific data [default None]
    properties : dict or None
        geometry specific data [default None]
    crs : karta.crs.CRS subclass
        [default Cartesian]
    """
    def __init__(self, inputs, build_index=True, **kwargs):
        if len(inputs) == 0:
            self._vertices = []
        elif isinstance(inputs[0], Line):
            # Built from Line objects: copy their vertices and inner-join
            # their properties into the attribute table
            crs = kwargs.get("crs", inputs[0].crs)
            self._vertices = [CoordString(line.vertices(crs=crs))
                              for line in inputs]
            data = merge_properties([line.properties for line in inputs])
            kwargs["data"] = Table(kwargs.get("data", {})).updated(data)
            kwargs.setdefault("crs", inputs[0].crs)
        else:
            self._vertices = [CoordString(part) for part in inputs]
        super(Multiline, self).__init__(inputs, **kwargs)
        if build_index:
            # Spatial index used by the MultiVertexMultipartMixin queries
            self.rtree = RTree(self._vertices)
        self._geotype = "Multiline"
        return
    def __getitem__(self, key):
        # Integer key returns a Line; slice key returns a Multiline subset
        if isinstance(key, numbers.Integral):
            properties = self.d[key]
            properties.update(self.properties)
            return Line(self._vertices[key], properties=properties, crs=self.crs)
        elif isinstance(key, slice):
            return Multiline(self._vertices[key], properties=self.properties,
                             data=self.d[key], crs=self.crs)
        else:
            raise KeyError(type(key))
    @property
    def geomdict(self):
        # GeoJSON-style geometry mapping for this line collection
        return {"type" : "MultiLineString",
                "bbox" : self.bbox(),
                "coordinates" : _as_nested_lists(self._vertices)}
    @property
    def __geo_interface__(self):
        # Python __geo_interface__ protocol; CRS travels as a proj.4 string
        p = dict(_karta_proj4=self.crs.get_proj4())
        return {"type": "Feature",
                "geometry": self.geomdict,
                "properties": p}
    @classmethod
    def merge(cls, *items, **kwargs):
        """ Merge multiple Line and Multiline instances.
        Parameters
        ----------
        *items : Line/Multiline instances
        crs : CRS object, optional
        Returns
        -------
        Multiline
        """
        if len(items) == 0:
            raise ValueError("must provide at least one geometry")
        crs = kwargs.get("crs", items[0].crs)
        vertices, mappings = [], []
        for item in items:
            # Items with an unrecognized _geotype are silently skipped
            t = getattr(item, "_geotype", None)
            if t == "Multiline":
                vertices.extend(item.vertices(crs=crs))
                mappings.append(item.data)
            elif t == "Line":
                vertices.append(item.vertices(crs=crs))
                mappings.append(item.properties)
        # When 2D and 3D inputs are mixed, truncate all to the common rank
        rankmin = min(arr.shape[1] for arr in vertices)
        rankmax = max(arr.shape[1] for arr in vertices)
        if rankmin != rankmax:
            for i, v in enumerate(vertices):
                vertices[i] = v[:,:rankmin]
        data = table.merge(mappings)
        return Multiline(vertices, data=data, crs=crs)
    def vertices(self, crs=None):
        """ Return vertices as a list of arrays.
        Parameters
        ----------
        crs : karta.CRS, optional
            coordinate system of output vertices
        """
        if crs is None or (crs == self.crs):
            return [v.asarray() for v in self._vertices]
        else:
            # Reproject per vertex; NOTE(review): only the first two
            # coordinates are reprojected, so z-values are dropped here
            vertices = []
            for line in self._vertices:
                line_vertices = [_reproject(v[:2], self.crs, crs) for v in line]
                vertices.append(np.array(line_vertices))
            return vertices
    def coords(self, crs=None):
        """ Returns a list of 2xn arrays representing lists of coordinates """
        ret = []
        for line_vertices in self.vertices(crs=crs):
            ret.append(line_vertices.T)
        return ret
class Multipolygon(Multipart, MultiVertexMultipartMixin, GeoJSONOutMixin,
                   ShapefileOutMixin):
    """ Collection of polygons with associated attributes.
    Parameters
    ----------
    inputs : list
        list of Polygons or lists of polygon rings, each consisting of 2-tuples
        or 3-tuples
    data : list, dict, Table object, or None
        point-specific data [default None]
    properties : dict or None
        geometry specific data [default None]
    crs : karta.crs.CRS subclass
        [default Cartesian]
    """
    def __init__(self, inputs, build_index=True, **kwargs):
        if len(inputs) == 0:
            self._vertices = []
        elif isinstance(inputs[0], Polygon):
            # Built from Polygon objects: each entry is a list of rings,
            # the exterior ring first, followed by any interior rings (subs)
            crs = kwargs.get("crs", inputs[0].crs)
            self._vertices = []
            for polygon in inputs:
                rings = [CoordString(polygon.vertices(crs=crs))]
                for sub in polygon.subs:
                    rings.append(sub)
                self._vertices.append(rings)
            data = merge_properties([polygon.properties for polygon in inputs])
            kwargs["data"] = Table(kwargs.get("data", {})).updated(data)
            kwargs.setdefault("crs", inputs[0].crs)
        else:
            self._vertices = []
            for part in inputs:
                rings = [CoordString(ring) for ring in part]
                self._vertices.append(rings)
        super(Multipolygon, self).__init__(inputs, **kwargs)
        if build_index:
            # Index only the exterior rings; interior rings never extend
            # beyond them
            self.rtree = RTree([v[0] for v in self._vertices])
        self._geotype = "Multipolygon"
        return
    def __getitem__(self, key):
        # Integer key returns a Polygon (with interior rings as subs);
        # slice key returns a Multipolygon subset
        if isinstance(key, numbers.Integral):
            properties = self.d[key]
            properties.update(self.properties)
            subs = []
            for vertices in self._vertices[key][1:]:
                subs.append(Polygon(vertices, properties=properties, crs=self.crs))
            vertices = self._vertices[key][0]
            return Polygon(vertices, subs=subs, properties=properties, crs=self.crs)
        elif isinstance(key, slice):
            return Multipolygon(self._vertices[key], properties=self.properties,
                                data=self.d[key], crs=self.crs)
        else:
            raise KeyError(type(key))
    @property
    def geomdict(self):
        # GeoJSON-style geometry mapping; rings are explicitly closed
        return {"type" : "MultiPolygon",
                "bbox" : self.bbox(),
                "coordinates" : _as_nested_lists(self.vertices_ring)}
    @property
    def __geo_interface__(self):
        # Python __geo_interface__ protocol; CRS travels as a proj.4 string
        p = dict(_karta_proj4=self.crs.get_proj4())
        return {"type": "Feature",
                "geometry": self.geomdict,
                "properties": p}
    @classmethod
    def merge(cls, *items, **kwargs):
        """ Merge multiple Polygon and Multipolygon instances.
        Parameters
        ----------
        *items : Polygon/Multipolygon instances
        crs : CRS object, optional
        Returns
        -------
        Multipolygon
        """
        if len(items) == 0:
            raise ValueError("must provide at least one geometry")
        crs = kwargs.get("crs", items[0].crs)
        vertices, mappings = [], []
        for item in items:
            # Items with an unrecognized _geotype are silently skipped
            t = getattr(item, "_geotype", None)
            if t == "Multipolygon":
                vertices.extend(item.vertices(crs=crs))
                mappings.append(item.data)
            elif t == "Polygon":
                v = [item.vertices(crs=crs)]
                for sub in item.subs:
                    v.append(sub.vertices(crs=crs))
                vertices.append(v)
                mappings.append(item.properties)
        rankmin = min(arr.shape[1] for arr in itertools.chain(*vertices))
        rankmax = max(arr.shape[1] for arr in itertools.chain(*vertices))
        if rankmin != rankmax:
            # BUG FIX: previously truncated with an undefined name (imin)
            # and iterated over the outer list rather than the item's own
            # rings. Truncate each polygon's rings to the common rank, as
            # Multipoint.merge and Multiline.merge do.
            for i, v in enumerate(vertices):
                vertices[i] = [ring[:,:rankmin] for ring in v]
        data = table.merge(mappings)
        return Multipolygon(vertices, data=data, crs=crs)
    @property
    def vertices_ring(self):
        # Closed-ring copies of all vertices (first vertex repeated at the
        # end, as GeoJSON requires); inefficient implementation
        vertices = []
        for poly in self._vertices:
            rings = []
            for ring in poly:
                xys = [xy for xy in ring]
                xys.append(ring[0])
                rings.append(CoordString(xys))
            vertices.append(rings)
        return vertices
    def vertices(self, crs=None):
        """ Return vertices as a list of arrays.
        Parameters
        ----------
        crs : karta.CRS, optional
            coordinate system of output vertices
        """
        if crs is None or (crs == self.crs):
            vertices = []
            for poly_vertices in self._vertices:
                vertices.append([v.asarray() for v in poly_vertices])
            return vertices
        else:
            # Reproject per vertex; NOTE(review): only the first two
            # coordinates are reprojected, so z-values are dropped here
            vertices = []
            for poly_vertices in self._vertices:
                poly = []
                for ring_vertices in poly_vertices:
                    poly.append(np.array([_reproject(v[:2], self.crs, crs)
                                          for v in ring_vertices]))
                vertices.append(poly)
            return vertices
    def coords(self, crs=None):
        """ Returns a list of 2xn arrays representing lists of coordinates """
        ret = []
        for poly_vertices in self.vertices(crs=crs):
            poly = []
            for ring_vertices in poly_vertices:
                poly.append(ring_vertices.T)
            ret.append(poly)
        return ret
def sign(a):
    """ Return the sign of *a* as +/-1; zero maps to +1 by convention. """
    return 1 if a == 0.0 else a / abs(a)
def merge_properties(prop_sets):
    """ Perform an inner join on a list of dictionaries.

    Only keys present in every dictionary survive; each surviving key maps
    to the list of its values across *prop_sets*, in input order.
    """
    shared_keys = set.intersection(*(set(p) for p in prop_sets))
    return {key: [p[key] for p in prop_sets] for key in shared_keys}
def affine_matrix(mpa, mpb):
    """ Compute the affine transformation matrix that best matches two
    Multipoint geometries using a least squares fit.
    Output is relative to the coordinate system of the first geometry, if they
    differ.
    Parameters
    ----------
    mpa, mpb : Multipoint
        matching length collection of control points to match
    """
    if len(mpa) != len(mpb):
        raise GeometryError("Input geometries must have identical length")
    # Right-hand side: target coordinates flattened into one vector
    target = np.asarray(mpb.vertices(mpa.crs)).ravel()
    # Each control point contributes two rows of the design matrix: an
    # [x y 1] block for the x-equation and one for the y-equation
    design = np.empty([2 * len(mpa), 6], dtype=np.float64)
    for i, (x, y) in enumerate(mpa.vertices()):
        design[2 * i:2 * i + 2, :] = np.kron(np.eye(2), [x, y, 1])
    solution = np.linalg.lstsq(design, target, rcond=None)[0]
    return np.reshape(solution, [2, 3])
| [
"numpy.sum",
"numpy.linalg.lstsq",
"math.sqrt",
"numpy.eye",
"math.atan2",
"numpy.zeros",
"math.sin",
"numpy.argmin",
"numpy.equal",
"numpy.min",
"numpy.array",
"math.cos",
"numpy.reshape",
"numpy.dot",
"coordstring.CoordString",
"itertools.chain",
"numpy.vstack"
] | [((63355, 63391), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'vecp'], {'rcond': 'None'}), '(A, vecp, rcond=None)\n', (63370, 63391), True, 'import numpy as np\n'), ((63403, 63424), 'numpy.reshape', 'np.reshape', (['M', '[2, 3]'], {}), '(M, [2, 3])\n', (63413, 63424), True, 'import numpy as np\n'), ((1746, 1780), 'math.cos', 'math.cos', (['(thetad * math.pi / 180.0)'], {}), '(thetad * math.pi / 180.0)\n', (1754, 1780), False, 'import math\n'), ((1792, 1826), 'math.sin', 'math.sin', (['(thetad * math.pi / 180.0)'], {}), '(thetad * math.pi / 180.0)\n', (1800, 1826), False, 'import math\n'), ((1859, 1956), 'numpy.array', 'np.array', (['[[ct, -st, -x * ct + y * st + x], [st, ct, -x * st - y * ct + y]]'], {'dtype': 'np.float64'}), '([[ct, -st, -x * ct + y * st + x], [st, ct, -x * st - y * ct + y]],\n dtype=np.float64)\n', (1867, 1956), True, 'import numpy as np\n'), ((18273, 18351), 'numpy.array', 'np.array', (['[[1, 0, shift_vector[0]], [0, 1, shift_vector[1]]]'], {'dtype': 'np.float64'}), '([[1, 0, shift_vector[0]], [0, 1, shift_vector[1]]], dtype=np.float64)\n', (18281, 18351), True, 'import numpy as np\n'), ((19304, 19315), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (19312, 19315), True, 'import numpy as np\n'), ((19697, 19717), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (19706, 19717), True, 'import numpy as np\n'), ((29123, 29167), 'numpy.vstack', 'np.vstack', (['[self._vertices, other._vertices]'], {}), '([self._vertices, other._vertices])\n', (29132, 29167), True, 'import numpy as np\n'), ((34043, 34064), 'coordstring.CoordString', 'CoordString', (['vertices'], {}), '(vertices)\n', (34054, 34064), False, 'from coordstring import CoordString\n'), ((6387, 6421), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {'dtype': 'np.float64'}), '((3, 4), dtype=np.float64)\n', (6395, 6421), True, 'import numpy as np\n'), ((10791, 10823), 'math.sqrt', 'math.sqrt', (['(dist * dist + dz * dz)'], {}), '(dist * dist + dz * dz)\n', (10800, 10823), 
False, 'import math\n'), ((11928, 11954), 'coordstring.CoordString', 'CoordString', (['[]'], {'ring': 'ring'}), '([], ring=ring)\n', (11939, 11954), False, 'from coordstring import CoordString\n'), ((17080, 17114), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {'dtype': 'np.float64'}), '((3, 4), dtype=np.float64)\n', (17088, 17114), True, 'import numpy as np\n'), ((17608, 17623), 'numpy.dot', 'np.dot', (['M', 'xy.T'], {}), '(M, xy.T)\n', (17614, 17623), True, 'import numpy as np\n'), ((19051, 19071), 'numpy.array', 'np.array', (['pt._vertex'], {}), '(pt._vertex)\n', (19059, 19071), True, 'import numpy as np\n'), ((19110, 19133), 'numpy.sum', 'np.sum', (['((A - P) ** 2)', '(1)'], {}), '((A - P) ** 2, 1)\n', (19116, 19133), True, 'import numpy as np\n'), ((37907, 37916), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (37913, 37916), True, 'import numpy as np\n'), ((45856, 45875), 'numpy.vstack', 'np.vstack', (['vertices'], {}), '(vertices)\n', (45865, 45875), True, 'import numpy as np\n'), ((49662, 49696), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {'dtype': 'np.float64'}), '((3, 4), dtype=np.float64)\n', (49670, 49696), True, 'import numpy as np\n'), ((63304, 63313), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (63310, 63313), True, 'import numpy as np\n'), ((10408, 10464), 'math.sqrt', 'math.sqrt', (['((x1 - x0) * (x1 - x0) + (y1 - y0) * (y1 - y0))'], {}), '((x1 - x0) * (x1 - x0) + (y1 - y0) * (y1 - y0))\n', (10417, 10464), False, 'import math\n'), ((12119, 12180), 'coordstring.CoordString', 'CoordString', (['[point._vertex for point in vertices]'], {'ring': 'ring'}), '([point._vertex for point in vertices], ring=ring)\n', (12130, 12180), False, 'from coordstring import CoordString\n'), ((14718, 14731), 'numpy.vstack', 'np.vstack', (['vt'], {}), '(vt)\n', (14727, 14731), True, 'import numpy as np\n'), ((31370, 31409), 'numpy.array', 'np.array', (['pos._vertex'], {'dtype': 'np.float64'}), '(pos._vertex, dtype=np.float64)\n', (31378, 31409), True, 'import numpy as np\n'), 
((42368, 42383), 'coordstring.CoordString', 'CoordString', (['[]'], {}), '([])\n', (42379, 42383), False, 'from coordstring import CoordString\n'), ((8166, 8179), 'math.cos', 'math.cos', (['maz'], {}), '(maz)\n', (8174, 8179), False, 'import math\n'), ((8214, 8227), 'math.sin', 'math.sin', (['maz'], {}), '(maz)\n', (8222, 8227), False, 'import math\n'), ((12474, 12515), 'numpy.equal', 'np.equal', (['self._vertices', 'other._vertices'], {}), '(self._vertices, other._vertices)\n', (12482, 12515), True, 'import numpy as np\n'), ((40835, 40876), 'numpy.equal', 'np.equal', (['self._vertices', 'other._vertices'], {}), '(self._vertices, other._vertices)\n', (40843, 40876), True, 'import numpy as np\n'), ((42808, 42827), 'coordstring.CoordString', 'CoordString', (['inputs'], {}), '(inputs)\n', (42819, 42827), False, 'from coordstring import CoordString\n'), ((44205, 44223), 'coordstring.CoordString', 'CoordString', (['verts'], {}), '(verts)\n', (44216, 44223), False, 'from coordstring import CoordString\n'), ((53647, 53664), 'coordstring.CoordString', 'CoordString', (['part'], {}), '(part)\n', (53658, 53664), False, 'from coordstring import CoordString\n'), ((56387, 56410), 'numpy.array', 'np.array', (['line_vertices'], {}), '(line_vertices)\n', (56395, 56410), True, 'import numpy as np\n'), ((60380, 60406), 'itertools.chain', 'itertools.chain', (['*vertices'], {}), '(*vertices)\n', (60395, 60406), False, 'import itertools\n'), ((60454, 60480), 'itertools.chain', 'itertools.chain', (['*vertices'], {}), '(*vertices)\n', (60469, 60480), False, 'import itertools\n'), ((61020, 61036), 'coordstring.CoordString', 'CoordString', (['xys'], {}), '(xys)\n', (61031, 61036), False, 'from coordstring import CoordString\n'), ((58017, 58034), 'coordstring.CoordString', 'CoordString', (['ring'], {}), '(ring)\n', (58028, 58034), False, 'from coordstring import CoordString\n'), ((5273, 5301), 'math.atan2', 'math.atan2', (['(y1 - y0)', '(x1 - x0)'], {}), '(y1 - y0, x1 - x0)\n', (5283, 5301), 
False, 'import math\n')] |
from __future__ import division, print_function
from os import mkdir
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import sys
import time
import torch.utils.data
from torchvision import transforms, datasets
from torch._six import with_metaclass
from torch._C import _ImperativeEngine as ImperativeEngine
LAMBDA = 0.02  # weight of the Lipschitz (spectral-norm) regularization term in the loss
RANDOM_SEED = 42  # seed for the torch RNGs, for reproducible runs
def expectation_spectral_norm_upper_bound_calculation(W_mu, W_p=None, SIMU_TIMES=10, ITERATION_TIMES=10):
    """Estimate an upper bound on the expected spectral norm of a Gaussian
    weight matrix.

    Power iteration on the mean matrix ``W_mu`` approximates its largest
    singular value. When the pre-softplus std parameter ``W_p`` is given,
    a deterministic row/column-norm bound for the noise part plus a
    Monte-Carlo perturbation estimate is added.

    Parameters
    ----------
    W_mu : 2-D tensor of weight means
    W_p : 2-D tensor of pre-softplus weight stds, or None for means only
    SIMU_TIMES : int, Monte-Carlo samples for the noise term
    ITERATION_TIMES : int, power-iteration steps

    Returns a scalar tensor.
    """
    # Allocate the power-iteration vectors on W_mu's own device rather than
    # hard-coding .cuda(), so the function also works on CPU-only hosts
    # (unchanged behavior for CUDA inputs).
    u = torch.rand(W_mu.shape[0], device=W_mu.device)
    v = torch.rand(W_mu.shape[1], device=W_mu.device)
    for _ in range(ITERATION_TIMES):
        v = torch.nn.functional.normalize(torch.mv(W_mu.t(), u), dim=0, eps=1e-12)
        u = torch.nn.functional.normalize(torch.mv(W_mu, v), dim=0, eps=1e-12)
    sigma = torch.dot(u, torch.mv(W_mu, v))  # largest-singular-value estimate
    if W_p is None:
        return sigma
    std_w = 1e-6 + F.softplus(W_p, beta=1, threshold=20)
    # Deterministic bound from the largest row and column norms of the stds
    res = torch.max(torch.norm(std_w, dim=1)) + torch.max(torch.norm(std_w, dim=0))
    # Monte-Carlo estimate of the expected maximum perturbation
    tmp = 0
    for _ in range(SIMU_TIMES):
        eps_W = W_mu.data.new(W_mu.size()).normal_()
        tmp += torch.max(1 * eps_W * std_w)
    tmp /= SIMU_TIMES
    return res + tmp + sigma
class VariableMeta(type):
    """Metaclass that makes any torch.Tensor pass isinstance checks against
    classes built with it (legacy Variable/Tensor unification shim)."""
    def __instancecheck__(cls, other):
        # Equivalent to isinstance(other, torch.Tensor)
        return torch.is_tensor(other)
class Variable(with_metaclass(VariableMeta, torch._C._LegacyVariableBase)):
    # Legacy autograd Variable shim: the metaclass makes isinstance checks
    # accept any torch.Tensor, while the C base supplies construction.
    # NOTE(review): relies on torch._six / torch._C internals that were
    # removed in recent PyTorch releases -- confirm the pinned torch version.
    pass
# Wire the legacy Variable onto torch's imperative autograd engine
Variable._execution_engine = ImperativeEngine()
def cprint(color, text, **kwargs):
    """Print *text* wrapped in an ANSI color escape and flush stdout.

    *color* is one of 'a r g y b p c w'; prefix it with '*' for bold.
    Extra keyword arguments are forwarded to print().
    """
    codes = {'a': '30', 'r': '31', 'g': '32', 'y': '33',
             'b': '34', 'p': '35', 'c': '36', 'w': '37'}
    bold = color[0] == '*'
    if bold:
        color = color[1:]
    prefix = '1;' if bold else ''
    print("\x1b[%s%sm%s\x1b[0m" % (prefix, codes[color], text), **kwargs)
    sys.stdout.flush()
def humansize(nbytes):
    """Format a byte count as a human-readable string, e.g. 1536 -> '1.50KB'.

    Divides by 1024 per step; values beyond PB stay expressed in PB.
    """
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    idx = 0
    size = nbytes
    while size >= 1024 and idx < len(units) - 1:
        size /= 1024.
        idx += 1
    return '%.2f%s' % (size, units[idx])
def to_variable(var=(), cuda=True, volatile=False):
    """Convert each item of *var* to a (optionally CUDA) legacy Variable.

    Parameters
    ----------
    var : iterable of ndarray / tensor / Variable
    cuda : move tensors onto the GPU if not already there
    volatile : forwarded to the legacy Variable constructor
    """
    converted = []
    for item in var:
        if isinstance(item, np.ndarray):
            # numpy input: promote to a float32 torch tensor
            item = torch.from_numpy(item).type(torch.FloatTensor)
        if cuda and not item.is_cuda:
            item = item.cuda()
        if not isinstance(item, Variable):
            item = Variable(item, volatile=volatile)
        converted.append(item)
    return converted
def isotropic_gauss_loglike(x, mu, sigma, do_sum=True):
    """Element-wise log-density of N(x; mu, sigma^2).

    Returns the sum over all elements when *do_sum* is True, otherwise the
    element-wise tensor.
    """
    const_term = -0.5 * np.log(2 * np.pi)
    z = (x - mu) / sigma
    elementwise = const_term - torch.log(sigma) - 0.5 * z ** 2
    if do_sum:
        return elementwise.sum()  # sum over all weights
    return elementwise
class BaseNet(object):
    """Common scaffolding for training wrappers: learning-rate scheduling
    and checkpoint save/load.

    Subclasses are expected to set self.model, self.optimizer, self.lr,
    self.schedule, and self.epoch.
    """
    def __init__(self):
        cprint('c', '\nNet:')
    def get_nb_parameters(self):
        """Total number of parameters in self.model."""
        return np.sum(p.numel() for p in self.model.parameters())
    def set_mode_train(self, train=True):
        """Switch the wrapped model between train and eval modes."""
        if train:
            self.model.train()
        else:
            self.model.eval()
    def update_lr(self, epoch, gamma=0.99):
        """Decay the learning rate by *gamma* at scheduled epochs.

        An empty schedule list decays every epoch; schedule=None disables
        decay entirely.
        """
        self.epoch += 1
        if self.schedule is not None:
            if len(self.schedule) == 0 or epoch in self.schedule:
                self.lr *= gamma
                # BUG FIX: the format string has two placeholders, but only
                # self.lr was interpolated and *epoch* was passed as a second
                # print() argument, raising TypeError whenever this ran.
                print('learning rate: %f (%d)\n' % (self.lr, epoch))
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = self.lr
    def save(self, filename):
        """Checkpoint epoch, lr, model, and optimizer to *filename*."""
        cprint('c', 'Writting %s\n' % filename)
        torch.save({
            'epoch': self.epoch,
            'lr': self.lr,
            'model': self.model,
            'optimizer': self.optimizer}, filename)
    def load(self, filename):
        """Restore the state written by save(); returns the stored epoch."""
        cprint('c', 'Reading %s\n' % filename)
        state_dict = torch.load(filename)
        self.epoch = state_dict['epoch']
        self.lr = state_dict['lr']
        self.model = state_dict['model']
        self.optimizer = state_dict['optimizer']
        print('  restoring epoch: %d, lr: %f' % (self.epoch, self.lr))
        return self.epoch
def KLD_cost(mu_p, sig_p, mu_q, sig_q):
    """Closed-form KL divergence between diagonal Gaussians q and p,
    summed over all elements."""
    log_ratio = 2 * torch.log(sig_p / sig_q)
    var_ratio = (sig_q / sig_p).pow(2)
    mean_term = ((mu_p - mu_q) / sig_p).pow(2)
    return 0.5 * (log_ratio - 1 + var_ratio + mean_term).sum()
class BayesLinear_local_reparam(nn.Module):
    """ Bayesian linear layer using the local reparameterization trick.

    Weights and biases carry factorized Gaussian posteriors with means
    ``*_mu`` and stds ``softplus(*_p)``; the forward pass samples the
    pre-activations rather than the weights, which reduces gradient
    variance. forward() returns (output, KL, 0, lip_loss**2).
    """
    def __init__(self, n_in, n_out, prior_sig):
        super(BayesLinear_local_reparam, self).__init__()
        self.n_in = n_in
        self.n_out = n_out
        self.prior_sig = prior_sig  # std of the zero-mean Gaussian weight prior
        # Learnable parameters
        self.W_mu = nn.Parameter(torch.Tensor(self.n_in, self.n_out).uniform_(-0.1, 0.1))
        self.W_p = nn.Parameter(
            torch.Tensor(self.n_in, self.n_out).uniform_(-3, -2))
        self.b_mu = nn.Parameter(torch.Tensor(self.n_out).uniform_(-0.1, 0.1))
        self.b_p = nn.Parameter(torch.Tensor(self.n_out).uniform_(-3, -2))
    def forward(self, X, sample=False):
        if not self.training and not sample:  # This is just a placeholder function
            # Deterministic pass through the posterior means only
            output = torch.mm(X, self.W_mu) + self.b_mu.expand(X.size()[0], self.n_out)
            return output, 0, 0, 0
        else:
            # softplus keeps the stds positive; 1e-6 guards downstream logs
            std_w = 1e-6 + F.softplus(self.W_p, beta=1, threshold=20)
            std_b = 1e-6 + F.softplus(self.b_p, beta=1, threshold=20)
            act_W_mu = torch.mm(X, self.W_mu)  # self.W_mu + std_w * eps_W
            act_W_std = torch.sqrt(torch.mm(X.pow(2), std_w.pow(2)))
            # Local reparameterization: sample the pre-activations directly
            eps_W = Variable(self.W_mu.data.new(act_W_std.size()).normal_(mean=0, std=1))
            eps_b = Variable(self.b_mu.data.new(std_b.size()).normal_(mean=0, std=1))
            act_W_out = act_W_mu + act_W_std * eps_W  # (batch_size, n_output)
            act_b_out = self.b_mu + std_b * eps_b
            output = act_W_out + act_b_out.unsqueeze(0).expand(X.shape[0], -1)
            # KL(posterior || prior); the bias prior std is fixed at 0.1
            a = KLD_cost(mu_p=0, sig_p=self.prior_sig, mu_q=self.W_mu, sig_q=std_w)
            b = KLD_cost(mu_p=0, sig_p=0.1, mu_q=self.b_mu, sig_q=std_b)
            kld = a + b
            if LAMBDA < 1e-10:
                # Regularizer disabled: skip the costly spectral-norm bound
                lip_loss = 0
            else:
                lip_loss = expectation_spectral_norm_upper_bound_calculation(self.W_mu, self.W_p)
            return output, kld, 0, lip_loss ** 2
class bayes_linear_LR_2L(nn.Module):
    """Two-hidden-layer Bayesian MLP built from local-reparameterization
    linear layers.

    forward() returns (logits, total_KL, total_prior_term, total_Lipschitz
    penalty); sample_predict() stacks Nsamples stochastic passes and adds
    entropy summaries (Ha: mean predictive entropy, He: mean absolute
    deviation of entropies).
    """
    def __init__(self, input_dim, output_dim, nhid, prior_sig):
        super(bayes_linear_LR_2L, self).__init__()
        self.prior_sig = prior_sig
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.bfc1 = BayesLinear_local_reparam(input_dim, nhid, self.prior_sig)
        self.bfc2 = BayesLinear_local_reparam(nhid, nhid, self.prior_sig)
        self.bfc3 = BayesLinear_local_reparam(nhid, output_dim, self.prior_sig)
        self.act = nn.ReLU(inplace=True)
    def forward(self, x, sample=False):
        """Run one (optionally stochastic) pass; accumulates the per-layer
        KL and Lipschitz terms."""
        total_lqw = 0
        total_lpw = 0
        total_lip = 0
        x = x.view(-1, self.input_dim)  # flatten to (batch, input_dim)
        # Hidden layers: linear -> ReLU, accumulating regularizer terms
        for layer in (self.bfc1, self.bfc2):
            x, lqw, lpw, lip = layer(x, sample)
            total_lqw = total_lqw + lqw
            total_lpw = total_lpw + lpw
            total_lip = total_lip + lip
            x = self.act(x)
        # Output layer (no activation: raw logits)
        y, lqw, lpw, lip = self.bfc3(x, sample)
        total_lqw = total_lqw + lqw
        total_lpw = total_lpw + lpw
        total_lip = total_lip + lip
        return y, total_lqw, total_lpw, total_lip
    def sample_predict(self, x, Nsamples):
        """Draw Nsamples stochastic forward passes.

        Returns (stacked logits, per-sample KL terms, per-sample prior
        terms, Ha, He).
        """
        predictions = x.data.new(Nsamples, x.shape[0], self.output_dim)
        tlqw_vec = np.zeros(Nsamples)
        tlpw_vec = np.zeros(Nsamples)
        entropies = []
        for i in range(Nsamples):
            y, tlqw, tlpw, _ = self.forward(x, sample=True)
            predictions[i] = y
            tlqw_vec[i] = tlqw
            tlpw_vec[i] = tlpw
            probs = nn.functional.softmax(y)
            entropies.append(torch.distributions.Categorical(probs=probs).entropy())
        Ha = sum(entropies) / Nsamples
        He = sum(torch.abs(Ha - h) for h in entropies) / Nsamples
        return predictions, tlqw_vec, tlpw_vec, Ha, He
class BBP_Bayes_Net_LR(BaseNet):
    """ Bayes-by-Backprop network wrapper (local reparameterization).

    fit() minimizes KL/Nbatches + cross-entropy + LAMBDA-weighted Lipschitz
    penalty, averaging over *samples* stochastic passes; eval() runs the
    deterministic mean network; sample_eval() averages sampled predictions.
    """
    def __init__(self, lr=1e-3, channels_in=3, side_in=28, cuda=True, classes=10, batch_size=128, Nbatches=0,
                 nhid=1200, prior_sig=0.1):
        super(BBP_Bayes_Net_LR, self).__init__()
        cprint('y', ' Creating Net!! ')
        self.lr = lr
        self.schedule = None  # [] #[50,200,400,600]
        self.cuda = cuda
        self.channels_in = channels_in
        self.classes = classes
        self.nhid = nhid
        self.prior_sig = prior_sig
        self.batch_size = batch_size
        self.Nbatches = Nbatches
        self.side_in = side_in
        self.create_net()
        self.create_opt()
        self.epoch = 0
        self.test = False
    def create_net(self):
        # Seed both CPU and GPU RNGs for reproducible initialization
        torch.manual_seed(RANDOM_SEED)
        if self.cuda:
            torch.cuda.manual_seed(RANDOM_SEED)
        self.model = bayes_linear_LR_2L(input_dim=self.channels_in * self.side_in * self.side_in,
                                        output_dim=self.classes,
                                        nhid=self.nhid, prior_sig=self.prior_sig)
        if self.cuda:
            self.model = self.model.cuda()
        print('    Total params: %.2fM' % (self.get_nb_parameters() / 1000000.0))
    def create_opt(self):
        # Plain SGD without momentum
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr, momentum=0)
    def fit(self, x, y, samples=1):
        """One optimization step on batch (x, y), averaging the loss over
        *samples* stochastic forward passes. Returns (KL term, NLL term,
        error count, 0)."""
        x, y = to_variable(var=(x, y.long()), cuda=self.cuda)
        self.optimizer.zero_grad()
        lip_loss = 0
        if samples == 1:
            out, tlqw, tlpw, lip_loss = self.model(x)
            mlpdw = F.cross_entropy(out, y, reduction='sum')
            Edkl = (tlqw - tlpw) / self.Nbatches
        elif samples > 1:
            # Monte-Carlo average of NLL, KL, and Lipschitz terms
            mlpdw_cum = 0
            Edkl_cum = 0
            for i in range(samples):
                out, tlqw, tlpw, tlip_loss = self.model(x, sample=True)
                mlpdw_i = F.cross_entropy(out, y, reduction='sum')
                Edkl_i = (tlqw - tlpw) / self.Nbatches
                mlpdw_cum = mlpdw_cum + mlpdw_i
                Edkl_cum = Edkl_cum + Edkl_i
                lip_loss = lip_loss + tlip_loss
            mlpdw = mlpdw_cum / samples
            Edkl = Edkl_cum / samples
            lip_loss = lip_loss / samples
        loss = Edkl + mlpdw + LAMBDA * 0.5 * lip_loss * len(x)
        loss.backward()
        self.optimizer.step()
        pred = out.data.max(dim=1, keepdim=False)[1]  # get the index of the max log-probability
        err = pred.ne(y.data).sum()
        return Edkl.data, mlpdw.data, err, 0  # lip_loss.data
    def eval(self, x, y):
        """Deterministic (posterior-mean) evaluation on batch (x, y);
        returns (loss, error count, class probabilities)."""
        x, y = to_variable(var=(x, y.long()), cuda=self.cuda)
        out, _, _, _ = self.model(x)
        loss = F.cross_entropy(out, y, reduction='sum')
        probs = F.softmax(out, dim=1).data.cpu()
        pred = out.data.max(dim=1, keepdim=False)[1]  # get the index of the max log-probability
        err = pred.ne(y.data).sum()
        return loss.data, err, probs
    def sample_eval(self, x, y, Nsamples, logits=True, train=False):
        """Evaluate with Nsamples stochastic passes.

        logits=True averages the raw logits before the loss; otherwise the
        softmax probabilities are averaged and NLL is computed on their log.
        Returns (loss, error count, probabilities, Ha, He).
        """
        x, y = to_variable(var=(x, y.long()), cuda=self.cuda)
        out, _, _, Ha, He = self.model.sample_predict(x, Nsamples)
        if logits:
            mean_out = out.mean(dim=0, keepdim=False)
            loss = F.cross_entropy(mean_out, y, reduction='sum')
            probs = F.softmax(mean_out, dim=1).data.cpu()
        else:
            mean_out = F.softmax(out, dim=2).mean(dim=0, keepdim=False)
            probs = mean_out.data.cpu()
            log_mean_probs_out = torch.log(mean_out)
            loss = F.nll_loss(log_mean_probs_out, y, reduction='sum')
        pred = mean_out.data.max(dim=1, keepdim=False)[1]  # get the index of the max log-probability
        err = pred.ne(y.data).sum()
        return loss.data, err, probs, Ha, He
if __name__ == "__main__":
    # ----- hyper-parameters -----
    epochs = 50
    prior_sig = 0.1
    lr = 1e-3
    n_samples = 15
    suffix = f"lambda{LAMBDA}_seed{RANDOM_SEED}"
    models_dir = 'models_' + suffix
    results_dir = 'results_' + suffix
    mkdir(models_dir)
    mkdir(results_dir)
    NTrainPointsMNIST = 60000
    batch_size = 100
    nb_epochs = epochs
    log_interval = 1
    cprint('c', '\nData:')
    # Standard MNIST normalization constants (dataset mean/std).
    transform_train = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.1307,), std=(0.3081,))
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.1307,), std=(0.3081,))
    ])
    use_cuda = torch.cuda.is_available()
    trainset = datasets.MNIST(root='../data', train=True, download=True, transform=transform_train)
    valset = datasets.MNIST(root='../data', train=False, download=True, transform=transform_test)
    # Pin host memory only when batches will be transferred to the GPU.
    if use_cuda:
        trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, pin_memory=True,
                                                  num_workers=3)
        valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, pin_memory=True,
                                                num_workers=3)
    else:
        trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, pin_memory=False,
                                                  num_workers=3)
        valloader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, pin_memory=False,
                                                num_workers=3)
    cprint('c', '\nNetwork:')
    nsamples = int(n_samples)
    net = BBP_Bayes_Net_LR(lr=lr, channels_in=1, side_in=28, cuda=use_cuda, classes=10, batch_size=batch_size,
                           Nbatches=(NTrainPointsMNIST / batch_size), nhid=1200, prior_sig=prior_sig)
    epoch = 0
    cprint('c', '\nTrain:')
    print(' init cost variables:')
    # Per-epoch accumulators (filled as sums, then averaged per sample).
    kl_cost_train = np.zeros(nb_epochs)
    pred_cost_train = np.zeros(nb_epochs)
    err_train = np.zeros(nb_epochs)
    lip_losses = np.zeros(nb_epochs)
    cost_dev = np.zeros(nb_epochs)
    err_dev = np.zeros(nb_epochs)
    best_err = np.inf
    nb_its_dev = 1
    train_max_grad = []
    train_mean_grad = []
    test_max_grad = []
    test_mean_grad = []
    tic0 = time.time()
    for i in range(epoch, nb_epochs):
        # Fixed: the original selected ELBO_samples via an if/else whose two
        # branches were identical; collapsed to a single assignment.
        ELBO_samples = nsamples
        net.set_mode_train(True)
        tic = time.time()
        nb_samples = 0
        for x, y in trainloader:
            cost_dkl, cost_pred, err, lip_loss = net.fit(x, y, samples=ELBO_samples)
            err_train[i] += err
            kl_cost_train[i] += cost_dkl
            pred_cost_train[i] += cost_pred
            nb_samples += len(x)
            lip_losses[i] += lip_loss
        # Convert the accumulated sums into per-sample averages.
        kl_cost_train[i] /= nb_samples
        pred_cost_train[i] /= nb_samples
        err_train[i] /= nb_samples
        lip_losses[i] /= nb_samples
        toc = time.time()
        net.epoch = i
        print("it %d/%d, Jtr_KL = %f, Jtr_pred = %f, err = %f, lip_loss = %f" % (
            i, nb_epochs, kl_cost_train[i], pred_cost_train[i], err_train[i], lip_losses[i]), end="")
        cprint('r', ' time: %f seconds\n' % (toc - tic))
        # Periodic validation pass using the expected weights.
        if i % nb_its_dev == 0:
            net.set_mode_train(False)
            nb_samples = 0
            for j, (x, y) in enumerate(valloader):
                cost, err, probs = net.eval(x, y)  # This takes the expected weights to save time, not proper inference
                cost_dev[i] += cost
                err_dev[i] += err
                nb_samples += len(x)
            cost_dev[i] /= nb_samples
            err_dev[i] /= nb_samples
            cprint('g', ' Jdev = %f, err = %f\n' % (cost_dev[i], err_dev[i]))
            if err_dev[i] < best_err:
                best_err = err_dev[i]
                cprint('b', 'best test error')
                net.save(models_dir + '/theta_best.dat')
    toc0 = time.time()
    runtime_per_it = (toc0 - tic0) / float(nb_epochs)
    cprint('r', ' average time: %f seconds\n' % runtime_per_it)
    net.save(models_dir + '/theta_last.dat')
    cprint('c', '\nRESULTS:')
    nb_parameters = net.get_nb_parameters()
    best_cost_dev = np.min(cost_dev)
    best_cost_train = np.min(pred_cost_train)
    err_dev_min = err_dev[::nb_its_dev].min()
    print(' cost_dev: %f (cost_train %f)' % (best_cost_dev, best_cost_train))
    print(' err_dev: %f' % (err_dev_min))
    print(' nb_parameters: %d (%s)' % (nb_parameters, humansize(nb_parameters)))
    print(' time_per_it: %fs\n' % (runtime_per_it))
| [
"os.mkdir",
"torch.distributions.Categorical",
"torch.mm",
"sys.stdout.flush",
"torchvision.transforms.Normalize",
"torch._C._ImperativeEngine",
"torch.utils.data.DataLoader",
"torch.load",
"torch.Tensor",
"torch.nn.functional.nll_loss",
"torch.log",
"torch.manual_seed",
"torch.norm",
"tor... | [((1316, 1374), 'torch._six.with_metaclass', 'with_metaclass', (['VariableMeta', 'torch._C._LegacyVariableBase'], {}), '(VariableMeta, torch._C._LegacyVariableBase)\n', (1330, 1374), False, 'from torch._six import with_metaclass\n'), ((1417, 1435), 'torch._C._ImperativeEngine', 'ImperativeEngine', ([], {}), '()\n', (1433, 1435), True, 'from torch._C import _ImperativeEngine as ImperativeEngine\n'), ((1828, 1846), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1844, 1846), False, 'import sys\n'), ((12400, 12417), 'os.mkdir', 'mkdir', (['models_dir'], {}), '(models_dir)\n', (12405, 12417), False, 'from os import mkdir\n'), ((12422, 12440), 'os.mkdir', 'mkdir', (['results_dir'], {}), '(results_dir)\n', (12427, 12440), False, 'from os import mkdir\n'), ((12863, 12888), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12886, 12888), False, 'import torch\n'), ((12905, 12994), 'torchvision.datasets.MNIST', 'datasets.MNIST', ([], {'root': '"""../data"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), "(root='../data', train=True, download=True, transform=\n transform_train)\n", (12919, 12994), False, 'from torchvision import transforms, datasets\n'), ((13003, 13092), 'torchvision.datasets.MNIST', 'datasets.MNIST', ([], {'root': '"""../data"""', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), "(root='../data', train=False, download=True, transform=\n transform_test)\n", (13017, 13092), False, 'from torchvision import transforms, datasets\n'), ((14200, 14219), 'numpy.zeros', 'np.zeros', (['nb_epochs'], {}), '(nb_epochs)\n', (14208, 14219), True, 'import numpy as np\n'), ((14242, 14261), 'numpy.zeros', 'np.zeros', (['nb_epochs'], {}), '(nb_epochs)\n', (14250, 14261), True, 'import numpy as np\n'), ((14278, 14297), 'numpy.zeros', 'np.zeros', (['nb_epochs'], {}), '(nb_epochs)\n', (14286, 14297), True, 'import numpy as np\n'), ((14315, 14334), 'numpy.zeros', 'np.zeros', 
(['nb_epochs'], {}), '(nb_epochs)\n', (14323, 14334), True, 'import numpy as np\n'), ((14350, 14369), 'numpy.zeros', 'np.zeros', (['nb_epochs'], {}), '(nb_epochs)\n', (14358, 14369), True, 'import numpy as np\n'), ((14384, 14403), 'numpy.zeros', 'np.zeros', (['nb_epochs'], {}), '(nb_epochs)\n', (14392, 14403), True, 'import numpy as np\n'), ((14555, 14566), 'time.time', 'time.time', ([], {}), '()\n', (14564, 14566), False, 'import time\n'), ((16287, 16298), 'time.time', 'time.time', ([], {}), '()\n', (16296, 16298), False, 'import time\n'), ((16560, 16576), 'numpy.min', 'np.min', (['cost_dev'], {}), '(cost_dev)\n', (16566, 16576), True, 'import numpy as np\n'), ((16599, 16622), 'numpy.min', 'np.min', (['pred_cost_train'], {}), '(pred_cost_train)\n', (16605, 16622), True, 'import numpy as np\n'), ((789, 806), 'torch.mv', 'torch.mv', (['W_mu', 'v'], {}), '(W_mu, v)\n', (797, 806), False, 'import torch\n'), ((869, 906), 'torch.nn.functional.softplus', 'F.softplus', (['W_p'], {'beta': '(1)', 'threshold': '(20)'}), '(W_p, beta=1, threshold=20)\n', (879, 906), True, 'import torch.nn.functional as F\n'), ((1105, 1133), 'torch.max', 'torch.max', (['(1 * eps_W * std_w)'], {}), '(1 * eps_W * std_w)\n', (1114, 1133), False, 'import torch\n'), ((2539, 2556), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2545, 2556), True, 'import numpy as np\n'), ((2577, 2593), 'torch.log', 'torch.log', (['sigma'], {}), '(sigma)\n', (2586, 2593), False, 'import torch\n'), ((3624, 3736), 'torch.save', 'torch.save', (["{'epoch': self.epoch, 'lr': self.lr, 'model': self.model, 'optimizer': self\n .optimizer}", 'filename'], {}), "({'epoch': self.epoch, 'lr': self.lr, 'model': self.model,\n 'optimizer': self.optimizer}, filename)\n", (3634, 3736), False, 'import torch\n'), ((3881, 3901), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (3891, 3901), False, 'import torch\n'), ((6836, 6857), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', 
(6843, 6857), False, 'from torch import nn\n'), ((7793, 7811), 'numpy.zeros', 'np.zeros', (['Nsamples'], {}), '(Nsamples)\n', (7801, 7811), True, 'import numpy as np\n'), ((7831, 7849), 'numpy.zeros', 'np.zeros', (['Nsamples'], {}), '(Nsamples)\n', (7839, 7849), True, 'import numpy as np\n'), ((9091, 9121), 'torch.manual_seed', 'torch.manual_seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (9108, 9121), False, 'import torch\n'), ((11078, 11118), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['out', 'y'], {'reduction': '"""sum"""'}), "(out, y, reduction='sum')\n", (11093, 11118), True, 'import torch.nn.functional as F\n'), ((13128, 13238), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(3)'}), '(trainset, batch_size=batch_size, shuffle=True,\n pin_memory=True, num_workers=3)\n', (13155, 13238), False, 'import torch\n'), ((13305, 13414), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'pin_memory': '(True)', 'num_workers': '(3)'}), '(valset, batch_size=batch_size, shuffle=False,\n pin_memory=True, num_workers=3)\n', (13332, 13414), False, 'import torch\n'), ((13492, 13603), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'pin_memory': '(False)', 'num_workers': '(3)'}), '(trainset, batch_size=batch_size, shuffle=True,\n pin_memory=False, num_workers=3)\n', (13519, 13603), False, 'import torch\n'), ((13670, 13780), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'pin_memory': '(False)', 'num_workers': '(3)'}), '(valset, batch_size=batch_size, shuffle=False,\n pin_memory=False, num_workers=3)\n', (13697, 13780), False, 'import torch\n'), ((14759, 14770), 'time.time', 'time.time', ([], {}), '()\n', (14768, 
14770), False, 'import time\n'), ((15281, 15292), 'time.time', 'time.time', ([], {}), '()\n', (15290, 15292), False, 'import time\n'), ((491, 516), 'torch.rand', 'torch.rand', (['W_mu.shape[0]'], {}), '(W_mu.shape[0])\n', (501, 516), False, 'import torch\n'), ((532, 557), 'torch.rand', 'torch.rand', (['W_mu.shape[1]'], {}), '(W_mu.shape[1])\n', (542, 557), False, 'import torch\n'), ((727, 744), 'torch.mv', 'torch.mv', (['W_mu', 'v'], {}), '(W_mu, v)\n', (735, 744), False, 'import torch\n'), ((928, 952), 'torch.norm', 'torch.norm', (['std_w'], {'dim': '(1)'}), '(std_w, dim=1)\n', (938, 952), False, 'import torch\n'), ((966, 990), 'torch.norm', 'torch.norm', (['std_w'], {'dim': '(0)'}), '(std_w, dim=0)\n', (976, 990), False, 'import torch\n'), ((5380, 5402), 'torch.mm', 'torch.mm', (['X', 'self.W_mu'], {}), '(X, self.W_mu)\n', (5388, 5402), False, 'import torch\n'), ((8076, 8100), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['y'], {}), '(y)\n', (8097, 8100), False, 'from torch import nn\n'), ((9156, 9191), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['RANDOM_SEED'], {}), '(RANDOM_SEED)\n', (9178, 9191), False, 'import torch\n'), ((9959, 9999), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['out', 'y'], {'reduction': '"""sum"""'}), "(out, y, reduction='sum')\n", (9974, 9999), True, 'import torch.nn.functional as F\n'), ((11634, 11679), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['mean_out', 'y'], {'reduction': '"""sum"""'}), "(mean_out, y, reduction='sum')\n", (11649, 11679), True, 'import torch.nn.functional as F\n'), ((11899, 11918), 'torch.log', 'torch.log', (['mean_out'], {}), '(mean_out)\n', (11908, 11918), False, 'import torch\n'), ((11938, 11988), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['log_mean_probs_out', 'y'], {'reduction': '"""sum"""'}), "(log_mean_probs_out, y, reduction='sum')\n", (11948, 11988), True, 'import torch.nn.functional as F\n'), ((12616, 12637), 'torchvision.transforms.ToTensor', 
'transforms.ToTensor', ([], {}), '()\n', (12635, 12637), False, 'from torchvision import transforms, datasets\n'), ((12647, 12698), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '(0.1307,)', 'std': '(0.3081,)'}), '(mean=(0.1307,), std=(0.3081,))\n', (12667, 12698), False, 'from torchvision import transforms, datasets\n'), ((12757, 12778), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (12776, 12778), False, 'from torchvision import transforms, datasets\n'), ((12788, 12839), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '(0.1307,)', 'std': '(0.3081,)'}), '(mean=(0.1307,), std=(0.3081,))\n', (12808, 12839), False, 'from torchvision import transforms, datasets\n'), ((5099, 5121), 'torch.mm', 'torch.mm', (['X', 'self.W_mu'], {}), '(X, self.W_mu)\n', (5107, 5121), False, 'import torch\n'), ((5243, 5285), 'torch.nn.functional.softplus', 'F.softplus', (['self.W_p'], {'beta': '(1)', 'threshold': '(20)'}), '(self.W_p, beta=1, threshold=20)\n', (5253, 5285), True, 'import torch.nn.functional as F\n'), ((5313, 5355), 'torch.nn.functional.softplus', 'F.softplus', (['self.b_p'], {'beta': '(1)', 'threshold': '(20)'}), '(self.b_p, beta=1, threshold=20)\n', (5323, 5355), True, 'import torch.nn.functional as F\n'), ((2226, 2245), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (2242, 2245), False, 'import torch\n'), ((4642, 4677), 'torch.Tensor', 'torch.Tensor', (['self.n_in', 'self.n_out'], {}), '(self.n_in, self.n_out)\n', (4654, 4677), False, 'import torch\n'), ((4744, 4779), 'torch.Tensor', 'torch.Tensor', (['self.n_in', 'self.n_out'], {}), '(self.n_in, self.n_out)\n', (4756, 4779), False, 'import torch\n'), ((4832, 4856), 'torch.Tensor', 'torch.Tensor', (['self.n_out'], {}), '(self.n_out)\n', (4844, 4856), False, 'import torch\n'), ((4910, 4934), 'torch.Tensor', 'torch.Tensor', (['self.n_out'], {}), '(self.n_out)\n', (4922, 4934), False, 'import torch\n'), ((8117, 8162), 
'torch.distributions.Categorical', 'torch.distributions.Categorical', ([], {'probs': 'output'}), '(probs=output)\n', (8148, 8162), False, 'import torch\n'), ((8248, 8265), 'torch.abs', 'torch.abs', (['(Ha - i)'], {}), '(Ha - i)\n', (8257, 8265), False, 'import torch\n'), ((10263, 10303), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['out', 'y'], {'reduction': '"""sum"""'}), "(out, y, reduction='sum')\n", (10278, 10303), True, 'import torch.nn.functional as F\n'), ((11136, 11157), 'torch.nn.functional.softmax', 'F.softmax', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (11145, 11157), True, 'import torch.nn.functional as F\n'), ((11776, 11797), 'torch.nn.functional.softmax', 'F.softmax', (['out'], {'dim': '(2)'}), '(out, dim=2)\n', (11785, 11797), True, 'import torch.nn.functional as F\n'), ((11700, 11726), 'torch.nn.functional.softmax', 'F.softmax', (['mean_out'], {'dim': '(1)'}), '(mean_out, dim=1)\n', (11709, 11726), True, 'import torch.nn.functional as F\n'), ((4228, 4252), 'torch.log', 'torch.log', (['(sig_p / sig_q)'], {}), '(sig_p / sig_q)\n', (4237, 4252), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
from os import path
import numpy as np
from mrsimulator.models import CzjzekDistribution
from mrsimulator.models import ExtCzjzekDistribution
from mrsimulator.models.utils import x_y_from_zeta_eta
from mrsimulator.models.utils import x_y_to_zeta_eta
__author__ = "<NAME>"
__email__ = "<EMAIL>"
MODULE_DIR = path.dirname(path.abspath(__file__))
COUNT = int(1e6)
def test_extended_czjzek_eta_distribution_1():
    """Eta histogram of the extended Czjzek model (eps=0.05) matches stored data."""
    filename = path.join(MODULE_DIR, "test_data", "eps=0.05.npy")
    with open(filename, "rb") as f:
        expected = np.load(f)
    tensor = {"zeta": 1, "eta": 0.1}
    _, etas = ExtCzjzekDistribution(tensor, eps=0.05).rvs(size=COUNT)
    hist, _ = np.histogram(etas, bins=100, range=[0, 1])
    message = "failed to compare values with file eps=0.05.npy"
    np.testing.assert_almost_equal(hist / COUNT, expected[0], decimal=2, err_msg=message)
def test_extended_czjzek_polar():
    """x/y <-> zeta/eta conversions round-trip for extended Czjzek samples."""
    tensor = {"zeta": 1, "eta": 0.1}
    x, y = ExtCzjzekDistribution(tensor, eps=0.05, polar=True).rvs(size=COUNT)
    zeta_eta = x_y_to_zeta_eta(x, y)
    x_back, y_back = x_y_from_zeta_eta(*zeta_eta)
    np.testing.assert_almost_equal(x, x_back)
    np.testing.assert_almost_equal(y, y_back)
def test_extended_czjzek_eta_distribution_2():
    """Eta histogram of the extended Czjzek model (eps=0.2) matches stored data."""
    filename = path.join(MODULE_DIR, "test_data", "eps=0.2.npy")
    with open(filename, "rb") as f:
        expected = np.load(f)
    tensor = {"Cq": 1e6, "eta": 0.3}
    _, etas = ExtCzjzekDistribution(tensor, eps=0.2).rvs(size=COUNT)
    hist, _ = np.histogram(etas, bins=100, range=[0, 1])
    message = "failed to compare values with file eps=0.2.npy"
    np.testing.assert_almost_equal(hist / COUNT, expected[1], decimal=2, err_msg=message)
def test_extended_czjzek_eta_distribution_3():
    """Eta histogram of the extended Czjzek model (eps=0.65) matches stored data."""
    filename = path.join(MODULE_DIR, "test_data", "eps=0.65.npy")
    with open(filename, "rb") as f:
        data = np.load(f)
    S0 = {"Cq": 1e6, "eta": 0.7}
    _, eta1 = ExtCzjzekDistribution(S0, eps=0.65).rvs(size=COUNT)
    hist1, _ = np.histogram(eta1, bins=100, range=[0, 1])
    # Fixed copy-paste error: the message previously referenced eps=0.05.npy
    # although this test reads eps=0.65.npy.
    message = "failed to compare values with file eps=0.65.npy"
    np.testing.assert_almost_equal(hist1 / COUNT, data[3], decimal=2, err_msg=message)
def test_czjzek_distribution():
    """Numerical Czjzek samples reproduce the analytical marginal distributions."""
    sigma = 0.5
    n_draws = COUNT
    zeta, eta = CzjzekDistribution(sigma).rvs(size=n_draws)
    # Marginal histogram over eta.
    eta_hist, eta_edges = np.histogram(eta, bins=15, range=[0, 1])
    e_vector = eta_hist / n_draws
    e_range = (eta_edges[1:] + eta_edges[:-1]) / 2
    # Marginal histogram over zeta.
    zeta_hist, zeta_edges = np.histogram(zeta, bins=20, range=[-20, 20])
    z_vector = zeta_hist / n_draws
    z_range = (zeta_edges[1:] + zeta_edges[:-1]) / 2
    # Analytical Czjzek density evaluated on the same grid.
    sigma_ = 2 * sigma
    V, e = np.meshgrid(z_range, e_range)
    denom = (2 * np.pi) ** 0.5 * sigma_ ** 5
    res = (V ** 4 * e) * (1 - e ** 2 / 9) / denom
    res *= np.exp(-(V ** 2 * (1 + (e ** 2 / 3))) / (2 * sigma_ ** 2))
    res /= res.sum()
    eta_marginal = res.sum(axis=1)
    zeta_marginal = res.sum(axis=0)
    message = "failed to compare eta projection for Czjzek distribution"
    np.testing.assert_almost_equal(e_vector, eta_marginal, decimal=2, err_msg=message)
    message = "failed to compare zeta projection for Czjzek distribution"
    np.testing.assert_almost_equal(z_vector, zeta_marginal, decimal=2, err_msg=message)
def test_czjzek_pdf():
    """CzjzekDistribution.pdf agrees with the analytical formula on a fixed grid."""
    sigma = 0.5
    z_range = np.arange(100) * 30 / 100 - 15
    e_range = np.arange(21) / 20
    # Analytical Czjzek density on the grid.
    sigma_ = 2 * sigma
    V, e = np.meshgrid(z_range, e_range)
    denom = (2 * np.pi) ** 0.5 * sigma_ ** 5
    analytical = (V ** 4 * e) * (1 - e ** 2 / 9) / denom
    analytical *= np.exp(-(V ** 2 * (1 + (e ** 2 / 3))) / (2 * sigma_ ** 2))
    analytical /= analytical.sum()
    _, _, amp = CzjzekDistribution(sigma).pdf([z_range, e_range])
    error = "Czjzek analytical is not equal to numerical"
    np.testing.assert_almost_equal(analytical, amp, decimal=2, err_msg=error)
def test_czjzek_polar():
    """Polar Czjzek samples survive the x/y <-> zeta/eta round trip."""
    x, y = CzjzekDistribution(sigma=0.5, polar=True).rvs(size=COUNT)
    round_trip = x_y_from_zeta_eta(*x_y_to_zeta_eta(x, y))
    np.testing.assert_almost_equal(x, round_trip[0])
    np.testing.assert_almost_equal(y, round_trip[1])
| [
"os.path.abspath",
"numpy.meshgrid",
"numpy.load",
"numpy.testing.assert_almost_equal",
"mrsimulator.models.CzjzekDistribution",
"numpy.histogram",
"mrsimulator.models.ExtCzjzekDistribution",
"numpy.arange",
"numpy.exp",
"mrsimulator.models.utils.x_y_to_zeta_eta",
"os.path.join"
] | [((347, 369), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (359, 369), False, 'from os import path\n'), ((452, 502), 'os.path.join', 'path.join', (['MODULE_DIR', '"""test_data"""', '"""eps=0.05.npy"""'], {}), "(MODULE_DIR, 'test_data', 'eps=0.05.npy')\n", (461, 502), False, 'from os import path\n'), ((680, 722), 'numpy.histogram', 'np.histogram', (['eta1'], {'bins': '(100)', 'range': '[0, 1]'}), '(eta1, bins=100, range=[0, 1])\n', (692, 722), True, 'import numpy as np\n'), ((792, 879), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(hist1 / COUNT)', 'data[0]'], {'decimal': '(2)', 'err_msg': 'message'}), '(hist1 / COUNT, data[0], decimal=2, err_msg=\n message)\n', (822, 879), True, 'import numpy as np\n'), ((1078, 1115), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['x', 'x1'], {}), '(x, x1)\n', (1108, 1115), True, 'import numpy as np\n'), ((1120, 1157), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y', 'y1'], {}), '(y, y1)\n', (1150, 1157), True, 'import numpy as np\n'), ((1222, 1271), 'os.path.join', 'path.join', (['MODULE_DIR', '"""test_data"""', '"""eps=0.2.npy"""'], {}), "(MODULE_DIR, 'test_data', 'eps=0.2.npy')\n", (1231, 1271), False, 'from os import path\n'), ((1448, 1490), 'numpy.histogram', 'np.histogram', (['eta1'], {'bins': '(100)', 'range': '[0, 1]'}), '(eta1, bins=100, range=[0, 1])\n', (1460, 1490), True, 'import numpy as np\n'), ((1559, 1646), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(hist1 / COUNT)', 'data[1]'], {'decimal': '(2)', 'err_msg': 'message'}), '(hist1 / COUNT, data[1], decimal=2, err_msg=\n message)\n', (1589, 1646), True, 'import numpy as np\n'), ((1706, 1756), 'os.path.join', 'path.join', (['MODULE_DIR', '"""test_data"""', '"""eps=0.65.npy"""'], {}), "(MODULE_DIR, 'test_data', 'eps=0.65.npy')\n", (1715, 1756), False, 'from os import path\n'), ((1934, 1976), 'numpy.histogram', 'np.histogram', 
(['eta1'], {'bins': '(100)', 'range': '[0, 1]'}), '(eta1, bins=100, range=[0, 1])\n', (1946, 1976), True, 'import numpy as np\n'), ((2046, 2133), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(hist1 / COUNT)', 'data[3]'], {'decimal': '(2)', 'err_msg': 'message'}), '(hist1 / COUNT, data[3], decimal=2, err_msg=\n message)\n', (2076, 2133), True, 'import numpy as np\n'), ((2336, 2376), 'numpy.histogram', 'np.histogram', (['eta'], {'bins': '(15)', 'range': '[0, 1]'}), '(eta, bins=15, range=[0, 1])\n', (2348, 2376), True, 'import numpy as np\n'), ((2494, 2538), 'numpy.histogram', 'np.histogram', (['zeta'], {'bins': '(20)', 'range': '[-20, 20]'}), '(zeta, bins=20, range=[-20, 20])\n', (2506, 2538), True, 'import numpy as np\n'), ((2698, 2727), 'numpy.meshgrid', 'np.meshgrid', (['z_range', 'e_range'], {}), '(z_range, e_range)\n', (2709, 2727), True, 'import numpy as np\n'), ((2834, 2890), 'numpy.exp', 'np.exp', (['(-(V ** 2 * (1 + e ** 2 / 3)) / (2 * sigma_ ** 2))'], {}), '(-(V ** 2 * (1 + e ** 2 / 3)) / (2 * sigma_ ** 2))\n', (2840, 2890), True, 'import numpy as np\n'), ((3069, 3146), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['e_vector', 'eta_pro'], {'decimal': '(2)', 'err_msg': 'message'}), '(e_vector, eta_pro, decimal=2, err_msg=message)\n', (3099, 3146), True, 'import numpy as np\n'), ((3242, 3320), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['z_vector', 'zeta_pro'], {'decimal': '(2)', 'err_msg': 'message'}), '(z_vector, zeta_pro, decimal=2, err_msg=message)\n', (3272, 3320), True, 'import numpy as np\n'), ((3525, 3554), 'numpy.meshgrid', 'np.meshgrid', (['z_range', 'e_range'], {}), '(z_range, e_range)\n', (3536, 3554), True, 'import numpy as np\n'), ((3661, 3717), 'numpy.exp', 'np.exp', (['(-(V ** 2 * (1 + e ** 2 / 3)) / (2 * sigma_ ** 2))'], {}), '(-(V ** 2 * (1 + e ** 2 / 3)) / (2 * sigma_ ** 2))\n', (3667, 3717), True, 'import numpy as np\n'), ((3871, 3937), 
'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['res', 'amp'], {'decimal': '(2)', 'err_msg': 'error'}), '(res, amp, decimal=2, err_msg=error)\n', (3901, 3937), True, 'import numpy as np\n'), ((4093, 4130), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['x', 'x1'], {}), '(x, x1)\n', (4123, 4130), True, 'import numpy as np\n'), ((4135, 4172), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['y', 'y1'], {}), '(y, y1)\n', (4165, 4172), True, 'import numpy as np\n'), ((554, 564), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (561, 564), True, 'import numpy as np\n'), ((1323, 1333), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1330, 1333), True, 'import numpy as np\n'), ((1808, 1818), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1815, 1818), True, 'import numpy as np\n'), ((3421, 3434), 'numpy.arange', 'np.arange', (['(21)'], {}), '(21)\n', (3430, 3434), True, 'import numpy as np\n'), ((613, 648), 'mrsimulator.models.ExtCzjzekDistribution', 'ExtCzjzekDistribution', (['S0'], {'eps': '(0.05)'}), '(S0, eps=0.05)\n', (634, 648), False, 'from mrsimulator.models import ExtCzjzekDistribution\n'), ((955, 1002), 'mrsimulator.models.ExtCzjzekDistribution', 'ExtCzjzekDistribution', (['S0'], {'eps': '(0.05)', 'polar': '(True)'}), '(S0, eps=0.05, polar=True)\n', (976, 1002), False, 'from mrsimulator.models import ExtCzjzekDistribution\n'), ((1051, 1072), 'mrsimulator.models.utils.x_y_to_zeta_eta', 'x_y_to_zeta_eta', (['x', 'y'], {}), '(x, y)\n', (1066, 1072), False, 'from mrsimulator.models.utils import x_y_to_zeta_eta\n'), ((1382, 1416), 'mrsimulator.models.ExtCzjzekDistribution', 'ExtCzjzekDistribution', (['S0'], {'eps': '(0.2)'}), '(S0, eps=0.2)\n', (1403, 1416), False, 'from mrsimulator.models import ExtCzjzekDistribution\n'), ((1867, 1902), 'mrsimulator.models.ExtCzjzekDistribution', 'ExtCzjzekDistribution', (['S0'], {'eps': '(0.65)'}), '(S0, eps=0.65)\n', (1888, 1902), False, 'from 
mrsimulator.models import ExtCzjzekDistribution\n'), ((2251, 2276), 'mrsimulator.models.CzjzekDistribution', 'CzjzekDistribution', (['sigma'], {}), '(sigma)\n', (2269, 2276), False, 'from mrsimulator.models import CzjzekDistribution\n'), ((3758, 3783), 'mrsimulator.models.CzjzekDistribution', 'CzjzekDistribution', (['sigma'], {}), '(sigma)\n', (3776, 3783), False, 'from mrsimulator.models import CzjzekDistribution\n'), ((3976, 4017), 'mrsimulator.models.CzjzekDistribution', 'CzjzekDistribution', ([], {'sigma': '(0.5)', 'polar': '(True)'}), '(sigma=0.5, polar=True)\n', (3994, 4017), False, 'from mrsimulator.models import CzjzekDistribution\n'), ((4066, 4087), 'mrsimulator.models.utils.x_y_to_zeta_eta', 'x_y_to_zeta_eta', (['x', 'y'], {}), '(x, y)\n', (4081, 4087), False, 'from mrsimulator.models.utils import x_y_to_zeta_eta\n'), ((3376, 3390), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (3385, 3390), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from tensorflow.keras.preprocessing import image
import os
import glob
import PIL
#PIL.Image.MAX_IMAGE_PIXELS = 933120000
# Source directory of serialized masks and destination for rendered PNGs.
images = "F:/Datasets/DigestPath/mask_npy"
outfolder = "F:/Datasets/DigestPath/masks"
# All serialized mask arrays to convert.
paths = glob.glob(os.path.join(images, "*.npy"))
# Race-free directory creation (replaces the exists()/makedirs() pair,
# which could fail if the directory appeared between the two calls).
os.makedirs(outfolder, exist_ok=True)
# Collected mask widths/heights, filled by extract_image().
ws = []
hs = []
def extract_image(path):
    """Load a .npy mask, save it as a PNG into `outfolder`, and record its size.

    Fixed: the local array was named `image`, shadowing the
    `tensorflow.keras.preprocessing.image` module imported at the top of
    the file; renamed to `mask`.
    """
    imname = os.path.split(path)[1]
    imname = imname.split(".")[0] + ".png"
    mask = np.load(path)
    print(mask.shape)
    w, h = mask.shape  # assumes a 2-D array — TODO confirm for all inputs
    matplotlib.image.imsave(os.path.join(outfolder, imname), mask)
    ws.append(w)
    hs.append(h)
# Convert only the negative-sample masks.
for path in paths:
    if "neg" not in path:
        continue
    print(path)
    extract_image(path)
"numpy.load",
"os.makedirs",
"os.path.exists",
"os.path.split",
"os.path.join"
] | [((297, 326), 'os.path.join', 'os.path.join', (['images', '"""*.npy"""'], {}), "(images, '*.npy')\n", (309, 326), False, 'import os\n'), ((335, 360), 'os.path.exists', 'os.path.exists', (['outfolder'], {}), '(outfolder)\n', (349, 360), False, 'import os\n'), ((370, 392), 'os.makedirs', 'os.makedirs', (['outfolder'], {}), '(outfolder)\n', (381, 392), False, 'import os\n'), ((524, 537), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (531, 537), True, 'import numpy as np\n'), ((448, 467), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (461, 467), False, 'import os\n'), ((611, 642), 'os.path.join', 'os.path.join', (['outfolder', 'imname'], {}), '(outfolder, imname)\n', (623, 642), False, 'import os\n')] |
import cv2
import numpy as np
import pytest
from ..utils import vis, display
@pytest.mark.vis
def test_vis_line_scalar_positive():
    """vis_line_scalar with a positive value must modify a blank frame."""
    frame = np.zeros((120, 160, 3), np.uint8)
    rendered = vis.vis_line_scalar(frame, 0.5)
    assert not np.array_equal(rendered, frame)
    cv2.imshow("test_vis_line_scalar_positive", rendered)
@pytest.mark.vis
def test_vis_line_scalar_negative():
    """vis_line_scalar with a negative value must modify a blank frame."""
    frame = np.zeros((120, 160, 3), np.uint8)
    rendered = vis.vis_line_scalar(frame, -0.5)
    assert not np.array_equal(rendered, frame)
    cv2.imshow("test_vis_line_scalar_negative", rendered)
@pytest.mark.vis
def test_vis_steering_positive():
    """vis_steering must modify the frame for a positive steering value."""
    sample = dict(image=np.zeros((120, 160, 3), np.uint8), steering=1.0)
    rendered = vis.vis_steering(sample)
    assert not np.array_equal(rendered, sample["image"])
    cv2.imshow("test_vis_steering_positive", rendered)
@pytest.mark.vis
def test_vis_steering_negative():
    """vis_steering must modify the frame for a negative steering value."""
    sample = dict(image=np.zeros((120, 160, 3), np.uint8), steering=-1.0)
    rendered = vis.vis_steering(sample)
    assert not np.array_equal(rendered, sample["image"])
    cv2.imshow("test_vis_steering_negative", rendered)
@pytest.mark.vis
def test_vis_throttle_positive():
    """vis_throttle must modify the frame for a positive throttle value."""
    sample = dict(image=np.zeros((120, 160, 3), np.uint8), throttle=0.5)
    rendered = vis.vis_throttle(sample)
    assert not np.array_equal(rendered, sample["image"])
    cv2.imshow("test_vis_throttle_positive", rendered)
@pytest.mark.vis
def test_vis_throttle_negative():
    """vis_throttle must modify the frame for a negative throttle value."""
    sample = dict(image=np.zeros((120, 160, 3), np.uint8), throttle=-0.5)
    rendered = vis.vis_throttle(sample)
    assert not np.array_equal(rendered, sample["image"])
    cv2.imshow("test_vis_throttle_negative", rendered)
@pytest.mark.vis
def test_vis_speed():
    """vis_speed must modify the frame when overlaying the speed value."""
    sample = dict(image=np.zeros((120, 160, 3), np.uint8), speed=13.654321)
    rendered = vis.vis_speed(sample)
    assert not np.array_equal(rendered, sample["image"])
    cv2.imshow("test_vis_speed", rendered)
@pytest.mark.vis
def test_vis_all():
    """vis_all must modify the frame when overlaying every telemetry value."""
    sample = dict(
        image=np.zeros((120, 160, 3), np.uint8),
        speed=13.654321,
        steering=0.345,
        throttle=0.543,
    )
    rendered = vis.vis_all(sample)
    assert not np.array_equal(rendered, sample["image"])
    cv2.imshow("test_vis_all", rendered)
@pytest.mark.vis
def test_vis_compare():
    """compare() stitches two 160-wide frames into one 320-wide frame."""
    left = dict(
        image=np.zeros((120, 160, 3), np.uint8),
        speed=12.34,
        steering=-0.543,
        throttle=-0.456,
    )
    right = dict(
        image=np.zeros((120, 160, 3), np.uint8),
        speed=13.654321,
        steering=0.345,
        throttle=0.543,
    )
    stitched = vis.compare([left, right], image_key="image")
    assert stitched.shape == (120, 320, 3)
    cv2.imshow("test_vis_compare", stitched)
@pytest.mark.vis
def test_vis_display():
    """Display accepts matching window/key/transform lists and updates once."""
    sample = dict(
        image=np.zeros((120, 160, 3), np.uint8),
        speed=12.34,
        steering=-0.543,
        throttle=-0.456,
    )
    windows = ["image_disp0", "image_disp1", "image_disp2"]
    keys = ["image", "image", "image"]
    transform_fns = [vis.vis_all, vis.vis_steering, display.identity_transform]
    disp = display.Display(sample, windows, keys, transform_fns, waitKey=0)
    disp.update()
def test_vis_display_wrong_params():
    """Display must reject argument lists of mismatched lengths."""
    with pytest.raises(Exception):
        # Two window names, zero keys, one transform — deliberately mismatched.
        display.Display(
            {"test": "this is a test"},
            ["image_disp0", "image_disp1"],
            [],
            [display.identity_transform],
            waitKey=0,
        )
| [
"numpy.array_equal",
"pytest.raises",
"cv2.imshow",
"numpy.zeros"
] | [((146, 179), 'numpy.zeros', 'np.zeros', (['(120, 160, 3)', 'np.uint8'], {}), '((120, 160, 3), np.uint8)\n', (154, 179), True, 'import numpy as np\n'), ((281, 335), 'cv2.imshow', 'cv2.imshow', (['"""test_vis_line_scalar_positive"""', 'vis_image'], {}), "('test_vis_line_scalar_positive', vis_image)\n", (291, 335), False, 'import cv2\n'), ((404, 437), 'numpy.zeros', 'np.zeros', (['(120, 160, 3)', 'np.uint8'], {}), '((120, 160, 3), np.uint8)\n', (412, 437), True, 'import numpy as np\n'), ((540, 594), 'cv2.imshow', 'cv2.imshow', (['"""test_vis_line_scalar_negative"""', 'vis_image'], {}), "('test_vis_line_scalar_negative', vis_image)\n", (550, 594), False, 'import cv2\n'), ((862, 913), 'cv2.imshow', 'cv2.imshow', (['"""test_vis_steering_positive"""', 'vis_image'], {}), "('test_vis_steering_positive', vis_image)\n", (872, 913), False, 'import cv2\n'), ((1182, 1233), 'cv2.imshow', 'cv2.imshow', (['"""test_vis_steering_negative"""', 'vis_image'], {}), "('test_vis_steering_negative', vis_image)\n", (1192, 1233), False, 'import cv2\n'), ((1501, 1552), 'cv2.imshow', 'cv2.imshow', (['"""test_vis_throttle_positive"""', 'vis_image'], {}), "('test_vis_throttle_positive', vis_image)\n", (1511, 1552), False, 'import cv2\n'), ((1821, 1872), 'cv2.imshow', 'cv2.imshow', (['"""test_vis_throttle_negative"""', 'vis_image'], {}), "('test_vis_throttle_negative', vis_image)\n", (1831, 1872), False, 'import cv2\n'), ((2128, 2167), 'cv2.imshow', 'cv2.imshow', (['"""test_vis_speed"""', 'vis_image'], {}), "('test_vis_speed', vis_image)\n", (2138, 2167), False, 'import cv2\n'), ((2473, 2510), 'cv2.imshow', 'cv2.imshow', (['"""test_vis_all"""', 'vis_image'], {}), "('test_vis_all', vis_image)\n", (2483, 2510), False, 'import cv2\n'), ((3039, 3080), 'cv2.imshow', 'cv2.imshow', (['"""test_vis_compare"""', 'vis_image'], {}), "('test_vis_compare', vis_image)\n", (3049, 3080), False, 'import cv2\n'), ((244, 276), 'numpy.array_equal', 'np.array_equal', (['vis_image', 'image'], {}), '(vis_image, 
image)\n', (258, 276), True, 'import numpy as np\n'), ((503, 535), 'numpy.array_equal', 'np.array_equal', (['vis_image', 'image'], {}), '(vis_image, image)\n', (517, 535), True, 'import numpy as np\n'), ((684, 717), 'numpy.zeros', 'np.zeros', (['(120, 160, 3)', 'np.uint8'], {}), '((120, 160, 3), np.uint8)\n', (692, 717), True, 'import numpy as np\n'), ((811, 857), 'numpy.array_equal', 'np.array_equal', (['vis_image', "image_data['image']"], {}), "(vis_image, image_data['image'])\n", (825, 857), True, 'import numpy as np\n'), ((1003, 1036), 'numpy.zeros', 'np.zeros', (['(120, 160, 3)', 'np.uint8'], {}), '((120, 160, 3), np.uint8)\n', (1011, 1036), True, 'import numpy as np\n'), ((1131, 1177), 'numpy.array_equal', 'np.array_equal', (['vis_image', "image_data['image']"], {}), "(vis_image, image_data['image'])\n", (1145, 1177), True, 'import numpy as np\n'), ((1323, 1356), 'numpy.zeros', 'np.zeros', (['(120, 160, 3)', 'np.uint8'], {}), '((120, 160, 3), np.uint8)\n', (1331, 1356), True, 'import numpy as np\n'), ((1450, 1496), 'numpy.array_equal', 'np.array_equal', (['vis_image', "image_data['image']"], {}), "(vis_image, image_data['image'])\n", (1464, 1496), True, 'import numpy as np\n'), ((1642, 1675), 'numpy.zeros', 'np.zeros', (['(120, 160, 3)', 'np.uint8'], {}), '((120, 160, 3), np.uint8)\n', (1650, 1675), True, 'import numpy as np\n'), ((1770, 1816), 'numpy.array_equal', 'np.array_equal', (['vis_image', "image_data['image']"], {}), "(vis_image, image_data['image'])\n", (1784, 1816), True, 'import numpy as np\n'), ((1950, 1983), 'numpy.zeros', 'np.zeros', (['(120, 160, 3)', 'np.uint8'], {}), '((120, 160, 3), np.uint8)\n', (1958, 1983), True, 'import numpy as np\n'), ((2077, 2123), 'numpy.array_equal', 'np.array_equal', (['vis_image', "image_data['image']"], {}), "(vis_image, image_data['image'])\n", (2091, 2123), True, 'import numpy as np\n'), ((2243, 2276), 'numpy.zeros', 'np.zeros', (['(120, 160, 3)', 'np.uint8'], {}), '((120, 160, 3), np.uint8)\n', (2251, 2276), 
True, 'import numpy as np\n'), ((2422, 2468), 'numpy.array_equal', 'np.array_equal', (['vis_image', "image_data['image']"], {}), "(vis_image, image_data['image'])\n", (2436, 2468), True, 'import numpy as np\n'), ((2634, 2667), 'numpy.zeros', 'np.zeros', (['(120, 160, 3)', 'np.uint8'], {}), '((120, 160, 3), np.uint8)\n', (2642, 2667), True, 'import numpy as np\n'), ((2792, 2825), 'numpy.zeros', 'np.zeros', (['(120, 160, 3)', 'np.uint8'], {}), '((120, 160, 3), np.uint8)\n', (2800, 2825), True, 'import numpy as np\n'), ((3227, 3260), 'numpy.zeros', 'np.zeros', (['(120, 160, 3)', 'np.uint8'], {}), '((120, 160, 3), np.uint8)\n', (3235, 3260), True, 'import numpy as np\n'), ((3727, 3751), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3740, 3751), False, 'import pytest\n')] |
import numpy
from btypes.big_endian import *
import gl
import gx
from OpenGL.GL import *
import logging
logger = logging.getLogger(__name__)
class Header(Struct):
    """SHP1 section header (big-endian); the field order defines the binary layout."""
    magic = ByteString(4)
    section_size = uint32
    shape_count = uint16
    __padding__ = Padding(2)
    shape_offset = uint32
    index_offset = uint32
    unknown0_offset = uint32
    attribute_descriptor_offset = uint32
    matrix_index_offset = uint32
    packet_offset = uint32
    matrix_selection_offset = uint32
    packet_location_offset = uint32
    def __init__(self):
        self.magic = b'SHP1'
    @classmethod
    def unpack(cls,stream):
        """Read a header from *stream*; raises FormatError on a bad magic."""
        header = super().unpack(stream)
        if header.magic != b'SHP1':
            raise FormatError('invalid magic')
        if header.unknown0_offset != 0:
            # unknown0 is never parsed; warn about files that actually use it
            logger.warning('unknown0_offset different from default')
        return header
class AttributeDescriptor(Struct):
    """Arguments to GXSetVtxDesc; the field order defines the binary layout."""
    attribute = EnumConverter(uint32,gx.Attribute)
    input_type = EnumConverter(uint32,gx.InputType)
    def __init__(self,attribute,input_type):
        self.attribute = attribute
        self.input_type = input_type
    def field(self):
        """Return a (name, dtype) pair for use in a numpy structured dtype."""
        # a direct position/normal matrix index is stored as a single byte
        if self.attribute == gx.VA_PTNMTXIDX and self.input_type == gx.DIRECT:
            return (gx.VA_PTNMTXIDX.name,numpy.uint8)
        if self.input_type == gx.INDEX8:
            return (self.attribute.name,numpy.uint8)
        if self.input_type == gx.INDEX16:
            return (self.attribute.name,numpy.uint16)
        # direct (non-index) storage for other attributes is not supported
        raise ValueError('invalid attribute descriptor')
class AttributeDescriptorList(TerminatedList):
    """A list of AttributeDescriptors terminated by a VA_NULL descriptor."""
    element_type = AttributeDescriptor
    terminator_value = element_type(gx.VA_NULL,gx.NONE)
    @staticmethod
    def terminator_predicate(element):
        # only the attribute matters when deciding where the list ends
        return element.attribute == gx.VA_NULL
class MatrixSelection(Struct):
    """A slice (first, count) of the global matrix index table used by one batch."""
    unknown0 = uint16 # position/normal matrix for texture matrices?
    count = uint16
    first = uint32
class PacketLocation(Struct):
    """Size and offset of one packet; offsets are relative to the packet table."""
    size = uint32
    offset = uint32
class Primitive:
    """A single draw call: a GX primitive type plus the vertices it consumes."""

    def __init__(self, primitive_type, vertices):
        self.primitive_type, self.vertices = primitive_type, vertices
class Batch:
    """A group of primitives drawn with one matrix table."""

    def __init__(self, primitives, matrix_table, unknown0):
        self.unknown0 = unknown0
        self.matrix_table = matrix_table
        self.primitives = primitives
def gl_count_triangles(shape):
    """Return the number of triangles the shape's primitives decompose into.

    Triangle lists contribute one triangle per three vertices, strips and
    fans one per vertex beyond the first two, and quads two per four
    vertices.  Raises ValueError for any other primitive type.
    """
    total = 0
    for primitive in shape.primitives:
        vertex_count = len(primitive.vertices)
        if primitive.primitive_type == gx.TRIANGLES:
            total += vertex_count//3
        elif primitive.primitive_type == gx.TRIANGLESTRIP:
            total += vertex_count - 2
        elif primitive.primitive_type == gx.TRIANGLEFAN:
            total += vertex_count - 2
        elif primitive.primitive_type == gx.QUADS:
            total += vertex_count//2
        else:
            raise ValueError('invalid primitive type')
    return total
def gl_create_element_array(shape,element_map,element_count):
    """Build a GL_TRIANGLES element (index) array for *shape*.

    Each GX primitive is decomposed into triangles; per-primitive vertex
    positions are remapped through *element_map* (the inverse index produced
    by numpy.unique on the vertex array).

    Args:
        shape: object with a ``primitives`` iterable
        element_map: array mapping packet vertex positions to deduplicated
            vertex-buffer indices
        element_count: total number of indices to emit (3 per triangle)

    Returns:
        numpy.uint16 array of length *element_count*

    Raises:
        ValueError: for an unsupported primitive type
    """
    element_array = numpy.empty(element_count,numpy.uint16)
    element_index = 0
    vertex_index = 0
    for primitive in shape.primitives:
        if primitive.primitive_type == gx.TRIANGLES:
            for i in range(len(primitive.vertices)//3):
                # swap the last two vertices to flip the winding order
                element_array[element_index + 0] = element_map[vertex_index + 3*i + 0]
                element_array[element_index + 1] = element_map[vertex_index + 3*i + 2]
                element_array[element_index + 2] = element_map[vertex_index + 3*i + 1]
                element_index += 3
        elif primitive.primitive_type == gx.TRIANGLESTRIP:
            for i in range(len(primitive.vertices) - 2):
                # alternate the winding on every other triangle of the strip
                element_array[element_index + 0] = element_map[vertex_index + i + 1 - (i % 2)]
                element_array[element_index + 1] = element_map[vertex_index + i + (i % 2)]
                element_array[element_index + 2] = element_map[vertex_index + i + 2]
                element_index += 3
        elif primitive.primitive_type == gx.TRIANGLEFAN:
            for i in range(len(primitive.vertices) - 2):
                element_array[element_index + 0] = element_map[vertex_index]
                element_array[element_index + 1] = element_map[vertex_index + i + 2]
                element_array[element_index + 2] = element_map[vertex_index + i + 1]
                element_index += 3
        elif primitive.primitive_type == gx.QUADS:
            # BUGFIX: iterate over every quad (4 vertices each).  The old
            # range(0, len(vertices)//4, 4) visited only one quad in sixteen,
            # leaving most of the element array uninitialized.  Stepping by 4
            # over all vertices yields len/4 quads * 6 indices = 3*(len//2),
            # matching gl_count_triangles.
            for i in range(0,len(primitive.vertices),4):
                # split each quad into two triangles
                element_array[element_index + 0] = element_map[vertex_index + i]
                element_array[element_index + 1] = element_map[vertex_index + i + 1]
                element_array[element_index + 2] = element_map[vertex_index + i + 2]
                element_array[element_index + 3] = element_map[vertex_index + i + 1]
                element_array[element_index + 4] = element_map[vertex_index + i + 3]
                element_array[element_index + 5] = element_map[vertex_index + i + 2]
                element_index += 6
        else:
            raise ValueError('invalid primitive type')
        vertex_index += len(primitive.vertices)
    return element_array
class Shape(Struct):
    """A SHP1 shape record; the field order defines the binary layout."""
    transformation_type = uint8
    __padding__ = Padding(1)
    batch_count = uint16
    attribute_descriptor_offset = uint16
    first_matrix_selection = uint16
    first_packet = uint16
    # NOTE(review): __padding__ is assigned twice; presumably the Struct
    # metaclass records both paddings in declaration order — confirm
    __padding__ = Padding(2)
    bounding_radius = float32
    min_x = float32
    min_y = float32
    min_z = float32
    max_x = float32
    max_y = float32
    max_z = float32
    def __init__(self):
        self.transformation_type = 0
    @property
    def attributes(self):
        """Iterate over the gx attributes of this shape's descriptors."""
        for descriptor in self.attribute_descriptors:
            yield descriptor.attribute
    @property
    def primitives(self):
        """Iterate over all primitives of all batches."""
        for batch in self.batches:
            yield from batch.primitives
    @classmethod
    def pack(cls,stream,shape):
        # keep the serialized count in sync with the actual batch list
        shape.batch_count = len(shape.batches)
        super().pack(stream,shape)
    def create_vertex_type(self):
        """Build the big-endian numpy structured dtype matching the packet vertex layout."""
        return numpy.dtype([descriptor.field() for descriptor in self.attribute_descriptors]).newbyteorder('>')
    def gl_init(self,array_table):
        """Create and fill the GL vertex and element buffers for this shape."""
        self.gl_hide = False
        self.gl_vertex_array = gl.VertexArray()
        glBindVertexArray(self.gl_vertex_array)
        self.gl_vertex_buffer = gl.Buffer()
        glBindBuffer(GL_ARRAY_BUFFER,self.gl_vertex_buffer)
        # three indices per triangle
        self.gl_element_count = 3*gl_count_triangles(self)
        self.gl_element_buffer = gl.Buffer()
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,self.gl_element_buffer)
        vertex_type = numpy.dtype([array_table[attribute].field() for attribute in self.attributes])
        vertex_count = sum(len(primitive.vertices) for primitive in self.primitives)
        vertex_array = numpy.empty(vertex_count,vertex_type)
        for attribute in self.attributes:
            array_table[attribute].load(self,vertex_array)
        # deduplicate vertices; element_map remaps old positions to unique indices
        vertex_array,element_map = numpy.unique(vertex_array,return_inverse=True)
        element_array = gl_create_element_array(self,element_map,self.gl_element_count)
        glBufferData(GL_ARRAY_BUFFER,vertex_array.nbytes,vertex_array,GL_STATIC_DRAW)
        glBufferData(GL_ELEMENT_ARRAY_BUFFER,element_array.nbytes,element_array,GL_STATIC_DRAW)
    def gl_bind(self):
        glBindVertexArray(self.gl_vertex_array)
    def gl_draw(self):
        glDrawElements(GL_TRIANGLES,self.gl_element_count,GL_UNSIGNED_SHORT,None)
def pack_packet(stream,primitives):
    """Write *primitives* as a display-list packet, zero-padded to 0x20 bytes."""
    for primitive in primitives:
        # each primitive is an opcode byte, a vertex count, then raw vertex data
        uint8.pack(stream,primitive.primitive_type)
        uint16.pack(stream,len(primitive.vertices))
        primitive.vertices.tofile(stream)
    align(stream,0x20,b'\x00')
def unpack_packet(stream, vertex_type, size):
    """Parse one display-list packet of *size* bytes into Primitive objects.

    The entire packet is read into memory at once for speed; zero opcodes
    (alignment padding) are skipped.
    """
    buffer = stream.read(size)
    primitives = []
    position = 0
    while position < size:
        opcode = buffer[position]
        if opcode == 0x00:
            # zero bytes pad the packet to its alignment boundary
            position += 1
        else:
            count = uint16.unpack_from(buffer, position + 1)
            vertices = numpy.frombuffer(buffer, vertex_type, count, position + 3)
            primitives.append(Primitive(gx.PrimitiveType(opcode), vertices))
            # 1 opcode byte + 2 count bytes + the vertex payload
            position += 3 + count*vertex_type.itemsize
    return primitives
class Pool:
    """An order-preserving key/value store that also accepts unhashable keys.

    Lookups are linear scans over a key list, so keys only need to support
    equality comparison, not hashing.
    """

    def __init__(self):
        self.keys = []
        self.values = []

    def __contains__(self, key):
        return key in self.keys

    def __missing__(self, key):
        raise KeyError(key)

    def __getitem__(self, key):
        for position, candidate in enumerate(self.keys):
            if candidate == key:
                return self.values[position]
        return self.__missing__(key)

    def __setitem__(self, key, value):
        for position, candidate in enumerate(self.keys):
            if candidate == key:
                # existing key: overwrite its value in place
                self.values[position] = value
                return
        self.keys.append(key)
        self.values.append(value)
class CachedOffsetPacker:
    """Pack values to a stream at most once, remembering where they went.

    Calling the instance packs *args* with ``pack_function`` and returns the
    write offset relative to ``base``; repeated calls with the same arguments
    return the cached offset without writing again.
    """

    def __init__(self, stream, pack_function, base=0, default_offset_table=None):
        self.stream = stream
        self.pack_function = pack_function
        self.base = base
        # the table only needs `[]`, `[]=` and KeyError semantics, so a Pool
        # (which supports unhashable keys) works here as well as a dict
        if default_offset_table is None:
            self.offset_table = {}
        else:
            self.offset_table = default_offset_table

    def __call__(self, *args):
        try:
            return self.offset_table[args]
        except KeyError:
            pass
        offset = self.stream.tell() - self.base
        self.pack_function(self.stream, *args)
        self.offset_table[args] = offset
        return offset
class CachedOffsetUnpacker:
    """Unpack values from a stream at most once per offset.

    Calling the instance seeks to ``base + offset``, runs ``unpack_function``
    and caches the result; later calls with the same offset return the cached
    value, raising ValueError if the extra arguments differ.
    """

    def __init__(self, stream, unpack_function, base=0):
        self.stream = stream
        self.unpack_function = unpack_function
        self.base = base
        self.argument_table = {}
        self.value_table = {}

    def __call__(self, offset, *args):
        try:
            cached = self.value_table[offset]
        except KeyError:
            self.stream.seek(self.base + offset)
            value = self.unpack_function(self.stream, *args)
            self.argument_table[offset] = args
            self.value_table[offset] = value
            return value
        # a cached value is only valid if it was produced by the same arguments
        if args != self.argument_table[offset]:
            raise ValueError('inconsistent arguments for same offset')
        return cached
def pack(stream,shapes):
    """Write a complete SHP1 section for *shapes* to *stream*.

    Space for the header and the shape records is reserved up front and
    filled in at the end, once the sub-table offsets are known.
    """
    base = stream.tell()
    header = Header()
    header.shape_count = len(shapes)
    # reserve space for the header
    stream.write(b'\x00'*Header.sizeof())
    header.shape_offset = stream.tell() - base
    # reserve space for the shape records
    stream.write(b'\x00'*Shape.sizeof()*len(shapes))
    header.index_offset = stream.tell() - base
    # the index table is the identity mapping
    for index in range(len(shapes)):
        uint16.pack(stream,index)
    align(stream,4)
    header.unknown0_offset = 0
    align(stream,0x20)
    header.attribute_descriptor_offset = stream.tell() - base
    # identical descriptor lists are written once and shared via the cache
    pack_attribute_descriptors = CachedOffsetPacker(stream,AttributeDescriptorList.pack,stream.tell(),Pool())
    for shape in shapes:
        shape.attribute_descriptor_offset = pack_attribute_descriptors(shape.attribute_descriptors)
    matrix_indices = []
    matrix_selections = []
    packet_locations = []
    # flatten every batch's matrix table into one global index list
    for shape in shapes:
        shape.first_matrix_selection = len(matrix_selections)
        for batch in shape.batches:
            matrix_selection = MatrixSelection()
            matrix_selection.unknown0 = batch.unknown0
            matrix_selection.first = len(matrix_indices)
            matrix_selection.count = len(batch.matrix_table)
            matrix_indices.extend(batch.matrix_table)
            matrix_selections.append(matrix_selection)
    header.matrix_index_offset = stream.tell() - base
    for matrix_index in matrix_indices:
        uint16.pack(stream,matrix_index)
    align(stream,0x20)
    header.packet_offset = stream.tell() - base
    for shape in shapes:
        # BUGFIX: the serialized Shape field (and the one unpack() reads) is
        # first_packet; the old code assigned first_packet_location, which
        # Shape.pack never writes, so a stale value was stored instead
        shape.first_packet = len(packet_locations)
        for batch in shape.batches:
            packet_location = PacketLocation()
            # packet offsets are relative to the start of the packet table
            packet_location.offset = stream.tell() - header.packet_offset - base
            pack_packet(stream,batch.primitives)
            packet_location.size = stream.tell() - packet_location.offset - header.packet_offset - base
            packet_locations.append(packet_location)
    header.matrix_selection_offset = stream.tell() - base
    for matrix_selection in matrix_selections:
        MatrixSelection.pack(stream,matrix_selection)
    header.packet_location_offset = stream.tell() - base
    for packet_location in packet_locations:
        PacketLocation.pack(stream,packet_location)
    align(stream,0x20)
    header.section_size = stream.tell() - base
    # go back and fill in the header and the shape records
    stream.seek(base)
    Header.pack(stream,header)
    stream.seek(base + header.shape_offset)
    for shape in shapes:
        Shape.pack(stream,shape)
    stream.seek(base + header.section_size)
def unpack(stream):
    """Read a complete SHP1 section from *stream* and return the list of shapes."""
    base = stream.tell()
    header = Header.unpack(stream)
    stream.seek(base + header.shape_offset)
    shapes = [Shape.unpack(stream) for _ in range(header.shape_count)]
    stream.seek(base + header.index_offset)
    # the index table is expected to be the identity mapping
    for index in range(header.shape_count):
        if index != uint16.unpack(stream):
            raise FormatError('invalid index')
    # identical descriptor lists are shared between shapes; the cached
    # unpacker reads each offset only once
    unpack_attribute_descriptors = CachedOffsetUnpacker(stream,AttributeDescriptorList.unpack,base + header.attribute_descriptor_offset)
    for shape in shapes:
        shape.attribute_descriptors = unpack_attribute_descriptors(shape.attribute_descriptor_offset)
    stream.seek(base + header.matrix_selection_offset)
    # table lengths are not stored; infer them from the highest referenced entry
    matrix_selection_count = max(shape.first_matrix_selection + shape.batch_count for shape in shapes)
    matrix_selections = [MatrixSelection.unpack(stream) for _ in range(matrix_selection_count)]
    stream.seek(base + header.matrix_index_offset)
    matrix_index_count = max(selection.first + selection.count for selection in matrix_selections)
    matrix_indices = [uint16.unpack(stream) for _ in range(matrix_index_count)]
    stream.seek(base + header.packet_location_offset)
    packet_count = max(shape.first_packet + shape.batch_count for shape in shapes)
    packet_locations = [PacketLocation.unpack(stream) for _ in range(packet_count)]
    # rebuild each shape's batches from its matrix selections and packets
    for shape in shapes:
        vertex_type = shape.create_vertex_type()
        shape.batches = [None]*shape.batch_count
        for i in range(shape.batch_count):
            matrix_selection = matrix_selections[shape.first_matrix_selection + i]
            matrix_table = matrix_indices[matrix_selection.first:matrix_selection.first + matrix_selection.count]
            packet_location = packet_locations[shape.first_packet + i]
            stream.seek(base + header.packet_offset + packet_location.offset)
            primitives = unpack_packet(stream,vertex_type,packet_location.size)
            shape.batches[i] = Batch(primitives,matrix_table,matrix_selection.unknown0)
    stream.seek(base + header.section_size)
    return shapes
| [
"gx.PrimitiveType",
"gl.Buffer",
"numpy.empty",
"numpy.frombuffer",
"logging.getLogger",
"gl.VertexArray",
"numpy.unique"
] | [((114, 141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (131, 141), False, 'import logging\n'), ((3110, 3150), 'numpy.empty', 'numpy.empty', (['element_count', 'numpy.uint16'], {}), '(element_count, numpy.uint16)\n', (3121, 3150), False, 'import numpy\n'), ((6340, 6356), 'gl.VertexArray', 'gl.VertexArray', ([], {}), '()\n', (6354, 6356), False, 'import gl\n'), ((6438, 6449), 'gl.Buffer', 'gl.Buffer', ([], {}), '()\n', (6447, 6449), False, 'import gl\n'), ((6603, 6614), 'gl.Buffer', 'gl.Buffer', ([], {}), '()\n', (6612, 6614), False, 'import gl\n'), ((6895, 6933), 'numpy.empty', 'numpy.empty', (['vertex_count', 'vertex_type'], {}), '(vertex_count, vertex_type)\n', (6906, 6933), False, 'import numpy\n'), ((7071, 7118), 'numpy.unique', 'numpy.unique', (['vertex_array'], {'return_inverse': '(True)'}), '(vertex_array, return_inverse=True)\n', (7083, 7118), False, 'import numpy\n'), ((8125, 8149), 'gx.PrimitiveType', 'gx.PrimitiveType', (['opcode'], {}), '(opcode)\n', (8141, 8149), False, 'import gx\n'), ((8225, 8283), 'numpy.frombuffer', 'numpy.frombuffer', (['packet', 'vertex_type', 'vertex_count', '(i + 3)'], {}), '(packet, vertex_type, vertex_count, i + 3)\n', (8241, 8283), False, 'import numpy\n')] |
import argparse
import torch
import torch.nn.functional as F
import librosa
import numpy
import tqdm
import soundfile as sf
from net import UpsampleNet
from net import WaveNet
from dataset import MuLaw
from dataset import Preprocess
import pytorch_lightning as pl
class WavenetLightningModule(pl.LightningModule):
    """Lightning wrapper bundling the conditioning encoder and the WaveNet decoder."""
    def __init__(self, n_loop, n_layer, a_channels, r_channels, s_channels, use_embed_tanh):
        """
        Args:
            n_loop: number of dilation loops (n_loop*n_layer sets the encoder size)
            n_layer: number of layers per loop
            a_channels: number of amplitude (quantization) channels; also
                exposed as ``self.a_channels`` for inference code
            r_channels: residual/conditioning channel count
            s_channels: skip channel count
            use_embed_tanh: passed through to WaveNet — presumably toggles a
                tanh on the input embedding; confirm against net.WaveNet
        """
        super().__init__()
        self.a_channels = a_channels
        # upsamples the conditioning features to one vector per output sample
        self.encoder = UpsampleNet(
            n_loop*n_layer,
            r_channels)
        self.decoder = WaveNet(
            n_loop,
            n_layer,
            a_channels,
            r_channels,
            s_channels,
            use_embed_tanh)
if __name__ == '__main__':
    # Command line: input audio, output wav path, trained checkpoint
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', '-i', required=True, help='input file')
    parser.add_argument('--output', '-o', default='result.wav', help='output file')
    parser.add_argument('--model', '-m', required=True, help='snapshot of trained model')
    args = parser.parse_args()
    # Load trained parameters
    model = WavenetLightningModule.load_from_checkpoint(args.model)
    model.eval()
    # Preprocess: extract the conditioning features from the input file
    _, conditions, _ = Preprocess(
        sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,
        length=None, quantize=model.a_channels)(args.input)
    # add a batch dimension
    conditions = torch.Tensor(conditions).unsqueeze(0)
    # Non-autoregressive generate: encode the conditions in one pass
    encoded_conditions = model.encoder(conditions)
    # Autoregressive generate: one output sample per step
    model.decoder.initialize(1)
    x = torch.zeros((1, model.a_channels, 1), dtype=torch.float32)
    output = numpy.zeros(encoded_conditions.size(3), dtype=numpy.float32)
    for i in tqdm.tqdm(range(len(output))):
        with torch.no_grad():
            out = model.decoder.generate(x, encoded_conditions[:, :, :, i:i + 1])
        # sample the next quantized value from the softmax distribution
        p = F.softmax(out, dim=1).detach().numpy()[0, :, 0]
        value = numpy.random.choice(model.a_channels, size=1, p=p)[0]
        # one-hot encode the sampled value as the next decoder input
        x = torch.zeros_like(x)
        x[:, value, :] = 1
        output[i] = value
    # Save: invert the mu-law quantization and write the waveform
    wave = MuLaw(model.a_channels).itransform(output)
    sf.write(args.output, wave, 16000)
| [
"numpy.random.choice",
"dataset.Preprocess",
"argparse.ArgumentParser",
"torch.zeros_like",
"dataset.MuLaw",
"net.UpsampleNet",
"torch.nn.functional.softmax",
"torch.Tensor",
"net.WaveNet",
"soundfile.write",
"torch.zeros",
"torch.no_grad"
] | [((777, 802), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (800, 802), False, 'import argparse\n'), ((1593, 1651), 'torch.zeros', 'torch.zeros', (['(1, model.a_channels, 1)'], {'dtype': 'torch.float32'}), '((1, model.a_channels, 1), dtype=torch.float32)\n', (1604, 1651), False, 'import torch\n'), ((2167, 2201), 'soundfile.write', 'sf.write', (['args.output', 'wave', '(16000)'], {}), '(args.output, wave, 16000)\n', (2175, 2201), True, 'import soundfile as sf\n'), ((498, 539), 'net.UpsampleNet', 'UpsampleNet', (['(n_loop * n_layer)', 'r_channels'], {}), '(n_loop * n_layer, r_channels)\n', (509, 539), False, 'from net import UpsampleNet\n'), ((586, 662), 'net.WaveNet', 'WaveNet', (['n_loop', 'n_layer', 'a_channels', 'r_channels', 's_channels', 'use_embed_tanh'], {}), '(n_loop, n_layer, a_channels, r_channels, s_channels, use_embed_tanh)\n', (593, 662), False, 'from net import WaveNet\n'), ((1240, 1355), 'dataset.Preprocess', 'Preprocess', ([], {'sr': '(16000)', 'n_fft': '(1024)', 'hop_length': '(256)', 'n_mels': '(128)', 'top_db': '(20)', 'length': 'None', 'quantize': 'model.a_channels'}), '(sr=16000, n_fft=1024, hop_length=256, n_mels=128, top_db=20,\n length=None, quantize=model.a_channels)\n', (1250, 1355), False, 'from dataset import Preprocess\n'), ((2024, 2043), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (2040, 2043), False, 'import torch\n'), ((1398, 1422), 'torch.Tensor', 'torch.Tensor', (['conditions'], {}), '(conditions)\n', (1410, 1422), False, 'import torch\n'), ((1783, 1798), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1796, 1798), False, 'import torch\n'), ((1958, 2008), 'numpy.random.choice', 'numpy.random.choice', (['model.a_channels'], {'size': '(1)', 'p': 'p'}), '(model.a_channels, size=1, p=p)\n', (1977, 2008), False, 'import numpy\n'), ((2120, 2143), 'dataset.MuLaw', 'MuLaw', (['model.a_channels'], {}), '(model.a_channels)\n', (2125, 2143), False, 'from dataset import MuLaw\n'), ((1894, 
1915), 'torch.nn.functional.softmax', 'F.softmax', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (1903, 1915), True, 'import torch.nn.functional as F\n')] |
# 評価関数による評価
import numpy as np
import copy
from .generator import Generator
from .standard_data import StandardData
from .leave_one_out import LeaveOneOut
from prediction import Prediction
class CrossValidation(object):
    """Evaluate individuals by leave-one-out cross validation on test data."""

    def __init__(self, data, design_variables, objective_variables):
        # NOTE: CrossValidation takes one more argument than other evaluators
        self._data = data                                 # test data (2-D array)
        self._design_variables = design_variables         # number of design variables
        self._objective_variables = objective_variables   # number of objective variables

    def evaluate(self, individual_set):
        """Evaluate every individual (row) of *individual_set*.

        Args:
            individual_set (np.ndarray): 2-D array of individuals; columns
                0..3 are passed to LeaveOneOut as hyperparameters.

        Returns:
            np.ndarray: 1-D array with one evaluation value per individual.
        """
        self._individual_set = copy.deepcopy(individual_set)
        individual_set = self._individual_set
        # renamed from `object` to avoid shadowing the builtin; also dropped
        # the unused data_size/objective_variables locals
        ndesign = self._design_variables
        size = individual_set.shape[0]
        # split the test data into design variables and objective values
        design = np.array(self._data[0:, 0:ndesign])
        # NOTE(review): this slice starts one column early and drops the last
        # column; confirm the intended objective columns (maybe [:, ndesign:])
        objectives = np.array(self._data[0:, ndesign-1:-1])
        evaluate_set = np.array([], dtype=np.float64)
        for i in range(size):
            model = LeaveOneOut(individual_set[i, 0], individual_set[i, 1],
                                individual_set[i, 2], individual_set[i, 3],
                                design, objectives)
            result = model.cross_validation()
            evaluate_set = np.append(evaluate_set, result)
        return evaluate_set
class CrowdingDistance(object):
    """Rank individuals by non-dominated sorting and NSGA-II crowding distance.

    Predicted objective values come from a Prediction model; each individual
    then gets a rank (number of dominating individuals + 1) and a crowding
    distance computed objective by objective within its rank.
    """

    def __init__(self, design_data, ndesign, nobject, hyper_set, alpha_vector):
        self._design_data = design_data
        self._ndesign = ndesign
        self._nobject = nobject
        self._hyper_set = hyper_set
        self._alpha_vector = alpha_vector

    def evaluate(self, individual_set):
        """Return (rank, crowding distance) rows for *individual_set*."""
        design_data = self._design_data
        hyper_set = self._hyper_set
        alpha_vector = self._alpha_vector
        size = individual_set.shape[0]
        # predicted objective values, one row per individual
        data = Prediction(size, hyper_set, design_data, individual_set, alpha_vector)
        predict_value = data.predict()
        # mat columns: [objective0, objective1, rank (init 1), crowding (init 0)]
        rank = np.ones((size, 1))
        crowd = np.zeros((size, 1))
        mat = np.append(predict_value, rank, axis=1)
        mat = np.append(mat, crowd, axis=1)
        # rank = 1 + number of individuals that dominate this one
        for i in range(size):
            for j in range(size):
                if mat[i, 0] > mat[j, 0] and mat[i, 1] > mat[j, 1]:
                    mat[i, 2] = mat[i, 2] + 1
        mat = mat[np.argsort(mat[:, 2])]
        max_rank = int(np.max(mat, axis=0)[2])
        # objective-wise extrema used to normalise the crowding distance
        max1 = np.max(mat, axis=0)[0]
        max2 = np.max(mat, axis=0)[1]
        min1 = np.min(mat, axis=0)[0]
        min2 = np.min(mat, axis=0)[1]
        sort_mat = np.array([], dtype=np.float64)
        # compute the crowding distance rank by rank
        for i in range(1, max_rank+1):
            count = np.count_nonzero(mat == i, axis=0)[2]
            if count == 0:
                continue
            # NOTE(review): this selects rows where ANY column equals i, so an
            # objective value numerically equal to a rank would also match;
            # kept as-is to preserve behavior — confirm intent
            mat_temp = mat[np.any(mat == i, axis=1)]
            # first objective: sort, then accumulate the neighbour gap
            mat1 = mat_temp[np.argsort(mat_temp[:, 0])]
            if count >= 3:
                for j in range(count):
                    if j == 0 or j == count - 1:
                        # boundary individuals get an effectively infinite distance
                        mat1[j, 3] = 10**10
                    else:
                        mat1[j, 3] = (mat1[j+1, 0] - mat1[j-1, 0])/(max1 - min1)
            else:
                for j in range(count):
                    mat1[j, 3] = 10**10
            # second objective: sort, then accumulate the neighbour gap
            mat2 = mat1[np.argsort(mat1[:, 1])]
            if count >= 3:
                for j in range(count):
                    if j == 0 or j == count - 1:
                        mat2[j, 3] = mat2[j, 3] + 10**10
                    else:
                        # BUGFIX: use the second objective (column 1) here; the
                        # old code reused column 0 while normalising by the
                        # column-1 range (max2 - min2)
                        mat2[j, 3] = mat2[j, 3] + (mat2[j+1, 1] - mat2[j-1, 1])/(max2 - min2)
            else:
                for j in range(count):
                    mat2[j, 3] = mat2[j, 3] + 10**10
            sort_mat = np.append(sort_mat, mat2)
        # restore the 2-D layout after the flat appends
        sort_mat = sort_mat.reshape([size, -1])
        # return only the (rank, crowding distance) columns
        evaluate_set = sort_mat[:, 2:4]
        return evaluate_set
| [
"copy.deepcopy",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.ones",
"numpy.argsort",
"numpy.append",
"numpy.max",
"numpy.min",
"numpy.array",
"numpy.any",
"prediction.Prediction"
] | [((738, 767), 'copy.deepcopy', 'copy.deepcopy', (['individual_set'], {}), '(individual_set)\n', (751, 767), False, 'import copy\n'), ((1151, 1195), 'numpy.array', 'np.array', (['self._data[0:, 0:design_variables]'], {}), '(self._data[0:, 0:design_variables])\n', (1159, 1195), True, 'import numpy as np\n'), ((1213, 1262), 'numpy.array', 'np.array', (['self._data[0:, design_variables - 1:-1]'], {}), '(self._data[0:, design_variables - 1:-1])\n', (1221, 1262), True, 'import numpy as np\n'), ((1285, 1315), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (1293, 1315), True, 'import numpy as np\n'), ((2236, 2306), 'prediction.Prediction', 'Prediction', (['size', 'hyper_set', 'design_data', 'individual_set', 'alpha_vector'], {}), '(size, hyper_set, design_data, individual_set, alpha_vector)\n', (2246, 2306), False, 'from prediction import Prediction\n'), ((2414, 2432), 'numpy.ones', 'np.ones', (['(size, 1)'], {}), '((size, 1))\n', (2421, 2432), True, 'import numpy as np\n'), ((2449, 2468), 'numpy.zeros', 'np.zeros', (['(size, 1)'], {}), '((size, 1))\n', (2457, 2468), True, 'import numpy as np\n'), ((2483, 2521), 'numpy.append', 'np.append', (['predict_value', 'rank'], {'axis': '(1)'}), '(predict_value, rank, axis=1)\n', (2492, 2521), True, 'import numpy as np\n'), ((2536, 2565), 'numpy.append', 'np.append', (['mat', 'crowd'], {'axis': '(1)'}), '(mat, crowd, axis=1)\n', (2545, 2565), True, 'import numpy as np\n'), ((3085, 3113), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (3093, 3113), True, 'import numpy as np\n'), ((3135, 3165), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (3143, 3165), True, 'import numpy as np\n'), ((4824, 4854), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float64'}), '([], dtype=np.float64)\n', (4832, 4854), True, 'import numpy as np\n'), ((1549, 1580), 'numpy.append', 'np.append', (['evaluate_set', 'result'], {}), 
'(evaluate_set, result)\n', (1558, 1580), True, 'import numpy as np\n'), ((2796, 2817), 'numpy.argsort', 'np.argsort', (['mat[:, 2]'], {}), '(mat[:, 2])\n', (2806, 2817), True, 'import numpy as np\n'), ((2931, 2950), 'numpy.max', 'np.max', (['mat'], {'axis': '(0)'}), '(mat, axis=0)\n', (2937, 2950), True, 'import numpy as np\n'), ((2969, 2988), 'numpy.max', 'np.max', (['mat'], {'axis': '(0)'}), '(mat, axis=0)\n', (2975, 2988), True, 'import numpy as np\n'), ((3007, 3026), 'numpy.min', 'np.min', (['mat'], {'axis': '(0)'}), '(mat, axis=0)\n', (3013, 3026), True, 'import numpy as np\n'), ((3045, 3064), 'numpy.min', 'np.min', (['mat'], {'axis': '(0)'}), '(mat, axis=0)\n', (3051, 3064), True, 'import numpy as np\n'), ((2865, 2884), 'numpy.max', 'np.max', (['mat'], {'axis': '(0)'}), '(mat, axis=0)\n', (2871, 2884), True, 'import numpy as np\n'), ((3278, 3312), 'numpy.count_nonzero', 'np.count_nonzero', (['(mat == i)'], {'axis': '(0)'}), '(mat == i, axis=0)\n', (3294, 3312), True, 'import numpy as np\n'), ((4701, 4726), 'numpy.append', 'np.append', (['sort_mat', 'mat2'], {}), '(sort_mat, mat2)\n', (4710, 4726), True, 'import numpy as np\n'), ((3475, 3499), 'numpy.any', 'np.any', (['(mat == i)'], {'axis': '(1)'}), '(mat == i, axis=1)\n', (3481, 3499), True, 'import numpy as np\n'), ((3570, 3596), 'numpy.argsort', 'np.argsort', (['mat_temp[:, 0]'], {}), '(mat_temp[:, 0])\n', (3580, 3596), True, 'import numpy as np\n'), ((4183, 4205), 'numpy.argsort', 'np.argsort', (['mat1[:, 1]'], {}), '(mat1[:, 1])\n', (4193, 4205), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from typing import Callable
from scipy.integrate import nquad
from .entropy import coupled_entropy
def shannon_entropy(density_func: Callable[..., np.ndarray],
                    dim: int = 1,
                    support: list = [[-np.inf, np.inf]],
                    root = False
                    ) -> [float, np.ndarray]:
    """Shannon entropy as the kappa = 0 special case of the coupled entropy.

    Parameters
    ----------
    density_func : callable
        Probability density function to integrate over *support*.
    dim : int, optional
        Dimensionality of the density. The default is 1.
    support : list, optional
        Integration bounds, one (low, high) pair per dimension.
    root : bool, optional
        When True, alpha = 2 is used. The default is False.

    Returns
    -------
    float or np.ndarray
        The entropy value.
    """
    alpha = 2 if root else 1
    return coupled_entropy(density_func,
                           kappa=0.0,
                           alpha=alpha,
                           dim=dim,
                           support=support,
                           root=root)
def tsallis_entropy(density_func: Callable[..., np.ndarray],
                    kappa: float = 0.0,
                    alpha: int = 1,
                    dim: int = 1,
                    support: list = [(-np.inf, np.inf)],
                    normalize = False,
                    root = False
                    ) -> [float, np.ndarray]:
    """Tsallis entropy expressed through the coupled entropy.

    Parameters
    ----------
    density_func : callable
        Probability density function to integrate over *support*.
    kappa : float, optional
        Coupling parameter. The default is 0.0.
    alpha : int, optional
        Exponent parameter. The default is 1.
    dim : int, optional
        Dimensionality of the density. The default is 1.
    support : list, optional
        Integration bounds, one (low, high) pair per dimension.
    normalize : bool, optional
        When True, skip the integral of the deformed density and only apply
        the (1+kappa)**(1/alpha) prefactor. The default is False.
    root : bool, optional
        Passed through to coupled_entropy. The default is False.

    Returns
    -------
    float or np.ndarray
        The entropy value.
    """
    if normalize:
        entropy = (1+kappa)**(1/alpha) * coupled_entropy(density_func,
                                                          kappa=kappa,
                                                          alpha=alpha,
                                                          dim=dim,
                                                          support=support,
                                                          root=root
                                                          )
    else:
        def un_normalized_density_func(*args):
            # nquad passes one scalar per dimension; re-pack them into the
            # array shape that density_func expects
            if dim == 1:
                x = np.array(args)
            else:
                x = np.array([args]).reshape(1, dim)
            # deformed density p**(1 + alpha*kappa/(1+kappa))
            return density_func(x)**(1+(alpha*kappa/(1+kappa)))
        # scale the coupled entropy by the integral of the deformed density
        entropy = (nquad(un_normalized_density_func, support)[0]
                   * (1+kappa)**(1/alpha)
                   * coupled_entropy(density_func,
                                     kappa=kappa,
                                     alpha=alpha,
                                     dim=dim,
                                     support=support,
                                     root=root
                                     )
                   )
    return entropy
"numpy.array",
"scipy.integrate.nquad"
] | [((2476, 2490), 'numpy.array', 'np.array', (['args'], {}), '(args)\n', (2484, 2490), True, 'import numpy as np\n'), ((2646, 2688), 'scipy.integrate.nquad', 'nquad', (['un_normalized_density_func', 'support'], {}), '(un_normalized_density_func, support)\n', (2651, 2688), False, 'from scipy.integrate import nquad\n'), ((2529, 2545), 'numpy.array', 'np.array', (['[args]'], {}), '([args])\n', (2537, 2545), True, 'import numpy as np\n')] |
from ctapipe.utils.datasets import get_example_simtelarray_file
from ctapipe.io.hessio import hessio_event_source
from ctapipe.core import Container
from ctapipe.io.containers import RawData
from ctapipe.io.containers import MCShowerData, CentralTriggerData
from ctapipe.reco.cleaning import tailcuts_clean
from ctapipe import io
from astropy.coordinates import Angle, AltAz
from astropy.time import Time
from ctapipe.instrument import mc_config as ID
from ctapipe.coordinates import CameraFrame, NominalFrame
from ctapipe.calib.array.muon_ring_finder import chaudhuri_kundu_circle_fit
from ctapipe.calib.array.muon_integrator import *
from ctapipe import visualization
import matplotlib.pyplot as plt
from astropy import units as u
import numpy as np
import pyhessio
import time
import logging
import argparse
logging.basicConfig(level=logging.DEBUG)
def get_mc_calibration_coeffs(tel_id):
    """
    Get the calibration coefficients from the MC data file to the
    data. This is a hack (until we have a real data structure for the
    calibrated data); it should move into `ctapipe.io.hessio_event_source`.

    returns
    -------
    (peds, gains) : arrays of the pedestal and pe/dc ratios.
    """
    # only the first channel returned by pyhessio is used
    pedestals = pyhessio.get_pedestal(tel_id)[0]
    dc_to_pe = pyhessio.get_calibration(tel_id)[0]
    return pedestals, dc_to_pe
def apply_mc_calibration(adcs, tel_id):
    """Convert raw ADC sums into calibrated amplitudes.

    Subtracts the MC pedestal and scales by the pe/dc gain for the
    given telescope.
    """
    pedestals, gains = get_mc_calibration_coeffs(tel_id)
    pedestal_subtracted = adcs - pedestals
    return pedestal_subtracted * gains
if __name__ == '__main__':
    # Parse the input simtel file; falls back to the bundled example file.
    parser = argparse.ArgumentParser(description='Perform simple Hillas Reco')
    parser.add_argument('filename', metavar='EVENTIO_FILE', nargs='?',
                        default=get_example_simtelarray_file())
    args = parser.parse_args()
    source = hessio_event_source(args.filename)
    # Build the event container skeleton that gets refilled for every event.
    container = Container("hessio_container")
    container.meta.add_item('pixel_pos', dict())
    container.add_item("dl0", RawData())
    container.add_item("mc", MCShowerData())
    container.add_item("trig", CentralTriggerData())
    container.add_item("count")
    tel,cam,opt = ID.load(filename=args.filename)
    ev = 0
    # One muon-efficiency list per telescope; the `efficiency[tel_id-1]`
    # indexing below assumes telescope IDs 1..4 -- TODO confirm for other arrays.
    efficiency = list()
    efficiency.append(list())
    efficiency.append(list())
    efficiency.append(list())
    efficiency.append(list())
    impact = list()
    # Sentinel: camera geometry is guessed once, from the first telescope seen.
    geom = 0
    for event in source:
        # Fill trigger and MC-truth information for this event.
        container.dl0.tels_with_data = set(pyhessio.get_teldata_list())
        container.trig.tels_with_trigger \
            = pyhessio.get_central_event_teltrg_list()
        time_s, time_ns = pyhessio.get_central_event_gps_time()
        container.trig.gps_time = Time(time_s * u.s, time_ns * u.ns,
                                        format='gps', scale='utc')
        container.mc.energy = pyhessio.get_mc_shower_energy() * u.TeV
        container.mc.alt = Angle(pyhessio.get_mc_shower_altitude(), u.rad)
        container.mc.az = Angle(pyhessio.get_mc_shower_azimuth(), u.rad)
        container.mc.core_x = pyhessio.get_mc_event_xcore() * u.m
        container.mc.core_y = pyhessio.get_mc_event_ycore() * u.m
        # this should be done in a nicer way to not re-allocate the
        # data each time (right now it's just deleted and garbage
        # collected)
        container.dl0.tel = dict()  # clear the previous telescopes
        # NOTE(review): `table` appears unused in this scope.
        table = "CameraTable_VersionFeb2016_TelID"
        for tel_id in container.dl0.tels_with_data:
            x, y = event.meta.pixel_pos[tel_id]
            if geom == 0:
                geom = io.CameraGeometry.guess(x, y,event.meta.optical_foclen[tel_id])
            image = apply_mc_calibration(event.dl0.tel[tel_id].adc_sums[0], tel_id)
            # Skip large cameras (more than 1000 pixels).
            if image.shape[0] >1000:
                continue
            clean_mask = tailcuts_clean(geom,image,1,picture_thresh=5,boundary_thresh=7)
            # Transform pixel positions from the camera frame to the nominal frame.
            camera_coord = CameraFrame(x=x,y=y,z=np.zeros(x.shape)*u.m)
            nom_coord = camera_coord.transform_to(NominalFrame(array_direction=[container.mc.alt,container.mc.az],
                                                               pointing_direction=[container.mc.alt,container.mc.az],
                                                               focal_length=tel['TelescopeTable_VersionFeb2016'][tel['TelescopeTable_VersionFeb2016']['TelID']==tel_id]['FL'][0]*u.m))
            x = nom_coord.x.to(u.deg)
            y = nom_coord.y.to(u.deg)
            img = image*clean_mask
            noise = 5
            weight = img / (img+noise)
            # Iterative ring fit: fit once on all cleaned pixels, then refit
            # twice using only pixels within 0.3*radius of the current ring.
            centre_x,centre_y,radius = chaudhuri_kundu_circle_fit(x,y,image*clean_mask)
            dist = np.sqrt(np.power(x-centre_x,2) + np.power(y-centre_y,2))
            ring_dist = np.abs(dist-radius)
            centre_x,centre_y,radius = chaudhuri_kundu_circle_fit(x,y,image*(ring_dist<radius*0.3))
            dist = np.sqrt(np.power(x-centre_x,2) + np.power(y-centre_y,2))
            ring_dist = np.abs(dist-radius)
            centre_x,centre_y,radius = chaudhuri_kundu_circle_fit(x,y,image*(ring_dist<radius*0.3))
            dist_mask = np.abs(dist-radius)<radius*0.4
            #print (centre_x,centre_y,radius)
            rad = list()
            cx = list()
            cy = list()
            mc_x = container.mc.core_x
            mc_y = container.mc.core_y
            pix_im = image*dist_mask
            nom_dist = np.sqrt(np.power(centre_x,2)+np.power(centre_y,2))
            # Quality cuts: enough bright pixels and total signal, ring centre
            # well inside the field of view, plausible muon-ring radius.
            if(np.sum(pix_im>5)>30 and np.sum(pix_im)>80 and nom_dist.value <1. and radius.value<1.5 and radius.value>1.):
                hess = MuonLineIntegrate(6.50431*u.m,0.883*u.m,pixel_width=0.16*u.deg)
                if (image.shape[0]<2000):
                    im,phi,width,eff=hess.fit_muon(centre_x,centre_y,radius,x[dist_mask],y[dist_mask],image[dist_mask])
                    # Accept only physically sensible impact distances and ring widths.
                    if( im < 6*u.m and im>0.9*u.m and width<0.08*u.deg and width>0.04*u.deg ):# and radius.value>0.2 and radius.value<0.4):
                        efficiency[tel_id-1].append(eff)
                        impact.append(im)
                        #print(len(efficiency),len(impact))
        ev +=1
    print("Muon Efficiency of CT1",np.average(np.asarray(efficiency[0])))
    print("Muon Efficiency of CT2",np.average(np.asarray(efficiency[1])))
    print("Muon Efficiency of CT3",np.average(np.asarray(efficiency[2])))
    print("Muon Efficiency of CT4",np.average(np.asarray(efficiency[3])))
    fig, axs = plt.subplots(2, 2, figsize=(15, 15), sharey=False, sharex=False)
    axs[0][0].hist((efficiency[0]),bins=40,range=(0,0.1), alpha=0.5)
    axs[0][1].hist((efficiency[1]),bins=40,range=(0,0.1), alpha=0.5)
    axs[1][0].hist((efficiency[2]),bins=40,range=(0,0.1), alpha=0.5)
    axs[1][1].hist((efficiency[3]),bins=40,range=(0,0.1), alpha=0.5)
    plt.show()
| [
"numpy.abs",
"argparse.ArgumentParser",
"numpy.sum",
"ctapipe.core.Container",
"pyhessio.get_mc_shower_altitude",
"ctapipe.io.containers.MCShowerData",
"pyhessio.get_pedestal",
"ctapipe.io.CameraGeometry.guess",
"ctapipe.calib.array.muon_ring_finder.chaudhuri_kundu_circle_fit",
"ctapipe.coordinate... | [((816, 856), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (835, 856), False, 'import logging\n'), ((1541, 1606), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Perform simple Hillas Reco"""'}), "(description='Perform simple Hillas Reco')\n", (1564, 1606), False, 'import argparse\n'), ((1787, 1821), 'ctapipe.io.hessio.hessio_event_source', 'hessio_event_source', (['args.filename'], {}), '(args.filename)\n', (1806, 1821), False, 'from ctapipe.io.hessio import hessio_event_source\n'), ((1839, 1868), 'ctapipe.core.Container', 'Container', (['"""hessio_container"""'], {}), "('hessio_container')\n", (1848, 1868), False, 'from ctapipe.core import Container\n'), ((2107, 2138), 'ctapipe.instrument.mc_config.load', 'ID.load', ([], {'filename': 'args.filename'}), '(filename=args.filename)\n', (2114, 2138), True, 'from ctapipe.instrument import mc_config as ID\n'), ((6355, 6419), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(15, 15)', 'sharey': '(False)', 'sharex': '(False)'}), '(2, 2, figsize=(15, 15), sharey=False, sharex=False)\n', (6367, 6419), True, 'import matplotlib.pyplot as plt\n'), ((6702, 6712), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6710, 6712), True, 'import matplotlib.pyplot as plt\n'), ((1222, 1251), 'pyhessio.get_pedestal', 'pyhessio.get_pedestal', (['tel_id'], {}), '(tel_id)\n', (1243, 1251), False, 'import pyhessio\n'), ((1267, 1299), 'pyhessio.get_calibration', 'pyhessio.get_calibration', (['tel_id'], {}), '(tel_id)\n', (1291, 1299), False, 'import pyhessio\n'), ((1948, 1957), 'ctapipe.io.containers.RawData', 'RawData', ([], {}), '()\n', (1955, 1957), False, 'from ctapipe.io.containers import RawData\n'), ((1988, 2002), 'ctapipe.io.containers.MCShowerData', 'MCShowerData', ([], {}), '()\n', (2000, 2002), False, 'from ctapipe.io.containers import MCShowerData, CentralTriggerData\n'), ((2035, 
2055), 'ctapipe.io.containers.CentralTriggerData', 'CentralTriggerData', ([], {}), '()\n', (2053, 2055), False, 'from ctapipe.io.containers import MCShowerData, CentralTriggerData\n'), ((2485, 2525), 'pyhessio.get_central_event_teltrg_list', 'pyhessio.get_central_event_teltrg_list', ([], {}), '()\n', (2523, 2525), False, 'import pyhessio\n'), ((2552, 2589), 'pyhessio.get_central_event_gps_time', 'pyhessio.get_central_event_gps_time', ([], {}), '()\n', (2587, 2589), False, 'import pyhessio\n'), ((2624, 2685), 'astropy.time.Time', 'Time', (['(time_s * u.s)', '(time_ns * u.ns)'], {'format': '"""gps"""', 'scale': '"""utc"""'}), "(time_s * u.s, time_ns * u.ns, format='gps', scale='utc')\n", (2628, 2685), False, 'from astropy.time import Time\n'), ((1710, 1740), 'ctapipe.utils.datasets.get_example_simtelarray_file', 'get_example_simtelarray_file', ([], {}), '()\n', (1738, 1740), False, 'from ctapipe.utils.datasets import get_example_simtelarray_file\n'), ((2398, 2425), 'pyhessio.get_teldata_list', 'pyhessio.get_teldata_list', ([], {}), '()\n', (2423, 2425), False, 'import pyhessio\n'), ((2755, 2786), 'pyhessio.get_mc_shower_energy', 'pyhessio.get_mc_shower_energy', ([], {}), '()\n', (2784, 2786), False, 'import pyhessio\n'), ((2828, 2861), 'pyhessio.get_mc_shower_altitude', 'pyhessio.get_mc_shower_altitude', ([], {}), '()\n', (2859, 2861), False, 'import pyhessio\n'), ((2902, 2934), 'pyhessio.get_mc_shower_azimuth', 'pyhessio.get_mc_shower_azimuth', ([], {}), '()\n', (2932, 2934), False, 'import pyhessio\n'), ((2973, 3002), 'pyhessio.get_mc_event_xcore', 'pyhessio.get_mc_event_xcore', ([], {}), '()\n', (3000, 3002), False, 'import pyhessio\n'), ((3039, 3068), 'pyhessio.get_mc_event_ycore', 'pyhessio.get_mc_event_ycore', ([], {}), '()\n', (3066, 3068), False, 'import pyhessio\n'), ((3738, 3805), 'ctapipe.reco.cleaning.tailcuts_clean', 'tailcuts_clean', (['geom', 'image', '(1)'], {'picture_thresh': '(5)', 'boundary_thresh': '(7)'}), '(geom, image, 1, picture_thresh=5, 
boundary_thresh=7)\n', (3752, 3805), False, 'from ctapipe.reco.cleaning import tailcuts_clean\n'), ((4490, 4542), 'ctapipe.calib.array.muon_ring_finder.chaudhuri_kundu_circle_fit', 'chaudhuri_kundu_circle_fit', (['x', 'y', '(image * clean_mask)'], {}), '(x, y, image * clean_mask)\n', (4516, 4542), False, 'from ctapipe.calib.array.muon_ring_finder import chaudhuri_kundu_circle_fit\n'), ((4639, 4660), 'numpy.abs', 'np.abs', (['(dist - radius)'], {}), '(dist - radius)\n', (4645, 4660), True, 'import numpy as np\n'), ((4698, 4766), 'ctapipe.calib.array.muon_ring_finder.chaudhuri_kundu_circle_fit', 'chaudhuri_kundu_circle_fit', (['x', 'y', '(image * (ring_dist < radius * 0.3))'], {}), '(x, y, image * (ring_dist < radius * 0.3))\n', (4724, 4766), False, 'from ctapipe.calib.array.muon_ring_finder import chaudhuri_kundu_circle_fit\n'), ((4860, 4881), 'numpy.abs', 'np.abs', (['(dist - radius)'], {}), '(dist - radius)\n', (4866, 4881), True, 'import numpy as np\n'), ((4919, 4987), 'ctapipe.calib.array.muon_ring_finder.chaudhuri_kundu_circle_fit', 'chaudhuri_kundu_circle_fit', (['x', 'y', '(image * (ring_dist < radius * 0.3))'], {}), '(x, y, image * (ring_dist < radius * 0.3))\n', (4945, 4987), False, 'from ctapipe.calib.array.muon_ring_finder import chaudhuri_kundu_circle_fit\n'), ((6089, 6114), 'numpy.asarray', 'np.asarray', (['efficiency[0]'], {}), '(efficiency[0])\n', (6099, 6114), True, 'import numpy as np\n'), ((6163, 6188), 'numpy.asarray', 'np.asarray', (['efficiency[1]'], {}), '(efficiency[1])\n', (6173, 6188), True, 'import numpy as np\n'), ((6237, 6262), 'numpy.asarray', 'np.asarray', (['efficiency[2]'], {}), '(efficiency[2])\n', (6247, 6262), True, 'import numpy as np\n'), ((6311, 6336), 'numpy.asarray', 'np.asarray', (['efficiency[3]'], {}), '(efficiency[3])\n', (6321, 6336), True, 'import numpy as np\n'), ((3503, 3567), 'ctapipe.io.CameraGeometry.guess', 'io.CameraGeometry.guess', (['x', 'y', 'event.meta.optical_foclen[tel_id]'], {}), '(x, y, 
event.meta.optical_foclen[tel_id])\n', (3526, 3567), False, 'from ctapipe import io\n'), ((3926, 4184), 'ctapipe.coordinates.NominalFrame', 'NominalFrame', ([], {'array_direction': '[container.mc.alt, container.mc.az]', 'pointing_direction': '[container.mc.alt, container.mc.az]', 'focal_length': "(tel['TelescopeTable_VersionFeb2016'][tel['TelescopeTable_VersionFeb2016'][\n 'TelID'] == tel_id]['FL'][0] * u.m)"}), "(array_direction=[container.mc.alt, container.mc.az],\n pointing_direction=[container.mc.alt, container.mc.az], focal_length=\n tel['TelescopeTable_VersionFeb2016'][tel[\n 'TelescopeTable_VersionFeb2016']['TelID'] == tel_id]['FL'][0] * u.m)\n", (3938, 4184), False, 'from ctapipe.coordinates import CameraFrame, NominalFrame\n'), ((5005, 5026), 'numpy.abs', 'np.abs', (['(dist - radius)'], {}), '(dist - radius)\n', (5011, 5026), True, 'import numpy as np\n'), ((4566, 4591), 'numpy.power', 'np.power', (['(x - centre_x)', '(2)'], {}), '(x - centre_x, 2)\n', (4574, 4591), True, 'import numpy as np\n'), ((4591, 4616), 'numpy.power', 'np.power', (['(y - centre_y)', '(2)'], {}), '(y - centre_y, 2)\n', (4599, 4616), True, 'import numpy as np\n'), ((4787, 4812), 'numpy.power', 'np.power', (['(x - centre_x)', '(2)'], {}), '(x - centre_x, 2)\n', (4795, 4812), True, 'import numpy as np\n'), ((4812, 4837), 'numpy.power', 'np.power', (['(y - centre_y)', '(2)'], {}), '(y - centre_y, 2)\n', (4820, 4837), True, 'import numpy as np\n'), ((5315, 5336), 'numpy.power', 'np.power', (['centre_x', '(2)'], {}), '(centre_x, 2)\n', (5323, 5336), True, 'import numpy as np\n'), ((5336, 5357), 'numpy.power', 'np.power', (['centre_y', '(2)'], {}), '(centre_y, 2)\n', (5344, 5357), True, 'import numpy as np\n'), ((5373, 5391), 'numpy.sum', 'np.sum', (['(pix_im > 5)'], {}), '(pix_im > 5)\n', (5379, 5391), True, 'import numpy as np\n'), ((5397, 5411), 'numpy.sum', 'np.sum', (['pix_im'], {}), '(pix_im)\n', (5403, 5411), True, 'import numpy as np\n'), ((3852, 3869), 'numpy.zeros', 'np.zeros', 
(['x.shape'], {}), '(x.shape)\n', (3860, 3869), True, 'import numpy as np\n')] |
from libs.can import CANSocket
from libs.myactuator import MyActuator
from time import perf_counter, sleep
import numpy as np
# import getpass
# password = getpass.getpass()
# the serial port of device
# you may find one by examing /dev/ folder,
# this is usually devices ttyACM
# os.system(f"sudo slcand -o -c -s8 /dev/serial/by-id/usb-Protofusion_Labs_CANable_8c005eb_https\:__github.com_normaldotcom_cantact-fw.git_001D00335734570920343135-if00 can0")
serial_device = "ttyACM1"  # CAN-to-USB adapter device node under /dev
# Open the CAN bus socket on the adapter
can_bus = CANSocket(serial_port=serial_device)
# Initiate the motor driver on that bus
motor = MyActuator(can_bus=can_bus)
# Control loop timing: 1 kHz -> 1 ms sampling period
frequency = 1000
sampling_time = 1 / frequency
def stop_motor(motor):
    """Send a burst of 100 zero-current commands to bring the motor to rest.

    The command is repeated (presumably so the stop request survives any
    dropped frames on the bus -- confirm against the actuator docs).
    """
    sent = 0
    while sent < 100:
        motor.set_current(0)
        sent += 1
# total working time per (amplitude, frequency, gain) experiment, in seconds
T = 3
N = T * frequency
# proportional gains to sweep
# gains = [20]
gains = [20,50,70]
g_n = len(gains)
# amplitudes of the sinusoidal reference trajectory
# sin_amplitudes = [2]
sin_amplitudes = [2, 6]
# frequencies of the sinusoidal reference trajectory
# sin_frequencies = [10]
sin_frequencies = [1, 7]
amp_n = len(sin_amplitudes)
freq_n = len(sin_frequencies)
# Logs indexed by (amplitude, frequency, gain, sample).
angles = np.zeros((amp_n, freq_n, g_n, N))
velocities = np.zeros((amp_n, freq_n, g_n, N))
times = np.zeros(N)
angle_initial = 2
velocity_desired = 0
angle_desired = np.zeros((amp_n, freq_n, N))
motor.set_current(0)
try:
    # Sweep every (amplitude, frequency, gain) combination.
    for amp in range(amp_n):
        for freq in range(freq_n):
            for k in range(g_n):
                i = 0
                last_execution = 0
                control = 0
                # find the global time before entering control loop
                initial_time = perf_counter()
                # motor.set_zero()
                initial_angle = motor.state["angle"] + angle_initial
                while True:
                    time = perf_counter() - initial_time # get actual time in secs
                    # /////////////////////////
                    # Get and parse motor state
                    # /////////////////////////
                    state = motor.state
                    theta = state["angle"] - initial_angle
                    dtheta = state["speed"]
                    current = state["current"]
                    # ///////////////////////////////////////////
                    # Update the control only on specific timings
                    # ///////////////////////////////////////////
                    # P-control
                    if (time - last_execution) >= sampling_time:
                        if i >= N:
                            break
                        angles[amp, freq, k, i] = theta
                        velocities[amp, freq, k, i] = dtheta
                        times[i] = time
                        # NOTE(review): despite its name, `current_desired` is the desired
                        # *angle* of the sinusoidal trajectory (it is stored in
                        # `angle_desired` and compared against theta) -- confirm naming.
                        current_desired = sin_amplitudes[amp] * np.sin(sin_frequencies[freq] * time)
                        angle_desired[amp, freq, i] = current_desired
                        control = -gains[k] * (theta - current_desired)
                        i += 1
                        last_execution = time
                    # YOUR CONTROLLER GOES HERE
                    motor.set_current(control)
                stop_motor(motor)
                sleep(1)
except KeyboardInterrupt:
    stop_motor(motor)
    print("Disabled by interrupt")
motor = None
import matplotlib.pyplot as plt
# One row of (angle, velocity) subplots per (amplitude, frequency) combination.
fig, ax = plt.subplots(amp_n * freq_n, 2, figsize=(16, amp_n*freq_n*5))
bound = 1  # NOTE(review): appears unused in the plotting code below
last_n = 10  # NOTE(review): appears unused in the plotting code below
def add_plot(ax, x, ts, gain):
    """Draw one gain-labelled curve of *x* versus timestamps *ts* on axis *ax*."""
    curve_label = "gain: {}".format(gain)
    ax.plot(ts, x, label=curve_label)
for amp in range(amp_n):
    for freq in range(freq_n):
        # Left column: angle traces; right column: velocity traces.
        ax0 = ax[amp * freq_n + freq, 0]
        ax1 = ax[amp * freq_n + freq, 1]
        ax0.set_xlabel("t [s]")
        ax0.set_ylabel("$\\theta$ [$rad$]")
        ax1.set_xlabel("t [s]")
        ax1.set_ylabel("$\\dot{\\theta}$ [$\\frac{rad}{s}$]")
        # Overlay one curve per gain value on each axis.
        for i in range(g_n):
            add_plot(
                ax=ax0,
                x=angles[amp, freq, i],
                ts=times,
                gain=gains[i],
            )
            add_plot(
                ax=ax1,
                x=velocities[amp, freq, i],
                ts=times,
                gain=gains[i],
            )
        # Reference trajectory for this (amplitude, frequency) combination.
        ax0.plot(times, angle_desired[amp, freq], label=f"$\\theta_{{desired}}$, amplitude: {sin_amplitudes[amp]}, frequency: {sin_frequencies[freq]} Hz")
        ax0.legend()
        ax1.legend()
fig.suptitle(f"control loop frequency = {frequency} Hz", fontsize="13")
fig.tight_layout(pad=3.0)
plt.savefig("./plots/4.2.png")
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.zeros",
"time.perf_counter",
"time.sleep",
"libs.can.CANSocket",
"numpy.sin",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"libs.myactuator.MyActuator"
] | [((526, 562), 'libs.can.CANSocket', 'CANSocket', ([], {'serial_port': 'serial_device'}), '(serial_port=serial_device)\n', (535, 562), False, 'from libs.can import CANSocket\n'), ((589, 616), 'libs.myactuator.MyActuator', 'MyActuator', ([], {'can_bus': 'can_bus'}), '(can_bus=can_bus)\n', (599, 616), False, 'from libs.myactuator import MyActuator\n'), ((1043, 1076), 'numpy.zeros', 'np.zeros', (['(amp_n, freq_n, g_n, N)'], {}), '((amp_n, freq_n, g_n, N))\n', (1051, 1076), True, 'import numpy as np\n'), ((1090, 1123), 'numpy.zeros', 'np.zeros', (['(amp_n, freq_n, g_n, N)'], {}), '((amp_n, freq_n, g_n, N))\n', (1098, 1123), True, 'import numpy as np\n'), ((1132, 1143), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1140, 1143), True, 'import numpy as np\n'), ((1201, 1229), 'numpy.zeros', 'np.zeros', (['(amp_n, freq_n, N)'], {}), '((amp_n, freq_n, N))\n', (1209, 1229), True, 'import numpy as np\n'), ((3280, 3345), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(amp_n * freq_n)', '(2)'], {'figsize': '(16, amp_n * freq_n * 5)'}), '(amp_n * freq_n, 2, figsize=(16, amp_n * freq_n * 5))\n', (3292, 3345), True, 'import matplotlib.pyplot as plt\n'), ((4426, 4456), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./plots/4.2.png"""'], {}), "('./plots/4.2.png')\n", (4437, 4456), True, 'import matplotlib.pyplot as plt\n'), ((4457, 4467), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4465, 4467), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1553), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1551, 1553), False, 'from time import perf_counter, sleep\n'), ((3127, 3135), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (3132, 3135), False, 'from time import perf_counter, sleep\n'), ((1713, 1727), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1725, 1727), False, 'from time import perf_counter, sleep\n'), ((2717, 2753), 'numpy.sin', 'np.sin', (['(sin_frequencies[freq] * time)'], {}), '(sin_frequencies[freq] * time)\n', (2723, 2753), True, 
'import numpy as np\n')] |
import functools
import json
import pathlib
import time

import numpy as np

from .logging import logger
# Timing and Performance
def timing_info(method):
    """Decorator that logs the wall-clock runtime of *method* in milliseconds.

    Uses ``functools.wraps`` so the wrapped function keeps its original
    ``__name__``/``__doc__`` -- without it every decorated function logged
    and introspected as ``wrapper``.
    """
    @functools.wraps(method)
    def wrapper(*args, **kw):
        start_time = time.time()
        result = method(*args, **kw)
        end_time = time.time()
        logger.info(f"timing_info: {method.__name__}"
                    f"@{round((end_time-start_time)*1000,1)} ms")
        return result
    return wrapper
def record_time_interval(section, start_time, line_break=False):
    """Log the time elapsed since *start_time* and return the current time.

    Parameters
    ----------
    section : str
        Name of the code section being timed (shown in the log line).
    start_time : float
        Timestamp (from ``time.time()``) marking the start of the interval.
    line_break : bool
        If True, append a newline after the log message.

    Returns
    -------
    float
        The current timestamp, convenient for chaining measurements.
    """
    end_time = time.time()
    delta = end_time - start_time
    # Report sub-second intervals in milliseconds for readability.
    if delta < 1:
        delta *= 1000
        units = "ms"
    else:
        units = "s"
    # Single log call instead of two near-duplicate branches (DRY).
    suffix = "\n" if line_break else ""
    logger.debug("PROCESS_TIME:{:>36} {} {}{}".format(section, round(delta, 1), units, suffix))
    return end_time
def normalize_numpy_dict(d):
    """Return a shallow copy of *d* with numpy scalars converted to Python scalars.

    ``np.asscalar`` was deprecated in NumPy 1.16 and removed in 1.23; the
    supported replacement is the scalar's ``.item()`` method.
    """
    ret = d.copy()
    for key, value in ret.items():
        if isinstance(value, np.generic):
            # Reassigning an existing key while iterating is safe (no resize).
            ret[key] = value.item()
    return ret
def save_json(filename, obj, indent=2, sort_keys=True):
    """Serialize *obj* to *filename* as JSON.

    Parameters
    ----------
    filename : pathname
        Destination file; overwritten if it exists.
    obj : object
        JSON-serializable object to write.
    indent : integer
        Number of spaces used for indentation.
    sort_keys : boolean
        Whether to sort keys before writing. Should be True if you ever use
        revision control on the resulting json file, so diffs stay stable.
    """
    serialized = json.dumps(obj, indent=indent, sort_keys=sort_keys)
    with open(filename, 'w') as handle:
        handle.write(serialized)
def load_json(filename):
    """Read *filename* from disk and return the decoded JSON object."""
    with open(filename) as handle:
        return json.load(handle)
def head_file(filename, n=5):
    """Return the first `n` lines of a file, concatenated into one string.

    Fixes an off-by-one in the original loop condition (``if i > n``),
    which returned ``n + 1`` lines instead of ``n``.
    """
    with open(filename, 'r') as fd:
        lines = []
        for i, line in enumerate(fd):
            if i >= n:  # stop once n lines have been collected
                break
            lines.append(line)
    return "".join(lines)
def list_dir(path, fully_qualified=False, glob_pattern='*'):
    """List the entries of *path* that match *glob_pattern* (an ``ls``).

    fully_qualified: boolean (default: False)
        If True, return a list of fully qualified pathlib objects;
        if False, return just the bare filenames.
    glob_pattern: glob (default: '*')
        File pattern to match.

    Returns
    -------
    A list of names, or fully qualified pathlib objects
    """
    matches = pathlib.Path(path).glob(glob_pattern)
    if fully_qualified:
        return list(matches)
    return [entry.name for entry in matches]
| [
"json.dump",
"json.load",
"time.time",
"pathlib.Path",
"numpy.asscalar"
] | [((570, 581), 'time.time', 'time.time', ([], {}), '()\n', (579, 581), False, 'import time\n'), ((188, 199), 'time.time', 'time.time', ([], {}), '()\n', (197, 199), False, 'import time\n'), ((256, 267), 'time.time', 'time.time', ([], {}), '()\n', (265, 267), False, 'import time\n'), ((1567, 1621), 'json.dump', 'json.dump', (['obj', 'fw'], {'indent': 'indent', 'sort_keys': 'sort_keys'}), '(obj, fw, indent=indent, sort_keys=sort_keys)\n', (1576, 1621), False, 'import json\n'), ((1730, 1743), 'json.load', 'json.load', (['fw'], {}), '(fw)\n', (1739, 1743), False, 'import json\n'), ((1081, 1095), 'numpy.asscalar', 'np.asscalar', (['v'], {}), '(v)\n', (1092, 1095), True, 'import numpy as np\n'), ((2483, 2501), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (2495, 2501), False, 'import pathlib\n'), ((2557, 2575), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (2569, 2575), False, 'import pathlib\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 3rd party imports
import numpy as np
import xarray as xr
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2020-2021"
__license__ = "MIT"
__version__ = "2.3.7"
__status__ = "Prototype"
def feeps_sector_spec(inp_alle):
    r"""Build sector-spectrograms from FEEPS data (particle data organized
    by time and spin-sector number).

    Parameters
    ----------
    inp_alle : xarray.Dataset
        Dataset of energy spectrum of all eyes.

    Returns
    -------
    out : xarray.Dataset
        Sector-spectrograms with FEEPS data for all eyes.
    """
    top_eyes = [key for key in inp_alle if key[:3] in "top"]
    bot_eyes = [key for key in inp_alle if key[:3] in "bot"]
    eyes = top_eyes + bot_eyes

    sec_times = inp_alle["spinsectnum"].time.data
    sec_nums = inp_alle["spinsectnum"].data

    # Carry every non-eye variable over unchanged.
    out_dict = {key: inp_alle[key] for key in inp_alle if key not in eyes}

    # Spin boundaries: indices where the sector counter wraps around.
    spin_starts = np.where(sec_nums[:-1] > sec_nums[1:])[0] + 1

    for eye in eyes:
        eye_data = inp_alle[eye].data
        spec = np.zeros((len(spin_starts), 64))

        prev_start = spin_starts[0]
        for row, start in enumerate(spin_starts):
            # Sectors covered during this spin; average over energy channels.
            sectors = sec_nums[prev_start:start]
            spec[row, sectors] = np.nanmean(eye_data[prev_start:start, :], axis=1)
            prev_start = start

        out_dict[eye] = xr.DataArray(
            spec,
            coords=[sec_times[spin_starts], np.arange(64)],
            dims=["time", "sectornum"],
        )

    return xr.Dataset(out_dict)
| [
"numpy.where",
"numpy.nanmean",
"numpy.arange",
"xarray.Dataset"
] | [((1784, 1804), 'xarray.Dataset', 'xr.Dataset', (['out_dict'], {}), '(out_dict)\n', (1794, 1804), True, 'import xarray as xr\n'), ((1408, 1456), 'numpy.nanmean', 'np.nanmean', (['sensor_data[c_start:spin, :]'], {'axis': '(1)'}), '(sensor_data[c_start:spin, :], axis=1)\n', (1418, 1456), True, 'import numpy as np\n'), ((1096, 1140), 'numpy.where', 'np.where', (['(sector_data[:-1] > sector_data[1:])'], {}), '(sector_data[:-1] > sector_data[1:])\n', (1104, 1140), True, 'import numpy as np\n'), ((1693, 1706), 'numpy.arange', 'np.arange', (['(64)'], {}), '(64)\n', (1702, 1706), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import os
def handsegment(frame):
    """Isolate skin-colored regions (hands) in a BGR frame.

    Thresholds the frame in HSV space over two hue bands that cover skin
    tones (hue wraps around red, hence two ranges: 0-20 and 170-180),
    cleans the combined mask morphologically, and returns the input frame
    with everything outside the mask blacked out.

    Changes vs. original: removed the dead ``mask2 = cv2.bitwise_not(mask1)``
    computation (its result was never used), removed large commented-out
    code blocks, and hoisted the duplicate 3x3 kernel allocation.
    """
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Low-hue skin band.
    lower = np.array([0, 48, 80], dtype="uint8")
    upper = np.array([20, 255, 255], dtype="uint8")
    mask = cv2.inRange(hsv, lower, upper)
    # High-hue skin band (the ranges do not overlap, so plain addition is safe).
    lower = np.array([170, 48, 80], dtype="uint8")
    upper = np.array([180, 255, 255], dtype="uint8")
    mask = mask + cv2.inRange(hsv, lower, upper)
    # Remove speckle noise, then grow the mask slightly to keep hand edges.
    kernel = np.ones((3, 3), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)
    mask = cv2.dilate(mask, kernel, iterations=1)
    return cv2.bitwise_and(frame, frame, mask=mask)
def test_a_video():
    """Visually check hand segmentation on the first frame of one sample video.

    Shows the original and segmented frame side by side in blocking
    OpenCV windows; prints an error if the video cannot be read.
    """
    video_url = 'train_videos/Ai/AiHn2.mp4'
    cap = cv2.VideoCapture(video_url)
    ret, frame = cap.read()
    if not ret:
        print('File %s not found' % (video_url))
        return
    segmented = handsegment(frame)
    cv2.imshow('segmented_frame', cv2.resize(segmented, (420, 480)))
    cv2.imshow('frame', cv2.resize(frame, (420, 480)))
    cv2.waitKey()
    cv2.destroyAllWindows()
def test_entire_videos(dir_videos):
    """Visually check hand segmentation on the first frame of every video.

    Expects a ``dir_videos/<label>/<video file>`` directory layout and
    shows the original and segmented first frame of each video in
    blocking OpenCV windows.

    Changes vs. original: removed the unused locals ``count`` and ``init``
    and flattened the ``else`` branch into an early ``continue``.
    """
    labels = os.listdir(dir_videos)
    for label in labels:
        videos_files = os.listdir(dir_videos + '/' + label)  # e.g. './train_videos/Ba'
        for video_file in videos_files:
            video_url = dir_videos + '/' + label + '/' + video_file
            cap = cv2.VideoCapture(video_url)  # e.g. './train_videos/Ba/Bắt tay 3.mp4'
            print('Reading video %s' % (video_url))
            ret, frame = cap.read()
            if not ret:
                print('[ERROR] Can not read video %s' % (video_url))
                continue
            segmented_frame = handsegment(frame)
            segmented_frame = cv2.resize(segmented_frame, (420, 480))
            frame = cv2.resize(frame, (420, 480))
            cv2.imshow('segmented_frame', segmented_frame)
            cv2.imshow('frame', frame)
            cv2.waitKey()
            cv2.destroyAllWindows()
if __name__ == '__main__':
    # Manual, visual check of the hand segmentation over all training videos.
    test_entire_videos('train_videos')
"cv2.bitwise_not",
"cv2.bitwise_and",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"numpy.ones",
"cv2.VideoCapture",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.inRange",
"os.listdir",
"cv2.resize"
] | [((75, 113), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (87, 113), False, 'import cv2\n'), ((248, 284), 'numpy.array', 'np.array', (['[0, 48, 80]'], {'dtype': '"""uint8"""'}), "([0, 48, 80], dtype='uint8')\n", (256, 284), True, 'import numpy as np\n'), ((297, 336), 'numpy.array', 'np.array', (['[20, 255, 255]'], {'dtype': '"""uint8"""'}), "([20, 255, 255], dtype='uint8')\n", (305, 336), True, 'import numpy as np\n'), ((349, 379), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower', 'upper'], {}), '(hsv, lower, upper)\n', (360, 379), False, 'import cv2\n'), ((427, 465), 'numpy.array', 'np.array', (['[170, 48, 80]'], {'dtype': '"""uint8"""'}), "([170, 48, 80], dtype='uint8')\n", (435, 465), True, 'import numpy as np\n'), ((476, 516), 'numpy.array', 'np.array', (['[180, 255, 255]'], {'dtype': '"""uint8"""'}), "([180, 255, 255], dtype='uint8')\n", (484, 516), True, 'import numpy as np\n'), ((527, 557), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower', 'upper'], {}), '(hsv, lower, upper)\n', (538, 557), False, 'import cv2\n'), ((758, 780), 'cv2.bitwise_not', 'cv2.bitwise_not', (['mask1'], {}), '(mask1)\n', (773, 780), False, 'import cv2\n'), ((1293, 1334), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask1'}), '(frame, frame, mask=mask1)\n', (1308, 1334), False, 'import cv2\n'), ((1535, 1562), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_url'], {}), '(video_url)\n', (1551, 1562), False, 'import cv2\n'), ((2018, 2040), 'os.listdir', 'os.listdir', (['dir_videos'], {}), '(dir_videos)\n', (2028, 2040), False, 'import os\n'), ((2914, 2937), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2935, 2937), False, 'import cv2\n'), ((639, 664), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (646, 664), True, 'import numpy as np\n'), ((706, 731), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (713, 731), True, 'import 
numpy as np\n'), ((1738, 1777), 'cv2.resize', 'cv2.resize', (['segmented_frame', '(420, 480)'], {}), '(segmented_frame, (420, 480))\n', (1748, 1777), False, 'import cv2\n'), ((1794, 1823), 'cv2.resize', 'cv2.resize', (['frame', '(420, 480)'], {}), '(frame, (420, 480))\n', (1804, 1823), False, 'import cv2\n'), ((1831, 1877), 'cv2.imshow', 'cv2.imshow', (['"""segmented_frame"""', 'segmented_frame'], {}), "('segmented_frame', segmented_frame)\n", (1841, 1877), False, 'import cv2\n'), ((1886, 1912), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (1896, 1912), False, 'import cv2\n'), ((1921, 1934), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1932, 1934), False, 'import cv2\n'), ((1943, 1966), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1964, 1966), False, 'import cv2\n'), ((2090, 2126), 'os.listdir', 'os.listdir', (["(dir_videos + '/' + label)"], {}), "(dir_videos + '/' + label)\n", (2100, 2126), False, 'import os\n'), ((2276, 2303), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_url'], {}), '(video_url)\n', (2292, 2303), False, 'import cv2\n'), ((2680, 2719), 'cv2.resize', 'cv2.resize', (['segmented_frame', '(420, 480)'], {}), '(segmented_frame, (420, 480))\n', (2690, 2719), False, 'import cv2\n'), ((2744, 2773), 'cv2.resize', 'cv2.resize', (['frame', '(420, 480)'], {}), '(frame, (420, 480))\n', (2754, 2773), False, 'import cv2\n'), ((2789, 2835), 'cv2.imshow', 'cv2.imshow', (['"""segmented_frame"""', 'segmented_frame'], {}), "('segmented_frame', segmented_frame)\n", (2799, 2835), False, 'import cv2\n'), ((2852, 2878), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (2862, 2878), False, 'import cv2\n'), ((2895, 2908), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (2906, 2908), False, 'import cv2\n')] |
import os
import struct
from enum import Enum
from typing import Optional
import numpy as np
import torch
from hivemind.utils import DHTExpiration, MPFuture, get_dht_time, get_logger
logger = get_logger(__name__)
class AveragingStage(Enum):
    """Lifecycle stages of a single decentralized averaging step, in order."""
    IDLE = 0  # still initializing
    LOOKING_FOR_GROUP = 1  # running decentralized matchmaking, can't run allreduce yet
    AWAITING_TRIGGER = 2  # waiting for user to set the trigger that allows running allreduce
    RUNNING_ALLREDUCE = 3  # exchanging tensors with groupmates
    FINISHED = 4  # either done or failed with exception
class StepControl(MPFuture):
"""
An auxiliary data structure that allows user to control stages and track progress in a single averaging step
:param scheduled_time: estimated time when averaging should begin. Will be used for scheduling
:param deadline: if averaging is still in progress at this time, it should be stopped due to TimeoutError
:param allow_retries: if True, allow running matchmaking and all-reduce again if previous attempt fails
:param weight: averaging weight, can be changed afterwards
:param data_for_gather: send this data to all peers in the next group and gather it from groupmates
"""
# indices for the shared buffer
_SCHEDULED_TIME, _WEIGHT, _STAGE, _BEGAN_ALLREDUCE = slice(0, 8), slice(8, 16), 16, 17
    def __init__(
        self,
        scheduled_time: DHTExpiration,
        deadline: float,
        allow_retries: bool,
        weight: float,
        data_for_gather: bytes,
    ):
        super().__init__()
        self._data_for_gather, self._deadline, self._allow_retries = data_for_gather, deadline, allow_retries
        # Trigger/cancel futures are bound later via attach().
        self._trigger: Optional[MPFuture] = None
        self._cancel: Optional[MPFuture] = None
        # Buffer contents:
        # scheduled_time (double) | weight (double) | stage (AveragingStage, 1 byte) | began_allreduce: (bool, 1 byte)
        # The buffer lives in torch shared memory so these fields stay in sync
        # across processes.
        self._shared_buffer = torch.zeros([18], dtype=torch.uint8).share_memory_()
        # The assignments below go through property setters that pack the
        # values into the shared buffer.
        self.stage = AveragingStage.IDLE
        self.scheduled_time = scheduled_time
        self.weight = weight
        self.began_allreduce = False
def attach(self, trigger: MPFuture, cancel: MPFuture):
assert self._trigger is None and self._cancel is None, "Futures are already attached"
self._trigger, self._cancel = trigger, cancel
def allow_allreduce(self):
"""Allow averager to begin all-reduce when it finds a group. Meant to be triggered by user."""
assert self._trigger is not None, "StepControl does not have an attached trigger"
if self._trigger.done():
logger.warning("Trigger is already set")
else:
self._trigger.set_result(None)
async def wait_for_trigger(self):
assert self._trigger is not None, "StepControl does not have an attached trigger"
await self._trigger
@property
def triggered(self) -> bool:
assert self._trigger is not None, "StepControl does not have an attached trigger"
return self._trigger.done()
@property
def scheduled_time(self) -> DHTExpiration:
return struct.unpack("d", self._shared_buffer[StepControl._SCHEDULED_TIME].numpy().data)[0]
@scheduled_time.setter
def scheduled_time(self, scheduled_time):
if self.began_allreduce:
logger.warning("Changing scheduled time has no effect after all-reduce has already started")
if scheduled_time >= self.deadline:
logger.warning("Changing scheduled time to after deadline, averaging will likely fail due to timeout")
struct.pack_into("d", self._shared_buffer[StepControl._SCHEDULED_TIME].numpy().data, 0, float(scheduled_time))
@property
def weight(self) -> float:
return struct.unpack("d", self._shared_buffer[StepControl._WEIGHT].numpy().data)[0]
@weight.setter
def weight(self, weight: float):
assert weight >= 0 and np.isfinite(weight)
if self.began_allreduce:
logger.warning("Changing weights has no effect after all-reduce has already started")
struct.pack_into("d", self._shared_buffer[StepControl._WEIGHT].numpy().data, 0, float(weight))
@property
def stage(self) -> AveragingStage:
return AveragingStage(self._shared_buffer[StepControl._STAGE].item())
@stage.setter
def stage(self, stage: AveragingStage):
if stage == AveragingStage.RUNNING_ALLREDUCE:
self.began_allreduce = True
self._shared_buffer[StepControl._STAGE] = stage.value
@property
def began_allreduce(self) -> bool:
return bool(self._shared_buffer[StepControl._BEGAN_ALLREDUCE].item())
@began_allreduce.setter
def began_allreduce(self, value: bool):
self._shared_buffer[StepControl._BEGAN_ALLREDUCE] = int(value)
@property
def data_for_gather(self) -> bytes:
return self._data_for_gather
@property
def deadline(self) -> DHTExpiration:
return self._deadline
def get_timeout(self) -> Optional[DHTExpiration]:
return max(0.0, self.deadline - get_dht_time())
@property
def allow_retries(self) -> bool:
return self._allow_retries
def __getstate__(self):
return dict(
super().__getstate__(),
_trigger=self._trigger,
_cancel=self._cancel,
_shared_buffer=self._shared_buffer,
immutable_params=(self._data_for_gather, self._deadline, self._allow_retries),
)
def __setstate__(self, state):
super().__setstate__(state)
self._trigger, self._cancel, self._shared_buffer = state["_trigger"], state["_cancel"], state["_shared_buffer"]
self._data_for_gather, self._deadline, self._allow_retries = state["immutable_params"]
def __del__(self):
if os.getpid() == self._origin_pid and not self.triggered:
logger.warning(
"Deleted an averaging StepControl, but the step was not triggered. This may cause other "
"peers to fail an averaging round via TimeoutError."
)
super().__del__()
def cancel(self) -> bool:
if self._trigger is not None:
self._trigger.cancel()
if self._cancel is not None:
self._cancel.set_result(None)
return super().cancel()
async def wait_for_cancel(self):
"""Await for step to be cancelled by the user. Should be called from insider the averager."""
await self._cancel
| [
"os.getpid",
"numpy.isfinite",
"hivemind.utils.get_logger",
"torch.zeros",
"hivemind.utils.get_dht_time"
] | [((195, 215), 'hivemind.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (205, 215), False, 'from hivemind.utils import DHTExpiration, MPFuture, get_dht_time, get_logger\n'), ((3946, 3965), 'numpy.isfinite', 'np.isfinite', (['weight'], {}), '(weight)\n', (3957, 3965), True, 'import numpy as np\n'), ((1956, 1992), 'torch.zeros', 'torch.zeros', (['[18]'], {'dtype': 'torch.uint8'}), '([18], dtype=torch.uint8)\n', (1967, 1992), False, 'import torch\n'), ((5100, 5114), 'hivemind.utils.get_dht_time', 'get_dht_time', ([], {}), '()\n', (5112, 5114), False, 'from hivemind.utils import DHTExpiration, MPFuture, get_dht_time, get_logger\n'), ((5830, 5841), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5839, 5841), False, 'import os\n')] |
"""
ObservationBuilder objects are objects that can be passed to environments designed for customizability.
The ObservationBuilder-derived custom classes implement 2 functions, reset() and get() or get(handle).
+ `reset()` is called after each environment reset (i.e. at the beginning of a new episode), to allow for pre-computing relevant data.
+ `get()` is called whenever an observation has to be computed, potentially for each agent independently in case of \
multi-agent environments.
"""
import collections
from typing import Optional, List, Dict, Tuple
import queue
import numpy as np
from collections import defaultdict
import math
from flatland.core.env import Environment
from flatland.core.env_observation_builder import ObservationBuilder
from flatland.envs.agent_utils import RailAgentStatus, EnvAgent
from flatland.utils.ordered_set import OrderedSet
from flatland.envs.agent_utils import RailAgentStatus
from flatland.envs.distance_map import DistanceMap
from flatland.envs.rail_env import RailEnvNextAction, RailEnvActions
from flatland.envs.rail_env_shortest_paths import get_valid_move_actions_#, get_action_for_move
from flatland.core.grid.grid4_utils import get_new_position
from flatland.core.grid.grid_utils import coordinate_to_position, distance_on_rail, position_to_coordinate
from src.draw_obs_graph import build_graph
from src.utils import assign_random_priority, assign_speed_priority, assign_priority
class GraphObsForRailEnv(ObservationBuilder):
    """
    Build graph observations.

    Per-agent observation is a flat numpy array of length prediction_depth * 4 + 4:
    two occupancy/conflict layers, a fork layer, a target layer, then
    (priority, max encountered priority, #malfunctioning agents, #agents ready to depart).
    """

    Node = collections.namedtuple('Node',
                                  'cell_position '  # Cell position (x, y)
                                  'agent_direction '  # Direction with which the agent arrived in this node
                                  'is_target')  # Whether agent's target is in this cell

    def __init__(self, predictor):
        super(GraphObsForRailEnv, self).__init__()
        # self.bfs_depth = bfs_depth
        self.predictor = predictor
        self.max_prediction_depth = 0
        self.prediction_dict = {}  # Dict handle : list of tuples representing prediction steps
        self.predicted_pos = {}  # Dict ts : int_pos_list
        self.predicted_pos_list = {}  # Dict handle : int_pos_list
        self.predicted_pos_coord = {}  # Dict ts : coord_pos_list
        self.predicted_dir = {}  # Dict ts : dir (float)
        self.num_active_agents = 0
        self.cells_sequence = None
        self.env_graph = None
        self.forks_coords = None
        # self.overlapping_spans = {} # Dict handle : list of cells that correspond to 1 in occupancy

    def set_env(self, env: Environment):
        super().set_env(env)
        if self.predictor:
            # Use set_env available in PredictionBuilder (parent class)
            self.predictor.set_env(self.env)

    def reset(self):
        """
        Inherited method used for pre computations.
        Precomputes the set of fork cells (switches/diamond crossings) for the current map.
        :return:
        """
        #self.env_graph.py = map_to_graph(self.env) # Graph of the environment as tuple (nodes, edges) - as computed in src.algo.graph.env_graph.py.py
        '''
        for a in range(self.env.get_num_agents()):
            self.overlapping_spans.update({a: []})
        '''
        self.forks_coords = self._find_forks()

    def get_many(self, handles: Optional[List[int]] = None) -> {}:
        """
        Compute observations for all agents in the env.
        Also refreshes the per-timestep prediction caches (positions, directions) used by get().
        :param handles:
        :return:
        """
        self.num_active_agents = 0
        for a in self.env.agents:
            if a.status == RailAgentStatus.ACTIVE:
                self.num_active_agents += 1
        self.prediction_dict = self.predictor.get()
        # Useful to check if occupancy is correctly computed
        self.cells_sequence = self.predictor.compute_cells_sequence(self.prediction_dict)
        if self.prediction_dict:
            self.max_prediction_depth = self.predictor.max_depth
            for t in range(self.max_prediction_depth):
                pos_list = []
                dir_list = []
                for a in handles:
                    if self.prediction_dict[a] is None:
                        continue
                    # NOTE(review): prediction rows appear to be (ts, y, x, dir, ...) - confirm against predictor
                    pos_list.append(self.prediction_dict[a][t][1:3])
                    dir_list.append(self.prediction_dict[a][t][3])
                self.predicted_pos_coord.update({t: pos_list})
                self.predicted_pos.update({t: coordinate_to_position(self.env.width, pos_list)})
                self.predicted_dir.update({t: dir_list})
            for a in range(len(self.env.agents)):
                pos_list = []
                for ts in range(self.max_prediction_depth):
                    pos_list.append(self.predicted_pos[ts][a])  # Use int positions
                self.predicted_pos_list.update({a: pos_list})
        observations = {}
        for a in handles:
            observations[a] = self.get(a)
        return observations

    # TODO Optimize considering that I don't need obs for those agents who don't have to pick actions
    def get(self, handle: int = 0) -> {}:
        """
        TODO Update docstrings
        Returns obs for one agent, obs are a single array of concatenated values representing:
        - occupancy of next prediction_depth cells,
        - agent priority/speed,
        - number of malfunctioning agents (encountered),
        - number of agents that are ready to depart (encountered).
        :param handle:
        :return:
        """
        #bfs_graph = self._bfs_graph(handle)
        agents = self.env.agents
        agent = agents[handle]
        # Occupancy
        occupancy, conflicting_agents = self._fill_occupancy(handle)
        # TODO This can be done inside _fill_occupancy - temp not using a second layer
        # Augment occupancy with another one-hot encoded layer: 1 if this cell is overlapping and the conflict span was already entered by some other agent
        second_layer = np.zeros(self.max_prediction_depth, dtype=int)  # Same size as occupancy
        for ca in conflicting_agents:
            if ca != handle:
                # Find ts when conflict occurred
                ts = [x for x, y in enumerate(self.cells_sequence[handle]) if y[0] == agents[ca].position[0] and y[1] == agents[ca].position[1]]  # Find index/ts for conflict
                # Set to 1 conflict span which was already entered by some agent - fill left side and right side of ts
                if len(ts) > 0:
                    i = ts[0]  # Since the previous returns a list of ts
                    while 0 <= i < self.max_prediction_depth:
                        second_layer[i] = 1 if occupancy[i] > 0 else 0
                        i -= 1
                    i = ts[0]
                    while i < self.max_prediction_depth:
                        second_layer[i] = 1 if occupancy[i] > 0 else 0
                        i += 1
        occupancy = np.append(occupancy, second_layer)
        #print('Agent {}'.format(handle))
        #print('Occupancy, first layer: {}'.format(occupancy))
        #print('Occupancy, second layer: {}'.format(second_layer))
        # Bifurcation points, one-hot encoded layer of predicted cells where 1 means that this cell is a fork
        # (globally - considering cell transitions not depending on agent orientation)
        forks = np.zeros(self.max_prediction_depth, dtype=int)
        # Target
        target = np.zeros(self.max_prediction_depth, dtype=int)
        for index in range(self.max_prediction_depth):
            # Fill as 1 if transitions represent a fork cell
            cell = self.cells_sequence[handle][index]
            if cell in self.forks_coords:
                forks[index] = 1
            if cell == agent.target:
                target[index] = 1
        # print('Forks: {}'.format(forks))
        # print('Target: {}'.format(target))
        # Speed/priority
        is_conflict = True if len(conflicting_agents) > 0 else False
        priority = assign_priority(self.env, agent, is_conflict)
        max_prio_encountered = 0
        if is_conflict:
            conflicting_agents_priorities = [assign_priority(self.env, agents[ca], True) for ca in conflicting_agents]
            max_prio_encountered = np.min(conflicting_agents_priorities)  # Max prio is the one with lowest value
        #print('Priority: {}'.format(priority))
        #print('Max priority encountered: {}'.format(max_prio_encountered))
        # Malfunctioning obs
        # Counting number of agents that are currently malfunctioning (globally) - experimental
        n_agents_malfunctioning = 0  # in TreeObs they store the length of the longest malfunction encountered
        for a in agents:
            if a.malfunction_data['malfunction'] != 0:
                n_agents_malfunctioning += 1  # Considering ALL agents
        #print('Num malfunctioning agents (globally): {}'.format(n_agents_malfunctioning))
        # Agents status (agents ready to depart) - it tells the agent how many will appear - encountered? or globally?
        n_agents_ready_to_depart = 0
        for a in agents:
            if a.status in [RailAgentStatus.READY_TO_DEPART]:
                n_agents_ready_to_depart += 1  # Considering ALL agents
        #print('Num agents ready to depart (globally): {}'.format(n_agents_ready_to_depart))
        # shape (prediction_depth * 4 + 4, )
        agent_obs = np.append(occupancy, forks)
        agent_obs = np.append(agent_obs, target)
        agent_obs = np.append(agent_obs, (priority, max_prio_encountered, n_agents_malfunctioning, n_agents_ready_to_depart))
        # With this obs the agent actually decided only if it has to move or stop
        return agent_obs

    # TODO Stop when shortest_path.py() says that rail is disrupted
    def _get_shortest_path_action(self, handle):
        """
        Takes an agent handle and returns next action for that agent following shortest path:
        - if agent status == READY_TO_DEPART => agent moves forward;
        - if agent status == ACTIVE => pick action using shortest_path.py() fun available in prediction utils;
        - if agent status == DONE => agent does nothing.
        :param handle:
        :return:
        """
        agent = self.env.agents[handle]
        if agent.status == RailAgentStatus.READY_TO_DEPART:
            if self.num_active_agents < 10:  # TODO hard-coded cap on concurrently active agents
                # This could be reasonable since agents never start on switches - I guess
                action = RailEnvActions.MOVE_FORWARD
            else:
                action = RailEnvActions.DO_NOTHING
        elif agent.status == RailAgentStatus.ACTIVE:  # TODO Move k_shortest_paths from here - this is computationally expensive
            # This can return None when rails are disconnected or there was an error in the DistanceMap
            shortest_paths = self.predictor.get_shortest_paths()
            '''
            k_shortest_paths = self.predictor.get_k_shortest_paths(
                source_position=agent.position,
                source_direction=agent.direction,
                target_position=agent.target,
                k=3,
                debug=True)
            '''
            if shortest_paths[handle] is None:  # Railway disrupted
                action = RailEnvActions.STOP_MOVING
            else:
                step = shortest_paths[handle][0]
                next_action_element = step[2][0]  # Get next_action_element
                ''' THIS WORKS WITH NEXT VERSION
                next_direction = shortest_paths[handle][1].direction
                next_position = shortest_paths[handle][1].position # COULD return None?
                action = get_action_for_move(agent.position, agent.direction, next_position, next_direction, self.env.rail)
                if action is None:
                    action = RailEnvActions.DO_NOTHING
                '''
                # Just to use the correct form/name
                if next_action_element == 1:
                    action = RailEnvActions.MOVE_LEFT
                elif next_action_element == 2:
                    action = RailEnvActions.MOVE_FORWARD
                elif next_action_element == 3:
                    action = RailEnvActions.MOVE_RIGHT
        else:  # If status == DONE
            action = RailEnvActions.DO_NOTHING
        return action

    def choose_railenv_action(self, handle, network_action):
        """
        Choose action to perform from RailEnvActions, namely follow shortest path or stop if DQN network said so.
        :param handle:
        :param network_action: 1 means "stop", any other value means "follow shortest path"
        :return:
        """
        if network_action == 1:
            return RailEnvActions.STOP_MOVING
        else:
            return self._get_shortest_path_action(handle)

    '''
    def _bfs_graph(self, handle: int = 0) -> {}:
        """
        Build a graph (dict) of nodes, where nodes are identified by ids, graph is directed, depends on agent direction
        (that are tuples that represent the cell position, eg (11, 23))
        :param handle: agent id
        :return:
        """
        obs_graph = defaultdict(list)  # Dict node (as pos) : adjacent nodes
        visited_nodes = set()  # set
        bfs_queue = []
        done = False  # True if agent has reached its target
        agent = self.env.agents[handle]
        if agent.status == RailAgentStatus.READY_TO_DEPART:
            agent_virtual_position = agent.initial_position
        elif agent.status == RailAgentStatus.ACTIVE:
            agent_virtual_position = agent.position
        elif agent.status == RailAgentStatus.DONE:
            agent_virtual_position = agent.target
            done = True
        else:
            return None
        agent_current_direction = agent.direction
        # Push root node into the queue
        root_node_obs = GraphObsForRailEnv.Node(cell_position=agent_virtual_position,
                                                agent_direction=agent_current_direction,
                                                is_target=done)
        bfs_queue.append(root_node_obs)
        # Perform BFS of depth = bfs_depth
        for i in range(1, self.bfs_depth + 1):
            # Temporary queue to store nodes that must be appended at the next pass
            tmp_queue = []
            while not len(bfs_queue) == 0:
                current_node = bfs_queue.pop(0)
                agent_position = current_node[0]
                # Init node in the obs_graph (if first time)
                if not agent_position in obs_graph.keys():
                    obs_graph[agent_position] = []
                agent_current_direction = current_node[1]
                # Get cell transitions given agent direction
                possible_transitions = self.env.rail.get_transitions(*agent_position, agent_current_direction)
                orientation = agent_current_direction
                possible_branch_directions = []
                # Build list of possible branching directions from cell
                for j, branch_direction in enumerate([(orientation + j) % 4 for j in range(-1, 3)]):
                    if possible_transitions[branch_direction]:
                        possible_branch_directions.append(branch_direction)
                for branch_direction in possible_branch_directions:
                    # Gets adjacent cell and start exploring from that for possible fork points
                    neighbour_cell = get_new_position(agent_position, branch_direction)
                    adj_node = self._explore_path(handle, neighbour_cell, branch_direction)
                    if not (*adj_node[0], adj_node[1]) in visited_nodes:
                        # For now I'm using as key the agent_position tuple
                        obs_graph[agent_position].append(adj_node)
                        visited_nodes.add((*adj_node[0], adj_node[1]))
                        tmp_queue.append(adj_node)
            # Add all the nodes of the next level to the BFS queue
            for el in tmp_queue:
                bfs_queue.append(el)
        # After the last pass add adj nodes to the obs graph wih empty lists
        for el in bfs_queue:
            if not el[0] in obs_graph.keys():
                obs_graph[el[0]] = []
                # visited_nodes.add((*el[0], el[1]))
        # For obs rendering
        # self.env.dev_obs_dict[handle] = [(node[0], node[1]) for node in visited_nodes]
        # Build graph with graph-tool library for visualization
        # g = build_graph(obs_graph, handle)
        return obs_graph

    def _explore_path(self, handle, position, direction):
        """
        Given agent handle, current position, and direction, explore that path until a new branching point is found.
        :param handle: agent id
        :param position: agent position as cell
        :param direction: agent direction
        :return: a tuple Node with its features
        """
        # Continue along direction until next switch or
        # until no transitions are possible along the current direction (i.e., dead-ends)
        # We treat dead-ends as nodes, instead of going back, to avoid loops
        exploring = True
        # 4 different cases to have a branching point:
        last_is_switch = False
        last_is_dead_end = False
        last_is_terminal = False  # wrong cell or cycle
        last_is_target = False  # target was reached
        agent = self.env.agents[handle]
        visited = OrderedSet()
        while True:
            if (position[0], position[1], direction) in visited:
                last_is_terminal = True
                break
            visited.add((position[0], position[1], direction))
            # If the target node is encountered, pick that as node. Also, no further branching is possible.
            if np.array_equal(position, self.env.agents[handle].target):
                last_is_target = True
                break
            cell_transitions = self.env.rail.get_transitions(*position, direction)
            num_transitions = np.count_nonzero(cell_transitions)
            cell_transitions_bitmap = bin(self.env.rail.get_full_transitions(*position))
            total_transitions = cell_transitions_bitmap.count("1")
            if num_transitions == 1:
                # Check if dead-end (1111111111111111), or if we can go forward along direction
                if total_transitions == 1:
                    last_is_dead_end = True
                    break
                if not last_is_dead_end:
                    # Keep walking through the tree along `direction`
                    # convert one-hot encoding to 0,1,2,3
                    direction = np.argmax(cell_transitions)
                    position = get_new_position(position, direction)
            elif num_transitions > 1:
                last_is_switch = True
                break
            elif num_transitions == 0:
                # Wrong cell type, but let's cover it and treat it as a dead-end, just in case
                print("WRONG CELL TYPE detected in tree-search (0 transitions possible) at cell", position[0],
                      position[1], direction)
                last_is_terminal = True
                break
        # Out of while loop - a branching point was found
        # TODO tmp build node features and save them here
        node = GraphObsForRailEnv.Node(cell_position=position,
                                       agent_direction=direction,
                                       is_target=last_is_target)
        return node
    '''

    def _possible_conflict(self, handle, ts):
        """
        Function that given agent (as handle) and time step, returns a counter that represents the sum of possible conflicts with
        other agents at that time step.
        Possible conflict is computed considering time step (current, pre and stop), direction, and possibility to enter that cell
        in opposite direction (w.r.t. to current agent).
        Precondition: 0 <= ts <= self.max_prediction_depth - 1.
        Exclude READY_TO_DEPART agents from this count, namely, check conflicts only with agents that are already active.
        :param handle: agent id
        :param ts: time step
        :return occupancy_counter, conflicting_agents
        """
        occupancy_counter = 0
        cell_pos = self.predicted_pos_coord[ts][handle]
        int_pos = self.predicted_pos[ts][handle]
        pre_ts = max(0, ts - 1)
        post_ts = min(self.max_prediction_depth - 1, ts + 1)
        int_direction = int(self.predicted_dir[ts][handle])
        cell_transitions = self.env.rail.get_transitions(int(cell_pos[0]), int(cell_pos[1]), int_direction)
        conflicting_agents_ts = set()
        # Careful, int_pos, predicted_pos are not (y, x) but are given as int
        if int_pos in np.delete(self.predicted_pos[ts], handle, 0):
            conflicting_agents = np.where(self.predicted_pos[ts] == int_pos)
            for ca in conflicting_agents[0]:
                if self.env.agents[ca].status == RailAgentStatus.ACTIVE:
                    # Conflict only when the other agent moves in a different direction AND this cell
                    # can be entered from the reverse of that direction (head-on encounter).
                    if self.predicted_dir[ts][handle] != self.predicted_dir[ts][ca] and cell_transitions[self._reverse_dir(self.predicted_dir[ts][ca])] == 1:
                        if not (self._is_following(ca, handle)):
                            occupancy_counter += 1
                            conflicting_agents_ts.add(ca)
        elif int_pos in np.delete(self.predicted_pos[pre_ts], handle, 0):
            conflicting_agents = np.where(self.predicted_pos[pre_ts] == int_pos)
            for ca in conflicting_agents[0]:
                if self.env.agents[ca].status == RailAgentStatus.ACTIVE:
                    if self.predicted_dir[ts][handle] != self.predicted_dir[pre_ts][ca] and cell_transitions[self._reverse_dir(self.predicted_dir[pre_ts][ca])] == 1:
                        if not (self._is_following(ca, handle)):
                            occupancy_counter += 1
                            conflicting_agents_ts.add(ca)
        elif int_pos in np.delete(self.predicted_pos[post_ts], handle, 0):
            conflicting_agents = np.where(self.predicted_pos[post_ts] == int_pos)
            for ca in conflicting_agents[0]:
                if self.env.agents[ca].status == RailAgentStatus.ACTIVE:
                    if self.predicted_dir[ts][handle] != self.predicted_dir[post_ts][ca] and cell_transitions[self._reverse_dir(self.predicted_dir[post_ts][ca])] == 1:
                        if not (self._is_following(ca, handle)):
                            occupancy_counter += 1
                            conflicting_agents_ts.add(ca)
        return occupancy_counter, conflicting_agents_ts

    def _fill_occupancy(self, handle):
        """
        Returns encoding of agent occupancy as an array where each element is
        0: no other agent in this cell at this ts (free cell)
        >= 1: counter (probably) other agents here at the same ts, so conflict, e.g. if 1 => one possible conflict, 2 => 2 possible conflicts, etc.
        :param handle: agent id
        :return: occupancy, conflicting_agents
        """
        occupancy = np.zeros(self.max_prediction_depth, dtype=int)
        conflicting_agents = set()
        overlapping_paths = self._compute_overlapping_paths(handle)
        # cells_sequence = self.cells_sequence[handle]
        # span_cells = []
        for ts in range(self.max_prediction_depth):
            if self.env.agents[handle].status in [RailAgentStatus.READY_TO_DEPART, RailAgentStatus.ACTIVE]:
                occupancy[ts], conflicting_agents_ts = self._possible_conflict(handle, ts)
                conflicting_agents.update(conflicting_agents_ts)
        # If a conflict is predicted, then it makes sense to populate occupancy with overlapping paths
        # But only with THAT agent
        # Because I could have overlapping paths but without conflict (TODO improve)
        if len(conflicting_agents) != 0:  # If there was conflict
            for ca in conflicting_agents:
                for ts in range(self.max_prediction_depth):
                    occupancy[ts] = overlapping_paths[ca, ts] if occupancy[ts] == 0 else 1
        # if occupancy[ts]:
        # span_cells.append(cells_sequence[ts]) # Save occupancy as sequence of cells too
        '''
        if not self.overlapping_spans[handle]: # If empty means it's the first time or there weren't overlapping paths
            self.overlapping_spans.update({handle: span_cells})
        else:
            # Check if agent.position was already there - occupying an overlapping span
            agent_pos = self.env.agents[handle].position
            if agent_pos in self.overlapping_spans[handle]:
                # Then I can free first block of occupancy
                index = 0
                while occupancy[index]:
                    occupancy[index] = 0
                # Update for new obs
                self.overlapping_spans.update({handle: span_cells})
        '''
        # Occupancy is 0 for agents that are done - they don't perform actions anymore
        return occupancy, conflicting_agents

    def _reverse_dir(self, direction):
        """
        Invert direction (int) of one agent.
        :param direction:
        :return:
        """
        return int((direction + 2) % 4)

    # More than overlapping paths, this function computes cells in common in the predictions
    def _compute_overlapping_paths(self, handle):
        """
        Function that checks overlapping paths, where paths take into account shortest path prediction, so time/speed,
        but not the fact that the agent is moving or not.
        :param handle: agent id
        :return: overlapping_paths is a np.array that computes path overlapping for pairs of agents, where 1 means overlapping.
        Each layer represents overlapping with one particular agent.
        """
        overlapping_paths = np.zeros((self.env.get_num_agents(), self.max_prediction_depth), dtype=int)
        cells_sequence = self.predicted_pos_list[handle]
        for a in range(len(self.env.agents)):
            if a != handle:
                i = 0
                other_agent_cells_sequence = self.predicted_pos_list[a]
                for pos in cells_sequence:
                    if pos in other_agent_cells_sequence:
                        overlapping_paths[a, i] = 1
                    i += 1
        return overlapping_paths

    # TODO
    # The problem is that this way I don't account for the fact that I stay in this position for 1, 2 or 3 more
    # timesteps depending on my speed
    def _compute_overlapping_paths_with_current_ts(self, handle):
        """
        Like _compute_overlapping_paths, but prepends the agents' current positions to the predicted
        sequences (so the result has max_prediction_depth + 1 columns).
        NOTE(review): virtual_position is only assigned for ACTIVE/READY_TO_DEPART agents - other
        statuses would raise UnboundLocalError here; confirm callers never pass DONE agents.
        """
        agent = self.env.agents[handle]
        overlapping_paths = np.zeros((self.env.get_num_agents(), self.max_prediction_depth + 1), dtype=int)
        cells_sequence = self.predicted_pos_list[handle]
        # Prepend current ts
        if agent.status == RailAgentStatus.ACTIVE:
            virtual_position = agent.position
        elif agent.status == RailAgentStatus.READY_TO_DEPART:
            virtual_position = agent.initial_position
        int_pos = coordinate_to_position(self.env.width, [virtual_position])
        cells_sequence = np.append(int_pos[0], cells_sequence)
        for a in range(len(self.env.agents)):
            if a != handle and self.env.agents[a].status == RailAgentStatus.ACTIVE:
                i = 0
                # Prepend other agents current ts
                other_agent_cells_sequence = self.predicted_pos_list[a]
                other_int_pos = coordinate_to_position(self.env.width, [self.env.agents[a].position])
                other_agent_cells_sequence = np.append(other_int_pos[0], other_agent_cells_sequence)
                for pos in cells_sequence:
                    if pos in other_agent_cells_sequence:
                        overlapping_paths[a, i] = 1
                    i += 1
        return overlapping_paths

    def _find_forks(self):
        """
        A fork (in the map) is either a switch or a diamond crossing.
        :return: set of (row, col) coordinates of all fork cells in the grid
        """
        forks = set()  # Set of nodes as tuples/coordinates
        # Identify cells hat are nodes (have switches)
        for i in range(self.env.height):
            for j in range(self.env.width):
                is_switch = False
                is_crossing = False
                # Check if diamond crossing
                transitions_bit = bin(self.env.rail.get_full_transitions(i, j))
                if int(transitions_bit, 2) == int('1000010000100001', 2):
                    is_crossing = True
                else:
                    # Check if switch
                    for direction in (0, 1, 2, 3):  # 0:N, 1:E, 2:S, 3:W
                        possible_transitions = self.env.rail.get_transitions(i, j, direction)
                        num_transitions = np.count_nonzero(possible_transitions)
                        if num_transitions > 1:
                            is_switch = True
                if is_switch or is_crossing:
                    forks.add((i, j))
        return forks

    def _is_following(self, handle1, handle2):
        """
        Checks whether the agent with higher handle is (probably) following the other one.
        invariant handle1 < handle2
        :param handle1:
        :param handle2:
        :return:
        """
        '''
        if handle1 > handle2:
            handle2, handle1 = handle1, handle2
        '''
        agent1 = self.env.agents[handle1]
        agent2 = self.env.agents[handle2]
        virtual_position1 = agent1.initial_position if agent1.status == RailAgentStatus.READY_TO_DEPART else agent1.position
        virtual_position2 = agent2.initial_position if agent2.status == RailAgentStatus.READY_TO_DEPART else agent2.position
        # Two agents "follow" each other when they share origin, initial direction and target,
        # and are currently within 2 cells of each other on either axis.
        if agent1.initial_position == agent2.initial_position \
                and agent1.initial_direction == agent2.initial_direction \
                and agent1.target == agent2.target \
                and (abs(virtual_position1[0] - virtual_position2[0]) <= 2 or abs(virtual_position1[1] - virtual_position2[1]) <= 2):
            return True
        else:
            return False
| [
"numpy.count_nonzero",
"numpy.zeros",
"numpy.append",
"numpy.min",
"flatland.core.grid.grid_utils.coordinate_to_position",
"numpy.where",
"collections.namedtuple",
"src.utils.assign_priority",
"numpy.delete"
] | [((1542, 1615), 'collections.namedtuple', 'collections.namedtuple', (['"""Node"""', '"""cell_position agent_direction is_target"""'], {}), "('Node', 'cell_position agent_direction is_target')\n", (1564, 1615), False, 'import collections\n'), ((5979, 6025), 'numpy.zeros', 'np.zeros', (['self.max_prediction_depth'], {'dtype': 'int'}), '(self.max_prediction_depth, dtype=int)\n', (5987, 6025), True, 'import numpy as np\n'), ((6946, 6980), 'numpy.append', 'np.append', (['occupancy', 'second_layer'], {}), '(occupancy, second_layer)\n', (6955, 6980), True, 'import numpy as np\n'), ((7387, 7433), 'numpy.zeros', 'np.zeros', (['self.max_prediction_depth'], {'dtype': 'int'}), '(self.max_prediction_depth, dtype=int)\n', (7395, 7433), True, 'import numpy as np\n'), ((7468, 7514), 'numpy.zeros', 'np.zeros', (['self.max_prediction_depth'], {'dtype': 'int'}), '(self.max_prediction_depth, dtype=int)\n', (7476, 7514), True, 'import numpy as np\n'), ((8051, 8096), 'src.utils.assign_priority', 'assign_priority', (['self.env', 'agent', 'is_conflict'], {}), '(self.env, agent, is_conflict)\n', (8066, 8096), False, 'from src.utils import assign_random_priority, assign_speed_priority, assign_priority\n'), ((9515, 9542), 'numpy.append', 'np.append', (['occupancy', 'forks'], {}), '(occupancy, forks)\n', (9524, 9542), True, 'import numpy as np\n'), ((9563, 9591), 'numpy.append', 'np.append', (['agent_obs', 'target'], {}), '(agent_obs, target)\n', (9572, 9591), True, 'import numpy as np\n'), ((9612, 9721), 'numpy.append', 'np.append', (['agent_obs', '(priority, max_prio_encountered, n_agents_malfunctioning,\n n_agents_ready_to_depart)'], {}), '(agent_obs, (priority, max_prio_encountered,\n n_agents_malfunctioning, n_agents_ready_to_depart))\n', (9621, 9721), True, 'import numpy as np\n'), ((23472, 23518), 'numpy.zeros', 'np.zeros', (['self.max_prediction_depth'], {'dtype': 'int'}), '(self.max_prediction_depth, dtype=int)\n', (23480, 23518), True, 'import numpy as np\n'), ((27531, 27589), 
'flatland.core.grid.grid_utils.coordinate_to_position', 'coordinate_to_position', (['self.env.width', '[virtual_position]'], {}), '(self.env.width, [virtual_position])\n', (27553, 27589), False, 'from flatland.core.grid.grid_utils import coordinate_to_position, distance_on_rail, position_to_coordinate\n'), ((27615, 27652), 'numpy.append', 'np.append', (['int_pos[0]', 'cells_sequence'], {}), '(int_pos[0], cells_sequence)\n', (27624, 27652), True, 'import numpy as np\n'), ((8308, 8345), 'numpy.min', 'np.min', (['conflicting_agents_priorities'], {}), '(conflicting_agents_priorities)\n', (8314, 8345), True, 'import numpy as np\n'), ((21082, 21126), 'numpy.delete', 'np.delete', (['self.predicted_pos[ts]', 'handle', '(0)'], {}), '(self.predicted_pos[ts], handle, 0)\n', (21091, 21126), True, 'import numpy as np\n'), ((21161, 21204), 'numpy.where', 'np.where', (['(self.predicted_pos[ts] == int_pos)'], {}), '(self.predicted_pos[ts] == int_pos)\n', (21169, 21204), True, 'import numpy as np\n'), ((8199, 8242), 'src.utils.assign_priority', 'assign_priority', (['self.env', 'agents[ca]', '(True)'], {}), '(self.env, agents[ca], True)\n', (8214, 8242), False, 'from src.utils import assign_random_priority, assign_speed_priority, assign_priority\n'), ((21700, 21748), 'numpy.delete', 'np.delete', (['self.predicted_pos[pre_ts]', 'handle', '(0)'], {}), '(self.predicted_pos[pre_ts], handle, 0)\n', (21709, 21748), True, 'import numpy as np\n'), ((21783, 21830), 'numpy.where', 'np.where', (['(self.predicted_pos[pre_ts] == int_pos)'], {}), '(self.predicted_pos[pre_ts] == int_pos)\n', (21791, 21830), True, 'import numpy as np\n'), ((27959, 28028), 'flatland.core.grid.grid_utils.coordinate_to_position', 'coordinate_to_position', (['self.env.width', '[self.env.agents[a].position]'], {}), '(self.env.width, [self.env.agents[a].position])\n', (27981, 28028), False, 'from flatland.core.grid.grid_utils import coordinate_to_position, distance_on_rail, position_to_coordinate\n'), ((28074, 28129), 
'numpy.append', 'np.append', (['other_int_pos[0]', 'other_agent_cells_sequence'], {}), '(other_int_pos[0], other_agent_cells_sequence)\n', (28083, 28129), True, 'import numpy as np\n'), ((22342, 22391), 'numpy.delete', 'np.delete', (['self.predicted_pos[post_ts]', 'handle', '(0)'], {}), '(self.predicted_pos[post_ts], handle, 0)\n', (22351, 22391), True, 'import numpy as np\n'), ((22426, 22474), 'numpy.where', 'np.where', (['(self.predicted_pos[post_ts] == int_pos)'], {}), '(self.predicted_pos[post_ts] == int_pos)\n', (22434, 22474), True, 'import numpy as np\n'), ((4424, 4472), 'flatland.core.grid.grid_utils.coordinate_to_position', 'coordinate_to_position', (['self.env.width', 'pos_list'], {}), '(self.env.width, pos_list)\n', (4446, 4472), False, 'from flatland.core.grid.grid_utils import coordinate_to_position, distance_on_rail, position_to_coordinate\n'), ((29265, 29303), 'numpy.count_nonzero', 'np.count_nonzero', (['possible_transitions'], {}), '(possible_transitions)\n', (29281, 29303), True, 'import numpy as np\n')] |
"""
Implimentation of Density-Based Clustering Validation "DBCV"
"""
import numpy as np
from scipy.spatial.distance import cdist
from scipy.sparse.csgraph import minimum_spanning_tree
from scipy.sparse import csgraph
from tqdm import tqdm
class DBCV:
    """Density Based Clustering Validation (DBCV) of a clustering assignment."""
    def __init__(self, samples: np.ndarray, labels: np.ndarray, dist_function: str = 'euclidean', verbose=False):
        """
        Density Based clustering validation
        Args:
            samples (np.ndarray): ndarray with dimensions [n_samples, n_features]
                data to check validity of clustering
            labels (np.array): clustering assignments for data X
            dist_function (str): name of a scipy ``cdist`` metric used for
                pairwise distances between samples (default 'euclidean')
            verbose (bool): if True, print progress messages
        """
        self.samples = samples
        self.labels = labels
        self.dist_function = dist_function
        # lazy cache: cluster label -> ndarray of member indices
        self.cluster_lookup = {}
        # all-pairs shortest paths over the MST; filled in get_score()
        self.shortest_paths = None
        self.verbose = verbose
    def verbose_log(self, msg):
        # Emit progress messages only when verbose mode is on.
        if self.verbose:
            print(msg)
    def get_score(self):
        """
        Density Based clustering validation
        Returns: cluster_validity (float)
            score in range[-1, 1] indicating validity of clustering assignments
        """
        graph = self._mutual_reach_dist_graph(self.samples, self.labels, self.dist_function)
        self.verbose_log("made graph matrix")
        mst = self._mutual_reach_dist_MST(graph)
        self.verbose_log("built MST")
        # Shortest paths over the MST are needed later by
        # _cluster_density_separation.
        self.shortest_paths = csgraph.dijkstra(mst)
        self.verbose_log("calculated shortest paths")
        cluster_validity = self._clustering_validity_index(mst, self.labels)
        self.verbose_log("scores calculated")
        return cluster_validity
    def _core_dist(self, point: np.ndarray, distance_vector: np.ndarray):
        """
        Computes the core distance of a point.
        Core distance is the inverse density of an object.
        Args:
            point (np.array): array of dimensions (n_features,)
                point to compute core distance of
            distance_vector (np.array):
                vector of distances from point to all other points in its cluster
        Returns: core_dist (float)
            inverse density of point
        """
        n_features = np.shape(point)[0]
        # n_neighbors counts the point itself too, hence (n_neighbors - 1) below.
        n_neighbors = np.shape(distance_vector)[0]
        # Drop zero distances (self-distance / duplicates) before inverting.
        distance_vector = distance_vector[distance_vector != 0]
        numerator = ((1 / distance_vector) ** n_features).sum()
        core_dist = (numerator / (n_neighbors - 1)) ** (-1 / n_features)
        return core_dist
    def _calculate_pairwise_distance(self, samples: np.ndarray, dist_function: str):
        """Return the dense (n_samples, n_samples) pairwise distance matrix."""
        # TODO: align the metric with distance function
        return cdist(samples, samples, metric=dist_function)
    def _mutual_reach_dist_graph(self, X, labels, dist_function):
        """
        Computes the mutual reach distance complete graph.
        Graph of all pair-wise mutual reachability distances between points
        Args:
            X (np.ndarray): ndarray with dimensions [n_samples, n_features]
                data to check validity of clustering
            labels (np.array): clustering assignments for data X
            dist_function (str): scipy ``cdist`` metric name
        Returns: graph (np.ndarray)
            array of dimensions (n_samples, n_samples)
            Graph of all pair-wise mutual reachability distances between points.
        """
        n_samples = np.shape(X)[0]
        pairwise_distance = self._calculate_pairwise_distance(X, dist_function)
        core_dists = []
        for idx in tqdm(range(n_samples)):
            class_label = labels[idx]
            members = self._get_label_member_indices(labels, class_label)
            # Distances from this point to the other members of its own cluster.
            distance_vector = pairwise_distance[idx, :][members]
            core_dists.append(self._core_dist(X[idx], distance_vector))
        # to do a bulk np.max we want to repeat core distances
        core_dists = np.repeat(np.array(core_dists).reshape(-1, 1), n_samples, axis=1)
        # this matrix and its transpose show core_dist in position i,j for point i and point j respectively
        core_dists_i = core_dists[:, :, np.newaxis]
        core_dists_j = core_dists.T[:, :, np.newaxis]
        pairwise_distance = pairwise_distance[:, :, np.newaxis]
        # concatenate all distances to compare them all in numpy
        mutual_reachability_distance_matrix = np.concatenate([core_dists_i, core_dists_j, pairwise_distance], axis=-1)
        # mutual reachability dist(i, j) = max(core_i, core_j, d(i, j))
        graph = np.max(mutual_reachability_distance_matrix, axis=-1)
        return graph
    def _mutual_reach_dist_MST(self, dist_tree):
        """
        Computes minimum spanning tree of the mutual reach distance complete graph
        Args:
            dist_tree (np.ndarray): array of dimensions (n_samples, n_samples)
                Graph of all pair-wise mutual reachability distances
                between points.
        Returns: minimum_spanning_tree (np.ndarray)
            array of dimensions (n_samples, n_samples)
            minimum spanning tree of all pair-wise mutual reachability
            distances between points.
        """
        mst = minimum_spanning_tree(dist_tree).toarray()
        # scipy returns a one-sided (upper-triangular) matrix; symmetrise it.
        return mst + np.transpose(mst)
    def _cluster_density_sparseness(self, MST, labels, cluster):
        """
        Computes the cluster density sparseness, the minimum density
        within a cluster
        Args:
            MST (np.ndarray): minimum spanning tree of all pair-wise
                mutual reachability distances between points.
            labels (np.array): clustering assignments for data X
            cluster (int): cluster of interest
        Returns: cluster_density_sparseness (float)
            value corresponding to the minimum density within a cluster
        """
        indices = np.where(labels == cluster)[0]
        cluster_MST = MST[indices][:, indices]
        cluster_density_sparseness = np.max(cluster_MST)
        return cluster_density_sparseness
    def _cluster_density_separation(self, MST, labels, cluster_i, cluster_j):
        """
        Computes the density separation between two clusters, the maximum
        density between clusters.
        NOTE: the MST argument is unused here; separation is read from the
        precomputed all-pairs shortest-path matrix (set in get_score()).
        Args:
            MST (np.ndarray): minimum spanning tree of all pair-wise
                mutual reachability distances between points.
            labels (np.array): clustering assignments for data X
            cluster_i (int): cluster i of interest
            cluster_j (int): cluster j of interest
        Returns: density_separation (float):
            value corresponding to the maximum density between clusters
        """
        indices_i = np.where(labels == cluster_i)[0]
        indices_j = np.where(labels == cluster_j)[0]
        relevant_paths = self.shortest_paths[indices_i][:, indices_j]
        density_separation = np.min(relevant_paths)
        return density_separation
    def _cluster_validity_index(self, MST, labels, cluster):
        """
        Computes the validity of a cluster (validity of assignmnets)
        Args:
            MST (np.ndarray): minimum spanning tree of all pair-wise
                mutual reachability distances between points.
            labels (np.array): clustering assignments for data X
            cluster (int): cluster of interest
        Returns: cluster_validity (float)
            value corresponding to the validity of cluster assignments
        """
        # Minimum separation between this cluster and any other cluster.
        min_density_separation = np.inf
        for cluster_j in np.unique(labels):
            if cluster_j != cluster:
                cluster_density_separation = self._cluster_density_separation(MST,
                                                                              labels,
                                                                              cluster,
                                                                              cluster_j)
                if cluster_density_separation < min_density_separation:
                    min_density_separation = cluster_density_separation
        cluster_density_sparseness = self._cluster_density_sparseness(MST,
                                                                      labels,
                                                                      cluster)
        numerator = min_density_separation - cluster_density_sparseness
        denominator = np.max([min_density_separation, cluster_density_sparseness])
        cluster_validity = numerator / denominator
        return cluster_validity
    def _clustering_validity_index(self, MST, labels):
        """
        Computes the validity of all clustering assignments for a
        clustering algorithm
        Args:
            MST (np.ndarray): minimum spanning tree of all pair-wise
                mutual reachability distances between points.
            labels (np.array): clustering assignments for data X
        Returns: validity_index (float):
            score in range[-1, 1] indicating validity of clustering assignments
        """
        n_samples = len(labels)
        validity_index = 0
        # Overall score is the size-weighted average of per-cluster validities.
        for label in np.unique(labels):
            fraction = np.sum(labels == label) / float(n_samples)
            cluster_validity = self._cluster_validity_index(MST, labels, label)
            validity_index += fraction * cluster_validity
        return validity_index
    def _get_label_member_indices(self, labels, cluster):
        """
        Helper function to get samples of a specified cluster.
        Args:
            labels (np.array): clustering assignments for data X
            cluster (int): cluster of interest
        Returns: members (np.ndarray)
            array of dimensions (n_samples,) of indices of samples of cluster
        """
        # Memoised: each cluster's member list is computed at most once.
        if cluster in self.cluster_lookup:
            return self.cluster_lookup[cluster]
        indices = np.where(labels == cluster)[0]
        self.cluster_lookup[cluster] = indices
        return indices
def get_score(samples: np.ndarray, labels: np.ndarray, dist_function: str = 'euclidean', verbose=False):
    """Module-level convenience wrapper around the DBCV class.

    Builds a DBCV scorer for (samples, labels) and returns its validity
    index in [-1, 1].
    """
    return DBCV(samples, labels, dist_function, verbose=verbose).get_score()
| [
"scipy.spatial.distance.cdist",
"numpy.sum",
"numpy.unique",
"numpy.transpose",
"scipy.sparse.csgraph.minimum_spanning_tree",
"numpy.shape",
"numpy.max",
"numpy.min",
"numpy.where",
"numpy.array",
"numpy.concatenate",
"scipy.sparse.csgraph.dijkstra"
] | [((1571, 1592), 'scipy.sparse.csgraph.dijkstra', 'csgraph.dijkstra', (['mst'], {}), '(mst)\n', (1587, 1592), False, 'from scipy.sparse import csgraph\n'), ((2807, 2852), 'scipy.spatial.distance.cdist', 'cdist', (['samples', 'samples'], {'metric': 'dist_function'}), '(samples, samples, metric=dist_function)\n', (2812, 2852), False, 'from scipy.spatial.distance import cdist\n'), ((4599, 4671), 'numpy.concatenate', 'np.concatenate', (['[core_dists_i, core_dists_j, pairwise_distance]'], {'axis': '(-1)'}), '([core_dists_i, core_dists_j, pairwise_distance], axis=-1)\n', (4613, 4671), True, 'import numpy as np\n'), ((4688, 4740), 'numpy.max', 'np.max', (['mutual_reachability_distance_matrix'], {'axis': '(-1)'}), '(mutual_reachability_distance_matrix, axis=-1)\n', (4694, 4740), True, 'import numpy as np\n'), ((6136, 6155), 'numpy.max', 'np.max', (['cluster_MST'], {}), '(cluster_MST)\n', (6142, 6155), True, 'import numpy as np\n'), ((7050, 7072), 'numpy.min', 'np.min', (['relevant_paths'], {}), '(relevant_paths)\n', (7056, 7072), True, 'import numpy as np\n'), ((7699, 7716), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (7708, 7716), True, 'import numpy as np\n'), ((8570, 8630), 'numpy.max', 'np.max', (['[min_density_separation, cluster_density_sparseness]'], {}), '([min_density_separation, cluster_density_sparseness])\n', (8576, 8630), True, 'import numpy as np\n'), ((9302, 9319), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (9311, 9319), True, 'import numpy as np\n'), ((2353, 2368), 'numpy.shape', 'np.shape', (['point'], {}), '(point)\n', (2361, 2368), True, 'import numpy as np\n'), ((2394, 2419), 'numpy.shape', 'np.shape', (['distance_vector'], {}), '(distance_vector)\n', (2402, 2419), True, 'import numpy as np\n'), ((3646, 3657), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (3654, 3657), True, 'import numpy as np\n'), ((5414, 5431), 'numpy.transpose', 'np.transpose', (['mst'], {}), '(mst)\n', (5426, 5431), True, 'import numpy as 
np\n'), ((6021, 6048), 'numpy.where', 'np.where', (['(labels == cluster)'], {}), '(labels == cluster)\n', (6029, 6048), True, 'import numpy as np\n'), ((6864, 6893), 'numpy.where', 'np.where', (['(labels == cluster_i)'], {}), '(labels == cluster_i)\n', (6872, 6893), True, 'import numpy as np\n'), ((6917, 6946), 'numpy.where', 'np.where', (['(labels == cluster_j)'], {}), '(labels == cluster_j)\n', (6925, 6946), True, 'import numpy as np\n'), ((10055, 10082), 'numpy.where', 'np.where', (['(labels == cluster)'], {}), '(labels == cluster)\n', (10063, 10082), True, 'import numpy as np\n'), ((5350, 5382), 'scipy.sparse.csgraph.minimum_spanning_tree', 'minimum_spanning_tree', (['dist_tree'], {}), '(dist_tree)\n', (5371, 5382), False, 'from scipy.sparse.csgraph import minimum_spanning_tree\n'), ((9344, 9367), 'numpy.sum', 'np.sum', (['(labels == label)'], {}), '(labels == label)\n', (9350, 9367), True, 'import numpy as np\n'), ((4154, 4174), 'numpy.array', 'np.array', (['core_dists'], {}), '(core_dists)\n', (4162, 4174), True, 'import numpy as np\n')] |
import numpy as np
import scipy
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 16})
def plot_results(title, ylim=None, xlim=None, whether_save=True):
    """Plot mean +/- std of the estimated P_e versus sample count.

    For each of the three methods a curve of the mean over runs is drawn
    with a shaded std band, together with the true value's +/-10% band
    (dashed black) and the SGP asymptote (dotted purple).

    Args:
        title (str): figure title
        ylim, xlim: axis limits forwarded to matplotlib (None = automatic)
        whether_save (bool): if True, save the figure to 'result.pdf'
    """
    plt.figure(figsize=(6,4))
    for method in ('lh_sgpr', 'lh_vhgpr', 'seq_vhgpr'):
        # Each .npy file holds a list of runs; every run is (x, y) and all
        # runs are assumed to share the same x grid (results[0][0]).
        results = np.load('data/' + method + '.npy', allow_pickle=True)
        x = results[0][0]
        y = [i[1] for i in results]
        plt.plot(x, np.mean(y, axis=0))
        # NOTE(review): the shaded band spans [mean, mean + std] only --
        # confirm whether [mean - std, mean + std] was intended.
        plt.fill_between(x, np.mean(y, axis =0),
                 np.mean(y, axis=0) + np.std(y, axis=0),
                 alpha = 0.2)
    p_true, p_sgp_asy = np.load('data/const.npy', allow_pickle=True)
    # +/-10% band around the ground-truth value, then the SGP asymptote.
    plt.plot(x, np.ones(len(x)) * p_true *(1+0.1),"k--")
    plt.plot(x, np.ones(len(x)) * p_true *(1-0.1),"k--")
    plt.plot(x, np.ones(len(x)) * p_sgp_asy, ':', color = 'purple')
    plt.xlabel('Number of Samples')
    plt.ylabel('$P_e$')
    plt.title(title)
    plt.ylim(ylim)
    plt.xlim(xlim)
    if whether_save:
        plt.savefig('result.pdf', bbox_inches = "tight")
plt.show() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"numpy.std",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rcParams.update",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"... | [((64, 102), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (83, 102), True, 'import matplotlib.pyplot as plt\n'), ((180, 206), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (190, 206), True, 'import matplotlib.pyplot as plt\n'), ((619, 663), 'numpy.load', 'np.load', (['"""data/const.npy"""'], {'allow_pickle': '(True)'}), "('data/const.npy', allow_pickle=True)\n", (626, 663), True, 'import numpy as np\n'), ((850, 881), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Samples"""'], {}), "('Number of Samples')\n", (860, 881), True, 'import matplotlib.pyplot as plt\n'), ((886, 905), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$P_e$"""'], {}), "('$P_e$')\n", (896, 905), True, 'import matplotlib.pyplot as plt\n'), ((910, 926), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (919, 926), True, 'import matplotlib.pyplot as plt\n'), ((931, 945), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (939, 945), True, 'import matplotlib.pyplot as plt\n'), ((950, 964), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (958, 964), True, 'import matplotlib.pyplot as plt\n'), ((1047, 1057), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1055, 1057), True, 'import matplotlib.pyplot as plt\n'), ((285, 338), 'numpy.load', 'np.load', (["('data/' + method + '.npy')"], {'allow_pickle': '(True)'}), "('data/' + method + '.npy', allow_pickle=True)\n", (292, 338), True, 'import numpy as np\n'), ((994, 1040), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""result.pdf"""'], {'bbox_inches': '"""tight"""'}), "('result.pdf', bbox_inches='tight')\n", (1005, 1040), True, 'import matplotlib.pyplot as plt\n'), ((421, 439), 'numpy.mean', 'np.mean', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (428, 439), True, 'import numpy as np\n'), ((469, 487), 'numpy.mean', 'np.mean', (['y'], 
{'axis': '(0)'}), '(y, axis=0)\n', (476, 487), True, 'import numpy as np\n'), ((516, 534), 'numpy.mean', 'np.mean', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (523, 534), True, 'import numpy as np\n'), ((537, 554), 'numpy.std', 'np.std', (['y'], {'axis': '(0)'}), '(y, axis=0)\n', (543, 554), True, 'import numpy as np\n')] |
from collections import defaultdict
import numpy as np
from sklearn.model_selection import train_test_split
class Samples:
    """Lightweight container pairing a feature matrix with its labels."""

    def __init__(self, x, y):
        self.x = x  # features
        self.y = y  # labels
class Data:
    """Train/test split plus empirical (frequency-based) probability tables
    over discrete feature values and class labels.

    ``x_prob``/``y_prob``/``xy_prob`` are built eagerly when ``build_probs``
    is True; the pairwise tables ``xz_prob``/``xyz_prob`` are computed
    lazily and cached on first request.
    """
    def __init__(self, train, test, build_probs, name):
        self.train = train
        self.test = test
        self.name = name
        if build_probs:
            # Marginals P(x) per feature, P(y), and joints P(x, y) per feature.
            self.x_prob = self.__get_feature_probability()
            self.y_prob = self.__get_label_probability()
            self.xy_prob = self.__get_joint_probability()
            # Lazy caches keyed by pairs of feature indices.
            self.xz_prob = nestedDictionary(2)
            self.xyz_prob = nestedDictionary(2)
    @staticmethod
    def build(samples, labels, build_probs, name):
        """Factory: split samples/labels 80/20 and wrap them in a Data object."""
        samplesTrain, samplesTest, labelsTrain, labelsTest = train_test_split(samples, labels, test_size=0.2)
        train = Samples(samplesTrain, labelsTrain)
        test = Samples(samplesTest, labelsTest)
        data = Data(train, test, build_probs, name)
        return data
    def get_xyz_prob(self, x, z):
        """Return (computing and caching on demand) the joint table P(x, y, z).

        Bug fix: the original returned None when feature ``x`` was already a
        cache key but the pair (x, z) had not been computed yet.  The table
        is now always computed -- or derived from the cached (z, x) table,
        whose axes are simply reversed -- before returning.
        """
        if x in self.xyz_prob and z in self.xyz_prob[x]:
            return self.xyz_prob[x][z]
        if z in self.xyz_prob and x in self.xyz_prob[z]:
            # (|z|, |y|, |x|) -> (|x|, |y|, |z|): full axis reversal.
            self.xyz_prob[x][z] = np.transpose(self.xyz_prob[z][x])
        else:
            self.xyz_prob[x][z] = self.__calculate_xyz_density(x, z)
        return self.xyz_prob[x][z]
    def get_xz_prob(self, x, z):
        """Return (computing and caching on demand) the joint table P(x, z).

        Same cache-miss bug fix as ``get_xyz_prob``.
        """
        if x in self.xz_prob and z in self.xz_prob[x]:
            return self.xz_prob[x][z]
        if z in self.xz_prob and x in self.xz_prob[z]:
            self.xz_prob[x][z] = np.transpose(self.xz_prob[z][x])
        else:
            self.xz_prob[x][z] = self.__calculate_xz_density(x, z)
        return self.xz_prob[x][z]
    def get_xy_prob(self, x):
        """Return the precomputed joint table P(x, y) for feature index x."""
        return self.xy_prob[x]
    def __calculate_xyz_density(self, x, z):
        """Build the (|x|, |y|, |z|) empirical density of (feature x, label, feature z)."""
        xyz_values = self.__get_xyz_frequency_map(x, z)
        x_value_to_index = self.__value_to_index(self.train.x[:, x])
        y_value_to_index = self.__value_to_index(self.train.y)
        z_value_to_index = self.__value_to_index(self.train.x[:, z])
        xyz_prob = np.zeros((len(x_value_to_index), len(y_value_to_index), len(z_value_to_index)))
        for x_value in xyz_values.keys():
            for y_value in xyz_values[x_value].keys():
                for z_value in xyz_values[x_value][y_value].keys():
                    xyz_prob[x_value_to_index[x_value]][y_value_to_index[y_value]][z_value_to_index[z_value]] = \
                        xyz_values[x_value][y_value][z_value]
        # Normalise counts into an empirical probability table.
        xyz_prob /= self.train.x.shape[0]
        return xyz_prob
    def __get_xyz_frequency_map(self, x, z):
        """Count occurrences of each (x-value, label, z-value) triple in the train set."""
        xyz_values = nestedDictionary(3)
        for sample, label in zip(self.train.x, self.train.y):
            z_value = sample[z]
            x_value = sample[x]
            if x_value in xyz_values and label in xyz_values[x_value] and z_value in xyz_values[x_value][label]:
                xyz_values[x_value][label][z_value] += 1
            else:
                xyz_values[x_value][label][z_value] = 1
        return xyz_values
    def __get_xz_frequency_map(self, x, z):
        """Count occurrences of each (x-value, z-value) pair in the train set."""
        xz_values = nestedDictionary(2)
        for sample in self.train.x:
            z_value = sample[z]
            x_value = sample[x]
            if x_value in xz_values and z_value in xz_values[x_value]:
                xz_values[x_value][z_value] += 1
            else:
                xz_values[x_value][z_value] = 1
        return xz_values
    def __get_x_frequency_map(self, x):
        """Count occurrences of each value of feature ``x`` in the train set."""
        x_values = nestedDictionary(1)
        for sample in self.train.x:
            x_value = sample[x]
            if x_value in x_values:
                x_values[x_value] += 1
            else:
                x_values[x_value] = 1
        return x_values
    def __value_to_index(self, values):
        """Map each distinct value (ascending order) to a dense array index."""
        return {v: i for i, v in enumerate(sorted(set(values)))}
    def __calculate_xz_density(self, x, z):
        """Build the (|x|, |z|) empirical joint density of features x and z."""
        xz_values = self.__get_xz_frequency_map(x, z)
        x_value_to_index = self.__value_to_index(self.train.x[:, x])
        z_value_to_index = self.__value_to_index(self.train.x[:, z])
        xz_prob = np.zeros((len(x_value_to_index), len(z_value_to_index)))
        for x_value in xz_values.keys():
            for z_value in xz_values[x_value].keys():
                xz_prob[x_value_to_index[x_value]][z_value_to_index[z_value]] = \
                    xz_values[x_value][z_value]
        xz_prob /= self.train.x.shape[0]
        return xz_prob
    def __get_feature_probability(self):
        """Marginal density P(x) for every feature column."""
        return [self.__calculate_x_density(x) for x in range(self.train.x.shape[1])]
    def __calculate_x_density(self, x):
        """Empirical marginal density of feature ``x``."""
        x_values = self.__get_x_frequency_map(x)
        x_value_to_index = self.__value_to_index(self.train.x[:, x])
        x_prob = np.zeros(len(x_value_to_index))
        for x_value in x_values.keys():
            x_prob[x_value_to_index[x_value]] = x_values[x_value]
        x_prob /= self.train.x.shape[0]
        return x_prob
    def __get_label_probability(self):
        """Empirical marginal density of the labels."""
        return self.__calculate_y_density()
    def __calculate_y_density(self):
        """Count each label, place the counts by sorted-label index, normalise."""
        y_values = self.__get_y_frequency_map()
        y_value_to_index = self.__value_to_index(self.train.y)
        y_prob = np.zeros(len(y_value_to_index))
        for y_value in y_values.keys():
            y_prob[y_value_to_index[y_value]] = y_values[y_value]
        y_prob /= len(self.train.y)
        return y_prob
    def __get_y_frequency_map(self):
        """Count occurrences of each label in the train set."""
        y_values = nestedDictionary(1)
        for label in self.train.y:
            if label in y_values:
                y_values[label] += 1
            else:
                y_values[label] = 1
        return y_values
    def __get_joint_probability(self):
        """Joint density P(x, y) for every feature column."""
        return [self.__calculate_xy_density(x) for x in range(self.train.x.shape[1])]
    def __calculate_xy_density(self, x):
        """Empirical joint density of feature ``x`` and the label."""
        xy_values = self.__get_xy_frequency_map(x)
        x_value_to_index = self.__value_to_index(self.train.x[:, x])
        y_value_to_index = self.__value_to_index(self.train.y)
        xy_prob = np.zeros((len(x_value_to_index), len(y_value_to_index)))
        for x_value in xy_values.keys():
            for y_value in xy_values[x_value].keys():
                xy_prob[x_value_to_index[x_value]][y_value_to_index[y_value]] = xy_values[x_value][y_value]
        xy_prob /= self.train.x.shape[0]
        return xy_prob
    def __get_xy_frequency_map(self, x):
        """Count occurrences of each (x-value, label) pair in the train set."""
        xy_values = nestedDictionary(2)
        for sample, label in zip(self.train.x, self.train.y):
            x_value = sample[x]
            if x_value in xy_values and label in xy_values[x_value]:
                xy_values[x_value][label] += 1
            else:
                xy_values[x_value][label] = 1
        return xy_values
def nestedDictionary(depth):
    """Create a nested ``defaultdict`` with ``depth`` levels.

    The innermost level defaults to ``int`` (0), so the frequency-counting
    callers can do ``d[a][b] += 1`` without pre-initialising keys.  The
    original used ``defaultdict(np.array)``, whose factory raises
    ``TypeError`` whenever it is actually invoked, because ``np.array()``
    requires an argument.
    """
    if depth == 1:
        return defaultdict(int)
    return defaultdict(lambda: nestedDictionary(depth - 1))
| [
"collections.defaultdict",
"sklearn.model_selection.train_test_split",
"numpy.transpose"
] | [((764, 812), 'sklearn.model_selection.train_test_split', 'train_test_split', (['samples', 'labels'], {'test_size': '(0.2)'}), '(samples, labels, test_size=0.2)\n', (780, 812), False, 'from sklearn.model_selection import train_test_split\n'), ((7049, 7070), 'collections.defaultdict', 'defaultdict', (['np.array'], {}), '(np.array)\n', (7060, 7070), False, 'from collections import defaultdict\n'), ((1246, 1279), 'numpy.transpose', 'np.transpose', (['self.xyz_prob[z][x]'], {}), '(self.xyz_prob[z][x])\n', (1258, 1279), True, 'import numpy as np\n'), ((1694, 1726), 'numpy.transpose', 'np.transpose', (['self.xz_prob[z][x]'], {}), '(self.xz_prob[z][x])\n', (1706, 1726), True, 'import numpy as np\n')] |
import torch
import numpy as np
def gaussian_pdf(x, mu, sigma):
    """Evaluate the normal density N(mu, sigma**2) at x (tensor-friendly)."""
    z = (x - mu) / sigma
    coeff = 1 / (sigma * np.sqrt(2 * np.pi))
    return coeff * torch.exp(-0.5 * torch.square(z))
def signed_gaussian_pdf(x, mu, sigma):
    """Normal density at x with the sign of (x - mu) attached (0 at x == mu)."""
    z = (x - mu) / sigma
    density = (1 / (sigma * np.sqrt(2 * np.pi))) * torch.exp(-0.5 * torch.square(z))
    return density * torch.sign(x - mu)
def reverse_gaussian_pdf(y, mu, sigma):
    """
    Invert the gaussian density on its right-hand branch (returns x >= mu).
    Not the \"inverse gaussian\", but inverses the gaussian function
    >>> eq = lambda a, b: torch.all(torch.lt(torch.abs(torch.add(a, -b)), 1e-4))
    >>> x = abs(torch.rand((100,)) + 5.0)
    >>> gaussian_output = gaussian_pdf(x, 5.0, 1.0)
    >>> rev_x = reverse_gaussian_pdf(gaussian_output, 5.0, 1.0)
    >>> eq(x, rev_x).item()
    True
    :param y output of the gaussian
    :param mu mean
    :param sigma std dev
    """
    # log(y * sigma * sqrt(2*pi)) == -((x - mu) / sigma)**2 / 2
    log_term = torch.log(y * (sigma * np.sqrt(2 * np.pi)))
    return mu + sigma * torch.sqrt(-2 * log_term)
def reverse_signed_gaussian_pdf(y, mu, sigma):
    """
    Invert the signed gaussian: magnitude from |y|, side of mu from sign(y).
    Not the \"inverse gaussian\", but inverses the signed gaussian function
    >>> eq = lambda a, b: torch.all(torch.lt(torch.abs(torch.add(a, -b)), 1e-1))
    >>> x = abs(torch.rand((100,)) * 5.0 - 2.5)
    >>> gaussian_output = signed_gaussian_pdf(x, 2.5, 10.0)
    >>> rev_x = reverse_signed_gaussian_pdf(gaussian_output, 2.5, 10.0)
    >>> eq(x, rev_x).item()
    True
    :param y output of the gaussian
    :param mu mean
    :param sigma std dev
    """
    offset = torch.sqrt(-2 * torch.log(abs(y) * (sigma * np.sqrt(2 * np.pi))))
    return mu + sigma * offset * torch.sign(y)
if __name__ == "__main__":
    import doctest
    # Run the doctests embedded in the docstrings above.
    doctest.testmod()
| [
"torch.sign",
"torch.square",
"doctest.testmod",
"numpy.sqrt"
] | [((1674, 1691), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (1689, 1691), False, 'import doctest\n'), ((355, 373), 'torch.sign', 'torch.sign', (['(x - mu)'], {}), '(x - mu)\n', (365, 373), False, 'import torch\n'), ((1588, 1601), 'torch.sign', 'torch.sign', (['y'], {}), '(y)\n', (1598, 1601), False, 'import torch\n'), ((91, 109), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (98, 109), True, 'import numpy as np\n'), ((144, 174), 'torch.square', 'torch.square', (['((x - mu) / sigma)'], {}), '((x - mu) / sigma)\n', (156, 174), False, 'import torch\n'), ((259, 277), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (266, 277), True, 'import numpy as np\n'), ((311, 341), 'torch.square', 'torch.square', (['((x - mu) / sigma)'], {}), '((x - mu) / sigma)\n', (323, 341), False, 'import torch\n'), ((910, 928), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (917, 928), True, 'import numpy as np\n'), ((1533, 1551), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1540, 1551), True, 'import numpy as np\n')] |
import logging
import warnings
import numpy as np
from pytplot import get_data, store_data, options
# use nanmean from bottleneck if it's installed, otherwise use the numpy one
# bottleneck nanmean is ~2.5x faster
try:
import bottleneck as bn
nanmean = bn.nanmean
except ImportError:
nanmean = np.nanmean
logging.captureWarnings(True)
logging.basicConfig(format='%(asctime)s: %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
def mms_feeps_omni(eyes, probe='1', datatype='electron', data_units='intensity', data_rate='srvy', level='l2', suffix=''):
"""
This function will calculate the omni-directional FEEPS spectrograms, and is automatically called from mms_load_feeps
Parameters:
eyes: dict
Hash table containing the active sensor eyes
probe: str
probe #, e.g., '4' for MMS4
datatype: str
'electron' or 'ion'
data_units: str
'intensity'
data_rate: str
instrument data rate, e.g., 'srvy' or 'brst'
level: str
data level
suffix: str
suffix of the loaded data
Returns:
List of tplot variables created.
"""
out_vars = []
units_label = ''
if data_units == 'intensity':
units_label = '1/(cm^2-sr-s-keV)'
elif data_units == 'counts':
units_label = '[counts/s]'
prefix = 'mms'+probe+'_epd_feeps_'
if datatype == 'electron':
energies = np.array([33.2, 51.90, 70.6, 89.4, 107.1, 125.2, 146.5, 171.3,
200.2, 234.0, 273.4, 319.4, 373.2, 436.0, 509.2])
else:
energies = np.array([57.9, 76.8, 95.4, 114.1, 133.0, 153.7, 177.6,
205.1, 236.7, 273.2, 315.4, 363.8, 419.7, 484.2, 558.6])
# set unique energy bins per spacecraft; from DLT on 31 Jan 2017
eEcorr = [14.0, -1.0, -3.0, -3.0]
iEcorr = [0.0, 0.0, 0.0, 0.0]
eGfact = [1.0, 1.0, 1.0, 1.0]
iGfact = [0.84, 1.0, 1.0, 1.0]
if probe == '1' and datatype == 'electron':
energies = energies + eEcorr[0]
if probe == '2' and datatype == 'electron':
energies = energies + eEcorr[1]
if probe == '3' and datatype == 'electron':
energies = energies + eEcorr[2]
if probe == '4' and datatype == 'electron':
energies = energies + eEcorr[3]
if probe == '1' and datatype == 'ion':
energies = energies + iEcorr[0]
if probe == '2' and datatype == 'ion':
energies = energies + iEcorr[1]
if probe == '3' and datatype == 'ion':
energies = energies + iEcorr[2]
if probe == '4' and datatype == 'ion':
energies = energies + iEcorr[3]
# percent error around energy bin center to accept data for averaging;
# anything outside of energies[i] +/- en_chk*energies[i] will be changed
# to NAN and not averaged
en_chk = 0.10
top_sensors = eyes['top']
bot_sensors = eyes['bottom']
tmpdata = get_data(prefix+data_rate+'_'+level+'_'+datatype+'_top_'+data_units+'_sensorid_'+str(top_sensors[0])+'_clean_sun_removed'+suffix)
if tmpdata is not None:
if level != 'sitl':
dalleyes = np.empty((len(tmpdata[0]), len(tmpdata[2]), len(top_sensors)+len(bot_sensors)))
dalleyes[:] = np.nan
for idx, sensor in enumerate(top_sensors):
var_name = prefix+data_rate+'_'+level+'_'+datatype+'_top_'+data_units+'_sensorid_'+str(sensor)+'_clean_sun_removed'+suffix
data = get_data(var_name)
dalleyes[:, :, idx] = data[1]
try:
iE = np.where(np.abs(energies-data[2]) > en_chk*energies)
if iE[0].size != 0:
dalleyes[:, iE[0], idx] = np.nan
except Warning:
logging.warning('NaN in energy table encountered; sensor T' + str(sensor))
for idx, sensor in enumerate(bot_sensors):
var_name = prefix+data_rate+'_'+level+'_'+datatype+'_bottom_'+data_units+'_sensorid_'+str(sensor)+'_clean_sun_removed'+suffix
data = get_data(var_name)
dalleyes[:, :, idx+len(top_sensors)] = data[1]
try:
iE = np.where(np.abs(energies-data[2]) > en_chk*energies)
if iE[0].size != 0:
dalleyes[:, iE[0], idx+len(top_sensors)] = np.nan
except Warning:
logging.warning('NaN in energy table encountered; sensor B' + str(sensor))
else: # sitl data
dalleyes = np.empty((len(tmpdata[0]), len(tmpdata[2]), len(top_sensors)))
dalleyes[:] = np.nan
for idx, sensor in enumerate(top_sensors):
var_name = prefix+data_rate+'_'+level+'_'+datatype+'_top_'+data_units+'_sensorid_'+str(sensor)+'_clean_sun_removed'+suffix
data = get_data(var_name)
dalleyes[:, :, idx] = data[1]
iE = np.where(np.abs(energies-data[2]) > en_chk*energies)
if iE[0].size != 0:
dalleyes[:, iE[0], idx] = np.nan
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
flux_omni = nanmean(dalleyes, axis=2)
if probe == '1' and datatype == 'electron':
flux_omni = flux_omni*eGfact[0]
if probe == '2' and datatype == 'electron':
flux_omni = flux_omni*eGfact[1]
if probe == '3' and datatype == 'electron':
flux_omni = flux_omni*eGfact[2]
if probe == '4' and datatype == 'electron':
flux_omni = flux_omni*eGfact[3]
if probe == '1' and datatype == 'ion':
flux_omni = flux_omni*iGfact[0]
if probe == '2' and datatype == 'ion':
flux_omni = flux_omni*iGfact[1]
if probe == '3' and datatype == 'ion':
flux_omni = flux_omni*iGfact[2]
if probe == '4' and datatype == 'ion':
flux_omni = flux_omni*iGfact[3]
store_data('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, data={'x': tmpdata[0], 'y': flux_omni, 'v': energies})
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'spec', True)
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'ylog', True)
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'zlog', True)
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'Colormap', 'jet')
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'ztitle', units_label)
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'ytitle', 'MMS' + str(probe) + ' ' + datatype + ' (keV)')
out_vars.append('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix)
return out_vars
| [
"pytplot.store_data",
"numpy.abs",
"warnings.simplefilter",
"logging.basicConfig",
"pytplot.get_data",
"logging.captureWarnings",
"numpy.array",
"warnings.catch_warnings",
"pytplot.options"
] | [((320, 349), 'logging.captureWarnings', 'logging.captureWarnings', (['(True)'], {}), '(True)\n', (343, 349), False, 'import logging\n'), ((350, 458), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s: %(message)s"""', 'datefmt': '"""%d-%b-%y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format='%(asctime)s: %(message)s', datefmt=\n '%d-%b-%y %H:%M:%S', level=logging.INFO)\n", (369, 458), False, 'import logging\n'), ((1489, 1604), 'numpy.array', 'np.array', (['[33.2, 51.9, 70.6, 89.4, 107.1, 125.2, 146.5, 171.3, 200.2, 234.0, 273.4, \n 319.4, 373.2, 436.0, 509.2]'], {}), '([33.2, 51.9, 70.6, 89.4, 107.1, 125.2, 146.5, 171.3, 200.2, 234.0,\n 273.4, 319.4, 373.2, 436.0, 509.2])\n', (1497, 1604), True, 'import numpy as np\n'), ((1651, 1767), 'numpy.array', 'np.array', (['[57.9, 76.8, 95.4, 114.1, 133.0, 153.7, 177.6, 205.1, 236.7, 273.2, 315.4, \n 363.8, 419.7, 484.2, 558.6]'], {}), '([57.9, 76.8, 95.4, 114.1, 133.0, 153.7, 177.6, 205.1, 236.7, 273.2,\n 315.4, 363.8, 419.7, 484.2, 558.6])\n', (1659, 1767), True, 'import numpy as np\n'), ((6048, 6233), 'pytplot.store_data', 'store_data', (["('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' + datatype +\n '_' + data_units + '_omni' + suffix)"], {'data': "{'x': tmpdata[0], 'y': flux_omni, 'v': energies}"}), "('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' +\n datatype + '_' + data_units + '_omni' + suffix, data={'x': tmpdata[0],\n 'y': flux_omni, 'v': energies})\n", (6058, 6233), False, 'from pytplot import get_data, store_data, options\n'), ((6212, 6349), 'pytplot.options', 'options', (["('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' + datatype +\n '_' + data_units + '_omni' + suffix)", '"""spec"""', '(True)'], {}), "('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' +\n datatype + '_' + data_units + '_omni' + suffix, 'spec', True)\n", (6219, 6349), False, 'from pytplot import get_data, store_data, options\n'), ((6332, 6469), 
'pytplot.options', 'options', (["('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' + datatype +\n '_' + data_units + '_omni' + suffix)", '"""ylog"""', '(True)'], {}), "('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' +\n datatype + '_' + data_units + '_omni' + suffix, 'ylog', True)\n", (6339, 6469), False, 'from pytplot import get_data, store_data, options\n'), ((6452, 6589), 'pytplot.options', 'options', (["('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' + datatype +\n '_' + data_units + '_omni' + suffix)", '"""zlog"""', '(True)'], {}), "('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' +\n datatype + '_' + data_units + '_omni' + suffix, 'zlog', True)\n", (6459, 6589), False, 'from pytplot import get_data, store_data, options\n'), ((6572, 6714), 'pytplot.options', 'options', (["('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' + datatype +\n '_' + data_units + '_omni' + suffix)", '"""Colormap"""', '"""jet"""'], {}), "('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' +\n datatype + '_' + data_units + '_omni' + suffix, 'Colormap', 'jet')\n", (6579, 6714), False, 'from pytplot import get_data, store_data, options\n'), ((6697, 6843), 'pytplot.options', 'options', (["('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' + datatype +\n '_' + data_units + '_omni' + suffix)", '"""ztitle"""', 'units_label'], {}), "('mms' + probe + '_epd_feeps_' + data_rate + '_' + level + '_' +\n datatype + '_' + data_units + '_omni' + suffix, 'ztitle', units_label)\n", (6704, 6843), False, 'from pytplot import get_data, store_data, options\n'), ((5143, 5168), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (5166, 5168), False, 'import warnings\n'), ((5182, 5238), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (5203, 5238), False, 'import warnings\n'), ((3508, 3526), 
'pytplot.get_data', 'get_data', (['var_name'], {}), '(var_name)\n', (3516, 3526), False, 'from pytplot import get_data, store_data, options\n'), ((4116, 4134), 'pytplot.get_data', 'get_data', (['var_name'], {}), '(var_name)\n', (4124, 4134), False, 'from pytplot import get_data, store_data, options\n'), ((4901, 4919), 'pytplot.get_data', 'get_data', (['var_name'], {}), '(var_name)\n', (4909, 4919), False, 'from pytplot import get_data, store_data, options\n'), ((4996, 5022), 'numpy.abs', 'np.abs', (['(energies - data[2])'], {}), '(energies - data[2])\n', (5002, 5022), True, 'import numpy as np\n'), ((3628, 3654), 'numpy.abs', 'np.abs', (['(energies - data[2])'], {}), '(energies - data[2])\n', (3634, 3654), True, 'import numpy as np\n'), ((4253, 4279), 'numpy.abs', 'np.abs', (['(energies - data[2])'], {}), '(energies - data[2])\n', (4259, 4279), True, 'import numpy as np\n')] |
"""
Masked Grid for Two Sides of a Fault
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In this example, I demonstrate how to use a surface mesh of a fault in the
subsurface to create a data mask on a modeling grid. This is a particularly
useful exercise for scenarios where you may want to perform some sort of
modeling in a different manner due to geological differences on the two sides
of the fault - but still have a single modeling grid.
Let's get to it!
"""
import numpy as np
# sphinx_gallery_thumbnail_number = 4
import pyvista as pv
from pyvista import examples
###############################################################################
# Download the fault surface mesh that defines the geological boundary.
path, _ = examples.downloads._download_file("opal_mound_fault.vtk")
fault = pv.read(path)
# Bare expression: renders the mesh summary when run in a notebook/gallery.
fault
###############################################################################
# Create the modelling grid if you don't already have one
grid = pv.UniformGrid()
# Bottom south-west corner
grid.origin = (329700, 4252600, -2700)
# Cell sizes
grid.spacing = (500, 500, 500)
# Number of cells in each direction
grid.dimensions = (30, 35, 10)
# Bare expression: renders the grid summary when run in a notebook/gallery.
grid
###############################################################################
# Take a quick preview to see where the fault is inside of the grid
p = pv.Plotter()
p.add_mesh(grid, opacity=0.5)
p.add_mesh(fault, color="orange")
p.show()
###############################################################################
# You may notice that the modeling grid's extent is far greater than that of
# the fault -- not to worry! PyVista's `clip_surface` filter and the utility
# I'm going to share below handles this quite well by interpolating the fault's
# plane outward.
#
# This is a reusable utility for performing the mask:
def mask_mesh_by_surface(mesh, surface):
    """Label each cell and point of ``mesh`` by which side of ``surface`` it is on.

    Returns a copy of ``mesh`` carrying four new arrays: ``pids``/``cids``
    (original point/cell indices) and ``point_mask``/``cell_mask`` (1 for one
    side of the surface, 2 for the other).
    """
    labeled = mesh.copy()
    # Tag every point and cell with its original index so the clipped halves
    # can be mapped back onto the full grid.
    labeled["pids"] = np.arange(labeled.n_points)
    labeled["cids"] = np.arange(labeled.n_cells)
    # Clip twice -- once per side of the surface.  compute_distance=True also
    # attaches an "implicit_distance" array used for the point mask below.
    side_a = labeled.clip_surface(surface, invert=False, compute_distance=True)
    side_b = labeled.clip_surface(surface, invert=True, compute_distance=True)
    # Cell mask: mark the cells that survived each clip.
    cell_mask = np.zeros(labeled.n_cells, dtype=int)
    cell_mask[side_a["cids"]] = 1
    cell_mask[side_b["cids"]] = 2
    labeled["cell_mask"] = cell_mask
    # Point mask: the sign of the implicit distance tells the side directly.
    labeled["point_mask"] = np.where(labeled["implicit_distance"] >= 0, 1, 2)
    return labeled
###############################################################################
# Let's run it and take a look at the result!
masked = mask_mesh_by_surface(grid, fault)
p = pv.Plotter()
p.add_mesh(fault, color="orange")
p.add_mesh(masked, scalars="point_mask", opacity=0.5)
p.show()
###############################################################################
# And here is how you might use that mask to do some sort of fancy modeling.
# In my example, I'm going to use a rather sophisticated distance calculation:
# Select only the points that fell on side 1 of the fault.
ids = np.argwhere(masked["point_mask"] == 1).ravel()
pts = grid.points[ids]
len(pts)
###############################################################################
# Compute distance from TNE corner
# bounds[1::2] picks (xmax, ymax, zmax) -- the top-north-east corner.
compute = lambda a, b: np.sqrt(np.sum((b - a) ** 2, axis=1))
dist = compute(pts, np.repeat([masked.bounds[1::2]], pts.shape[0], axis=0))
###############################################################################
# Add those results back to the source grid
masked["cool_math"] = np.zeros(grid.n_points) # Need to preallocate
masked["cool_math"][ids] = dist
# Do some different math for the other side
...
###############################################################################
# Display!
masked.plot(scalars="cool_math")
###############################################################################
# Visualize one side of the masked grid
a = masked.threshold(1.5, scalars="cell_mask", invert=True)
p = pv.Plotter()
p.add_mesh(a, scalars="cool_math")
p.add_mesh(fault, color="orange")
p.show()
| [
"numpy.sum",
"pyvista.examples.downloads._download_file",
"pyvista.read",
"numpy.zeros",
"pyvista.Plotter",
"numpy.arange",
"numpy.argwhere",
"pyvista.UniformGrid",
"numpy.repeat"
] | [((656, 713), 'pyvista.examples.downloads._download_file', 'examples.downloads._download_file', (['"""opal_mound_fault.vtk"""'], {}), "('opal_mound_fault.vtk')\n", (689, 713), False, 'from pyvista import examples\n'), ((722, 735), 'pyvista.read', 'pv.read', (['path'], {}), '(path)\n', (729, 735), True, 'import pyvista as pv\n'), ((889, 905), 'pyvista.UniformGrid', 'pv.UniformGrid', ([], {}), '()\n', (903, 905), True, 'import pyvista as pv\n'), ((1241, 1253), 'pyvista.Plotter', 'pv.Plotter', ([], {}), '()\n', (1251, 1253), True, 'import pyvista as pv\n'), ((2674, 2686), 'pyvista.Plotter', 'pv.Plotter', ([], {}), '()\n', (2684, 2686), True, 'import pyvista as pv\n'), ((3506, 3529), 'numpy.zeros', 'np.zeros', (['grid.n_points'], {}), '(grid.n_points)\n', (3514, 3529), True, 'import numpy as np\n'), ((3945, 3957), 'pyvista.Plotter', 'pv.Plotter', ([], {}), '()\n', (3955, 3957), True, 'import pyvista as pv\n'), ((1832, 1856), 'numpy.arange', 'np.arange', (['grid.n_points'], {}), '(grid.n_points)\n', (1841, 1856), True, 'import numpy as np\n'), ((1876, 1899), 'numpy.arange', 'np.arange', (['grid.n_cells'], {}), '(grid.n_cells)\n', (1885, 1899), True, 'import numpy as np\n'), ((2089, 2122), 'numpy.zeros', 'np.zeros', (['grid.n_cells'], {'dtype': 'int'}), '(grid.n_cells, dtype=int)\n', (2097, 2122), True, 'import numpy as np\n'), ((2255, 2298), 'numpy.argwhere', 'np.argwhere', (["(grid['implicit_distance'] >= 0)"], {}), "(grid['implicit_distance'] >= 0)\n", (2266, 2298), True, 'import numpy as np\n'), ((2311, 2353), 'numpy.argwhere', 'np.argwhere', (["(grid['implicit_distance'] < 0)"], {}), "(grid['implicit_distance'] < 0)\n", (2322, 2353), True, 'import numpy as np\n'), ((2379, 2413), 'numpy.zeros', 'np.zeros', (['grid.n_points'], {'dtype': 'int'}), '(grid.n_points, dtype=int)\n', (2387, 2413), True, 'import numpy as np\n'), ((3303, 3357), 'numpy.repeat', 'np.repeat', (['[masked.bounds[1::2]]', 'pts.shape[0]'], {'axis': '(0)'}), '([masked.bounds[1::2]], pts.shape[0], 
axis=0)\n', (3312, 3357), True, 'import numpy as np\n'), ((3027, 3065), 'numpy.argwhere', 'np.argwhere', (["(masked['point_mask'] == 1)"], {}), "(masked['point_mask'] == 1)\n", (3038, 3065), True, 'import numpy as np\n'), ((3253, 3281), 'numpy.sum', 'np.sum', (['((b - a) ** 2)'], {'axis': '(1)'}), '((b - a) ** 2, axis=1)\n', (3259, 3281), True, 'import numpy as np\n')] |
#! /usr/bin/env python
#
#
# Tool: dumps a candidate waveform to a frame file.
# Default GPS time used is boring
# Tries to be as close to C as possible -- no interface via pylal/glue
#
# EXAMPLE
# python util_NRWriteFrame.py --group 'Sequence-SXS-All' --param 1 --verbose
# python util_NRWriteFrame.py --incl 1.5 --verbose # edge on
#
# WARNING: My version does NOT interpolate the signal to a synchronized set of sample times.
# This may cause problems for some applications, particularly at low sample rates.
from __future__ import print_function
import argparse
import sys
import numpy as np
import RIFT.lalsimutils as lalsimutils
import lalsimulation as lalsim
import lalframe
import lal
import RIFT.physics.EOBTidalExternalC as eobwf
# Command-line interface: masses, tidal deformabilities, timing/segment options.
parser = argparse.ArgumentParser()
parser.add_argument("--fname", default=None, help = "Base name for output frame file. Otherwise auto-generated ")
parser.add_argument("--instrument", default="H1",help="Use H1, L1,V1")
parser.add_argument("--inj", dest='inj', default=None,help="inspiral XML file containing injection information. Used for extrinsic information only")
parser.add_argument("--mass1", default=1.50,type=float,help="Mass in solar masses") # 150 turns out to be ok for Healy et al sims
parser.add_argument("--mass2", default=1.35,type=float,help="Mass in solar masses")
parser.add_argument("--lambda1",default=590,type=float)
parser.add_argument("--lambda2", default=590,type=float)
parser.add_argument("--fmin", default=35,type=float,help="Mininmum frequency in Hz, default is 40Hz to make short enough waveforms. Focus will be iLIGO to keep comutations short")
parser.add_argument("--lmax", default=2, type=int)
parser.add_argument("--event",type=int, dest="event_id", default=None,help="event ID of injection XML to use.")
parser.add_argument("--srate",type=int,default=16384,help="Sampling rate")
parser.add_argument("--seglen", type=float,default=256*2., help="Default window size for processing.")
parser.add_argument("--incl",default=0,type=float,help="Set the inclination of the simuation. Helpful for aligned spin tests")
parser.add_argument("--start", type=int,default=None)
parser.add_argument("--stop", type=int,default=None)
parser.add_argument("--approx",type=str,default=None,help="Unused")
parser.add_argument("--single-ifo",action='store_true',default=None,help="Unused")
parser.add_argument("--verbose", action="store_true",default=False, help="Required to build post-frame-generating sanity-test plots")
parser.add_argument("--save-plots",default=False,action='store_true', help="Write plots to file (only useful for OSX, where interactive is default")
opts= parser.parse_args()
# Window size : 8 s is usually more than enough, though we will fill a 16s buffer to be sure.
T_window = 128. # default. Note in practice most NS-NS waveforms will need to be many tens of minutes long
# Generate signal
# Populate the waveform parameter structure from the command-line options.
P=lalsimutils.ChooseWaveformParams()
P.m1 = opts.mass1 *lal.MSUN_SI
P.m2 = opts.mass2 *lal.MSUN_SI
P.dist = 150*1e6*lal.PC_SI
P.lambda1 = opts.lambda1
P.lambda2 = opts.lambda2
P.fmin=opts.fmin # Just for comparison! Obviously only good for iLIGO
P.ampO=-1 # include 'full physics'
P.deltaT=1./16384
P.taper = lalsim.SIM_INSPIRAL_TAPER_START
# This must be done BEFORE changing the duration
P.scale_to_snr(20,lalsim.SimNoisePSDaLIGOZeroDetHighPower,['H1', 'L1'])
if opts.start and opts.stop:
    opts.seglen = opts.stop-opts.start # override
P.deltaF = 1./opts.seglen #lalsimutils.findDeltaF(P)
# NOTE(review): this check only warns; execution continues even when the
# requested segment is shorter than the working window. Confirm this is intended.
if P.deltaF > 1./T_window:
    print(" time too short ")
if not opts.inj:
    P.taper = lalsimutils.lsu_TAPER_START
    P.tref = 1000000000 #1000000000 # default
    P.dist = 150*1e6*lal.PC_SI # default
else:
    # Extrinsic parameters are copied from the requested row of the injection XML.
    from glue.ligolw import lsctables, table, utils # check all are needed
    filename = opts.inj
    event = opts.event_id
    xmldoc = utils.load_filename(filename, verbose = True,contenthandler =lalsimutils.cthdler)
    sim_inspiral_table = table.get_table(xmldoc, lsctables.SimInspiralTable.tableName)
    P.copy_sim_inspiral(sim_inspiral_table[int(event)])
P.detector = opts.instrument
P.print_params()
# FAIL if masses are not viable
# The EOB tidal model used below is only valid for neutron-star-scale masses.
if P.m1/lal.MSUN_SI > 3 or P.m2/lal.MSUN_SI > 3:
    print(" Invalid NS mass ")
    sys.exit(0)
# Load the EOB tidal waveform modes for these parameters and build h(t).
wfP = eobwf.WaveformModeCatalog(P,lmax=opts.lmax)
print(" Loaded modes ", wfP.waveform_modes_complex.keys())
print(" Duration of stored signal ", wfP.estimateDurationSec())
mtotMsun = (wfP.P.m1+wfP.P.m2)/lal.MSUN_SI
# Generate signal
hoft = wfP.real_hoft() # include translation of source, but NOT interpolation onto regular time grid
print(" Original signal (min, max) ", np.min(hoft.data.data) ,np.max(hoft.data.data))
print(" Original signal duration ", hoft.deltaT*hoft.data.length)
# zero pad to be opts.seglen long
TDlenGoal = int(opts.seglen/hoft.deltaT)
if TDlenGoal < hoft.data.length:
    print(" seglen too short -- signal truncation would be required")
    sys.exit(0)
nptsOrig = hoft.data.length
hoft = lal.ResizeREAL8TimeSeries(hoft, 0, TDlenGoal)
hoft.data.data[nptsOrig:TDlenGoal] = 0  # zero out the newly-appended tail
print(" Resized signal (min, max) ", np.min(hoft.data.data) ,np.max(hoft.data.data))
# zero pad some more on either side, to make sure the segment covers start to stop
if opts.start and hoft.epoch > opts.start:
    nToAddBefore = int((hoft.epoch-opts.start)/hoft.deltaT)
    print("Padding start ", nToAddBefore, hoft.data.length)
    ht = lal.CreateREAL8TimeSeries("Template h(t)",
            hoft.epoch - nToAddBefore*hoft.deltaT, 0, hoft.deltaT, lalsimutils.lsu_DimensionlessUnit,
            hoft.data.length+nToAddBefore)
    ht.data.data[0:ht.data.length] = np.zeros(ht.data.length) # initialize to zero for safety
    ht.data.data[nToAddBefore:nToAddBefore+hoft.data.length] = hoft.data.data
    hoft = ht
if opts.stop and hoft.epoch+hoft.data.length*hoft.deltaT < opts.stop:
    nToAddAtEnd = int( (-(hoft.epoch+hoft.data.length*hoft.deltaT)+opts.stop)/hoft.deltaT)
else:
    nToAddAtEnd=0
if nToAddAtEnd <=0:
    nToAddAtEnd = int(1/hoft.deltaT) # always add at least 1s of padding at end
print("Padding end ", nToAddAtEnd, hoft.data.length)
nptsNow = hoft.data.length
hoft = lal.ResizeREAL8TimeSeries(hoft,0, int(hoft.data.length+nToAddAtEnd))
hoft.data.data[nptsNow:hoft.data.length] = 0
print(" Padded signal (min, max) ", np.min(hoft.data.data) ,np.max(hoft.data.data))
channel = opts.instrument+":FAKE-STRAIN"
tstart = int(hoft.epoch)
duration = int(hoft.data.length*hoft.deltaT)
if not opts.fname:
    fname = opts.instrument.replace("1","")+"-fake_strain-"+str(tstart)+"-"+str(duration)+".gwf"
else:
    # BUGFIX: a user-supplied --fname was previously ignored and `fname`
    # was left undefined, causing a NameError on the next line.
    fname = opts.fname
print("Writing signal with ", hoft.data.length*hoft.deltaT, " to file ", fname)
print("Maximum original ", np.max(hoft.data.data))
print("Start time", hoft.epoch)
lalsimutils.hoft_to_frame_data(fname,channel,hoft)
# Decide whether interactive plots are possible and which image format to use.
bNoInteractivePlots=True # default
fig_extension = '.jpg'
try:
    import matplotlib
    print(" Matplotlib backend ", matplotlib.get_backend())
    # BUGFIX: backend names are strings, so they must be compared with '=='.
    # The original used 'is', which tests object identity and only matched by
    # the accident of string interning (it also raises SyntaxWarning on
    # modern Pythons), so these branches were effectively unreachable.
    if matplotlib.get_backend() == 'MacOSX':
        if opts.save_plots:
            print(" OSX without interactive plots")
            bNoInteractivePlots=True
            fig_extension='.jpg'
        else: # Interactive plots
            print(" OSX with interactive plots")
            bNoInteractivePlots=False
    elif matplotlib.get_backend() == 'agg':
        fig_extension = '.png'
        bNoInteractivePlots=True
        print(" No OSX; no interactive plots ")
    else:
        print(" Unknown configuration ")
        fig_extension = '.png'
        bNoInteractivePlots =True
    from matplotlib import pyplot as plt
    bNoPlots=False
except:
    # NOTE(review): if matplotlib is missing entirely, this import will also
    # fail inside the handler -- confirm that is acceptable for this script.
    from matplotlib import pyplot as plt
    fig_extension = '.png'
    print(" - no matplotlib - ")
    bNoInteractivePlots = True
    bNoPlots = False
# TEST: Confirm it works by reading the frame back and comparing with the
# original time series (plots the region around the waveform peak).
if opts.verbose and not bNoPlots:
    import os
    # First must create corresponding cache file
    os.system("echo "+ fname+ " | lalapps_path2cache > test.cache")
    # Now I can read it
    # Beware that the results are OFFSET FROM ONE ANOTHER due to PADDING,
    # but that the time associations are correct
    hoft2 = lalsimutils.frame_data_to_hoft("test.cache", channel)
    tvals2 = (float(hoft2.epoch) - float(wfP.P.tref)) + np.arange(hoft2.data.length)*hoft2.deltaT
    tvals = (float(hoft.epoch) - float(wfP.P.tref)) + np.arange(hoft.data.length)*hoft.deltaT
    ncrit = np.argmax(hoft2.data.data)
    tcrit = float(hoft2.epoch) - float(wfP.P.tref) + ncrit*hoft2.deltaT # zero time
    print(" Maximum original ", np.max(hoft.data.data), " size ", len(tvals), len(hoft.data.data))
    print(" Maximum frames ", np.max(hoft2.data.data), " size ", len(tvals2), len(hoft2.data.data))
    print(" Location of maximum in samples. relative time ", ncrit, tcrit)
    print(" Location of maximum in samples, compared to tref", tcrit+P.tref, end=' ')
    # BUGFIX: 'hof2' was a typo for 'hoft2' and raised a NameError here.
    print(" Location of maximum as GPS time ", ncrit*hoft2.deltaT+ float(hoft2.epoch))
    plt.plot(tvals2,hoft2.data.data,label='Fr')
    plt.xlim(tcrit-1,tcrit+1)
    plt.plot(tvals,hoft.data.data,label='orig')
    plt.legend();
    if not bNoInteractivePlots:
        plt.show()
    else:
        for indx in [1]:
            print("Writing figure ", indx)
            plt.xlim(tcrit-0.1,tcrit+0.01)
            plt.figure(indx); plt.savefig("eob-framedump-" +str(indx)+fig_extension)
| [
"argparse.ArgumentParser",
"numpy.argmax",
"matplotlib.pyplot.figure",
"glue.ligolw.table.get_table",
"numpy.arange",
"matplotlib.get_backend",
"RIFT.lalsimutils.hoft_to_frame_data",
"RIFT.lalsimutils.frame_data_to_hoft",
"numpy.max",
"matplotlib.pyplot.show",
"RIFT.lalsimutils.ChooseWaveformPar... | [((787, 812), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (810, 812), False, 'import argparse\n'), ((2921, 2955), 'RIFT.lalsimutils.ChooseWaveformParams', 'lalsimutils.ChooseWaveformParams', ([], {}), '()\n', (2953, 2955), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((4288, 4332), 'RIFT.physics.EOBTidalExternalC.WaveformModeCatalog', 'eobwf.WaveformModeCatalog', (['P'], {'lmax': 'opts.lmax'}), '(P, lmax=opts.lmax)\n', (4313, 4332), True, 'import RIFT.physics.EOBTidalExternalC as eobwf\n'), ((5002, 5047), 'lal.ResizeREAL8TimeSeries', 'lal.ResizeREAL8TimeSeries', (['hoft', '(0)', 'TDlenGoal'], {}), '(hoft, 0, TDlenGoal)\n', (5027, 5047), False, 'import lal\n'), ((6820, 6872), 'RIFT.lalsimutils.hoft_to_frame_data', 'lalsimutils.hoft_to_frame_data', (['fname', 'channel', 'hoft'], {}), '(fname, channel, hoft)\n', (6850, 6872), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((3880, 3959), 'glue.ligolw.utils.load_filename', 'utils.load_filename', (['filename'], {'verbose': '(True)', 'contenthandler': 'lalsimutils.cthdler'}), '(filename, verbose=True, contenthandler=lalsimutils.cthdler)\n', (3899, 3959), False, 'from glue.ligolw import lsctables, table, utils\n'), ((3987, 4048), 'glue.ligolw.table.get_table', 'table.get_table', (['xmldoc', 'lsctables.SimInspiralTable.tableName'], {}), '(xmldoc, lsctables.SimInspiralTable.tableName)\n', (4002, 4048), False, 'from glue.ligolw import lsctables, table, utils\n'), ((4269, 4280), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4277, 4280), False, 'import sys\n'), ((4659, 4681), 'numpy.min', 'np.min', (['hoft.data.data'], {}), '(hoft.data.data)\n', (4665, 4681), True, 'import numpy as np\n'), ((4683, 4705), 'numpy.max', 'np.max', (['hoft.data.data'], {}), '(hoft.data.data)\n', (4689, 4705), True, 'import numpy as np\n'), ((4955, 4966), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4963, 4966), False, 'import sys\n'), ((5175, 5197), 'numpy.min', 
'np.min', (['hoft.data.data'], {}), '(hoft.data.data)\n', (5181, 5197), True, 'import numpy as np\n'), ((5199, 5221), 'numpy.max', 'np.max', (['hoft.data.data'], {}), '(hoft.data.data)\n', (5205, 5221), True, 'import numpy as np\n'), ((5478, 5655), 'lal.CreateREAL8TimeSeries', 'lal.CreateREAL8TimeSeries', (['"""Template h(t)"""', '(hoft.epoch - nToAddBefore * hoft.deltaT)', '(0)', 'hoft.deltaT', 'lalsimutils.lsu_DimensionlessUnit', '(hoft.data.length + nToAddBefore)'], {}), "('Template h(t)', hoft.epoch - nToAddBefore * hoft\n .deltaT, 0, hoft.deltaT, lalsimutils.lsu_DimensionlessUnit, hoft.data.\n length + nToAddBefore)\n", (5503, 5655), False, 'import lal\n'), ((5705, 5729), 'numpy.zeros', 'np.zeros', (['ht.data.length'], {}), '(ht.data.length)\n', (5713, 5729), True, 'import numpy as np\n'), ((6378, 6400), 'numpy.min', 'np.min', (['hoft.data.data'], {}), '(hoft.data.data)\n', (6384, 6400), True, 'import numpy as np\n'), ((6402, 6424), 'numpy.max', 'np.max', (['hoft.data.data'], {}), '(hoft.data.data)\n', (6408, 6424), True, 'import numpy as np\n'), ((6764, 6786), 'numpy.max', 'np.max', (['hoft.data.data'], {}), '(hoft.data.data)\n', (6770, 6786), True, 'import numpy as np\n'), ((8021, 8088), 'os.system', 'os.system', (["('echo ' + fname + ' | lalapps_path2cache > test.cache')"], {}), "('echo ' + fname + ' | lalapps_path2cache > test.cache')\n", (8030, 8088), False, 'import os\n'), ((8249, 8302), 'RIFT.lalsimutils.frame_data_to_hoft', 'lalsimutils.frame_data_to_hoft', (['"""test.cache"""', 'channel'], {}), "('test.cache', channel)\n", (8279, 8302), True, 'import RIFT.lalsimutils as lalsimutils\n'), ((8510, 8536), 'numpy.argmax', 'np.argmax', (['hoft2.data.data'], {}), '(hoft2.data.data)\n', (8519, 8536), True, 'import numpy as np\n'), ((9076, 9121), 'matplotlib.pyplot.plot', 'plt.plot', (['tvals2', 'hoft2.data.data'], {'label': '"""Fr"""'}), "(tvals2, hoft2.data.data, label='Fr')\n", (9084, 9121), True, 'from matplotlib import pyplot as plt\n'), ((9124, 9154), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['(tcrit - 1)', '(tcrit + 1)'], {}), '(tcrit - 1, tcrit + 1)\n', (9132, 9154), True, 'from matplotlib import pyplot as plt\n'), ((9154, 9199), 'matplotlib.pyplot.plot', 'plt.plot', (['tvals', 'hoft.data.data'], {'label': '"""orig"""'}), "(tvals, hoft.data.data, label='orig')\n", (9162, 9199), True, 'from matplotlib import pyplot as plt\n'), ((9202, 9214), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9212, 9214), True, 'from matplotlib import pyplot as plt\n'), ((6991, 7015), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (7013, 7015), False, 'import matplotlib\n'), ((7024, 7048), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (7046, 7048), False, 'import matplotlib\n'), ((8657, 8679), 'numpy.max', 'np.max', (['hoft.data.data'], {}), '(hoft.data.data)\n', (8663, 8679), True, 'import numpy as np\n'), ((8754, 8777), 'numpy.max', 'np.max', (['hoft2.data.data'], {}), '(hoft2.data.data)\n', (8760, 8777), True, 'import numpy as np\n'), ((9258, 9268), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9266, 9268), True, 'from matplotlib import pyplot as plt\n'), ((7346, 7370), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (7368, 7370), False, 'import matplotlib\n'), ((8360, 8388), 'numpy.arange', 'np.arange', (['hoft2.data.length'], {}), '(hoft2.data.length)\n', (8369, 8388), True, 'import numpy as np\n'), ((8457, 8484), 'numpy.arange', 'np.arange', (['hoft.data.length'], {}), '(hoft.data.length)\n', (8466, 8484), True, 'import numpy as np\n'), ((9359, 9394), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(tcrit - 0.1)', '(tcrit + 0.01)'], {}), '(tcrit - 0.1, tcrit + 0.01)\n', (9367, 9394), True, 'from matplotlib import pyplot as plt\n'), ((9402, 9418), 'matplotlib.pyplot.figure', 'plt.figure', (['indx'], {}), '(indx)\n', (9412, 9418), True, 'from matplotlib import pyplot as plt\n')] |
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Original source from https://github.com/kubeflow/kfserving/blob/master/python/
# alibiexplainer/alibiexplainer/anchor_images.py
# and then modified.
#
import logging
from typing import List, Optional
import alibi
import numpy as np
from alibi.api.interfaces import Explanation
from alibiexplainer.constants import SELDON_LOGLEVEL
from alibiexplainer.explainer_wrapper import ExplainerWrapper
# Configure root logging once, at the Seldon-wide level, when this module loads.
logging.basicConfig(level=SELDON_LOGLEVEL)
class AnchorImages(ExplainerWrapper):
    """Adapter exposing a prebuilt alibi AnchorImage explainer through the
    common ExplainerWrapper interface."""

    def __init__(
        self, explainer: Optional[alibi.explainers.AnchorImage], **kwargs
    ) -> None:
        # A ready-made explainer is mandatory; this wrapper does not build one.
        if explainer is None:
            raise Exception("Anchor images requires a built explainer")
        self.anchors_image = explainer
        self.kwargs = kwargs

    def explain(self, inputs: List) -> Explanation:
        """Run the anchor-image explainer on the first image in ``inputs``."""
        batch = np.array(inputs)
        logging.info("Calling explain on image of shape %s", (batch.shape,))
        logging.info("anchor image call with %s", self.kwargs)
        # Only the first image of the batch is explained.
        return self.anchors_image.explain(batch[0], **self.kwargs)
| [
"logging.info",
"numpy.array",
"logging.basicConfig"
] | [((977, 1019), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'SELDON_LOGLEVEL'}), '(level=SELDON_LOGLEVEL)\n', (996, 1019), False, 'import logging\n'), ((1404, 1420), 'numpy.array', 'np.array', (['inputs'], {}), '(inputs)\n', (1412, 1420), True, 'import numpy as np\n'), ((1429, 1495), 'logging.info', 'logging.info', (['"""Calling explain on image of shape %s"""', '(arr.shape,)'], {}), "('Calling explain on image of shape %s', (arr.shape,))\n", (1441, 1495), False, 'import logging\n'), ((1504, 1558), 'logging.info', 'logging.info', (['"""anchor image call with %s"""', 'self.kwargs'], {}), "('anchor image call with %s', self.kwargs)\n", (1516, 1558), False, 'import logging\n')] |
import numpy as np
from preprocessing import extractFeatures
def predict(wav, model):
    """Classify a machine-sound recording and report any detected anomaly.

    Args:
        wav: input accepted by preprocessing.extractFeatures (e.g. a WAV path).
        model: a model exposing ``predict``; expected to return four class
            probabilities per sample (OK, imbalance, clog, voltage change).

    Returns:
        dict with "Anomaly" (bool) and "Details" ("Message" plus the list of
        the four class probabilities).
    """
    mfccs = _fit_width(extractFeatures(wav), 433)
    # The network expects a single (40, 433, 1) "image" of MFCC features.
    modelInput = mfccs.reshape(1, 40, 433, 1)
    results = model.predict(modelInput)
    predProbaList = [results[:,0][0], results[:,1][0], results[:,2][0], results[:,3][0]]
    problem = int(np.argmax(results))
    # Map the winning class index to a message; index 0 means no anomaly.
    messages = {
        1: 'Component is imbalanced',
        2: 'Component is clogged',
        3: 'Voltage change',
    }
    pred = problem in messages
    detail = [messages.get(problem, 'Component OK')]
    response = {
        "Anomaly": bool(pred),
        "Details": {
            "Message": detail[0],
            "Probabilities": predProbaList
        }
    }
    return response


def _fit_width(mfccs, width=433):
    """Truncate or right-pad an MFCC matrix so it has exactly ``width`` columns.

    Padding repeats trailing frames, as before, but loops until full width is
    reached. BUGFIX: the previous single concatenation could still fall short
    of ``width`` for clips with fewer than width/2 frames, which then crashed
    the reshape in predict().
    """
    if mfccs.shape[1] > width:
        return mfccs[:, :width]
    while mfccs.shape[1] < width:
        missing = width - mfccs.shape[1]
        # Take the trailing `missing` frames (or the whole clip if shorter).
        tail = mfccs[:, -missing:]
        mfccs = np.concatenate((mfccs, tail), axis=1)
    return mfccs
"numpy.concatenate",
"preprocessing.extractFeatures",
"numpy.argmax"
] | [((100, 120), 'preprocessing.extractFeatures', 'extractFeatures', (['wav'], {}), '(wav)\n', (115, 120), False, 'from preprocessing import extractFeatures\n'), ((509, 527), 'numpy.argmax', 'np.argmax', (['results'], {}), '(results)\n', (518, 527), True, 'import numpy as np\n'), ((227, 329), 'numpy.concatenate', 'np.concatenate', (['(mfccs, mfccs[:, mfccs.shape[1] - (433 - mfccs.shape[1]):mfccs.shape[1]])'], {'axis': '(1)'}), '((mfccs, mfccs[:, mfccs.shape[1] - (433 - mfccs.shape[1]):\n mfccs.shape[1]]), axis=1)\n', (241, 329), True, 'import numpy as np\n')] |
"""
Imports we need.
Note: You may _NOT_ add any more imports than these.
"""
import argparse
import imageio
import logging
import numpy as np
from PIL import Image
def load_image(filename):
    """Loads the provided image file, and returns it as a numpy array.

    The image is opened inside a context manager so the underlying file
    handle is closed promptly; previously Image.open's lazily-opened file
    was never closed.
    """
    with Image.open(filename) as im:
        # np.array forces the pixel data to be read before the file closes.
        return np.array(im)
def build_A(pts1, pts2):
    """
    Constructs the intermediate matrix A used in the total least squares
    computation of an homography mapping pts1 to pts2.

    Args:
        pts1: An N-by-2 dimensional array of source points. This pts1[0,0] is x1, pts1[0,1] is y1, etc...
        pts2: An N-by-2 dimensional array of destination points.

    Returns:
        A 2Nx9 matrix A that we'll use to solve for h
    """
    if pts1.shape != pts2.shape:
        raise ValueError('The source points for homography computation must have the same shape (%s vs %s)' % (
            str(pts1.shape), str(pts2.shape)))
    if pts1.shape[0] < 4:
        raise ValueError('There must be at least 4 pairs of correspondences.')
    # Two DLT rows per correspondence. BUGFIX/generalization: the previous
    # version hard-coded exactly the first four pairs, silently ignoring any
    # additional correspondences; now every pair contributes, so A is truly
    # 2N x 9 as documented.
    rows = []
    for (x, y), (xp, yp) in zip(pts1, pts2):
        rows.append([x, y, 1, 0, 0, 0, -xp * x, -xp * y, -xp])
        rows.append([0, 0, 0, x, y, 1, -yp * x, -yp * y, -yp])
    return np.array(rows)
def compute_H(pts1, pts2):
    """
    Computes an homography mapping one set of co-planar points (pts1)
    to another (pts2).

    Args:
        pts1: An N-by-2 dimensional array of source points. This pts1[0,0] is x1, pts1[0,1] is y1, etc...
        pts2: An N-by-2 dimensional array of destination points.

    Returns:
        A 3x3 homography matrix that maps homogeneous coordinates of pts1 to
        those in pts2 (defined up to scale, as is standard for homographies).
    """
    # Build the 2Nx9 DLT system.
    A = build_A(pts1, pts2)
    # Total least squares: h is the null-space direction of A, i.e. the
    # eigenvector of A^T A with the smallest eigenvalue.
    AtA = A.T @ A
    # AtA is symmetric, so use eigh: unlike eig it guarantees real
    # eigenvalues/eigenvectors and returns them sorted ascending, making the
    # smallest-eigenvalue eigenvector simply column 0.  (The eigenvector's
    # sign is arbitrary, but a homography is only defined up to scale.)
    eig_vals, eig_vecs = np.linalg.eigh(AtA)
    return eig_vecs[:, 0].reshape(3, 3)
def bilinear_interp(image, point):
    """
    Looks up the pixel values in an image at a given point using bilinear
    interpolation.

    NOTE(review): despite the original docstring saying point is (x, y), the
    code unpacks the FIRST element as the column coordinate and the SECOND as
    the row coordinate -- matching how warp_homography calls it with
    (p[1], p[0]). Behavior is kept as-is.

    Args:
        image: The image to sample (indexed image[row, col]).
        point: A pair of floating point coordinates; see the note above.

    Returns:
        The interpolated pixel value (scalar or per-channel array).
    """
    y, x = point
    # BUGFIX: np.int was removed in NumPy 1.24; the builtin int() performs the
    # same truncation for the non-negative coordinates used here.
    i = int(x)
    j = int(y)
    # No (i+1, j+1) neighbour at the far edges: fall back to the nearest pixel.
    if i + 1 >= image.shape[0] or j + 1 >= image.shape[1]:
        return image[i, j]
    # Fractional offsets inside the cell.
    a = x - i
    b = y - j
    # Weighted average of the four surrounding pixels (weights sum to 1).
    return ((1 - a) * (1 - b) * image[i, j]
            + a * (1 - b) * image[i + 1, j]
            + a * b * image[i + 1, j + 1]
            + (1 - a) * b * image[i, j + 1])
def apply_homography(H, points):
    """
    Applies the homography matrix H to the provided cartesian points and returns the results
    as cartesian coordinates.

    Args:
        H: A 3x3 floating point homography matrix.
        points: An Nx2 matrix of x,y points to apply the homography to.

    Returns:
        An Nx2 matrix of points that are the result of applying H to points.
    """
    # Lift the Nx2 cartesian points to Nx3 homogeneous coordinates.
    ones = np.ones((points.shape[0], 1))
    homogeneous = np.hstack((points, ones))
    # Transform all points at once; each column of `mapped` is one point.
    mapped = H @ homogeneous.T
    # Divide out the scale component and drop it to get back to cartesian.
    return (mapped[:2] / mapped[2]).T
def warp_homography(source, target_shape, Hinv):
    """
    Warp the source image into the target coordinate frame using a provided
    inverse homography transformation.

    Args:
        source: A 3-channel image represented as a numpy array.
        target_shape: A 3-tuple indicating the desired results height, width, and channels, respectively
        Hinv: A homography that maps locations in the result to locations in the source image.

    Returns:
        An image of target_shape with source's type containing the source image warped by the homography.
    """
    result = np.zeros(target_shape, dtype=source.dtype)
    # Inverse warping: for every output pixel, find where it came from in the
    # source plane and sample there with bilinear interpolation.
    for x in range(len(result)):
        for y in range(len(result[0])):
            p = apply_homography(Hinv, np.array([[x, y]]))[0]
            # BUGFIX: the upper bound must be exclusive. With '<=', a mapped
            # coordinate landing exactly on source.shape produced an
            # out-of-bounds index inside bilinear_interp.
            if 0 <= p[0] < source.shape[0] and 0 <= p[1] < source.shape[1]:
                result[x, y] = bilinear_interp(source, (p[1], p[0]))
    return result
def rectify_image(image, source_points, target_points, crop):
    """
    Warps the input image source_points to the plane defined by target_points.

    Args:
        image: The input image to warp.
        source_points: The coordinates in the input image to warp from.
        target_points: The coordinates to warp the corresponding source points to.
        crop: If False, all pixels from the input image are shown. If True, the
            image is cropped to not show any black pixels.

    Returns:
        A new image containing the input image rectified to target_points.
    """
    # Compute the rectifying homography H that warps the source points to the
    # target points (columns swapped to row/col order for compute_H).
    H = compute_H(source_points[:, [1, 0]], target_points[:, [1, 0]])
    # Warp the bounding box of the image to find the warped bounding box in
    # the rectified space.
    src_box = np.array([
        [0, 0],
        [image.shape[0], 0],
        [0, image.shape[1]],
        [image.shape[0], image.shape[1]]
    ])
    tar_box = apply_homography(H, src_box)
    # Find the min_x and min_y values in the warped space to keep.
    if crop:
        # Second smallest x/y so no black border pixels are shown.
        min_y = sorted(tar_box[:, 1])[1]
        min_x = sorted(tar_box[:, 0])[1]
    else:
        # Keep everything: minimum x/y of the warped bounding box.
        min_y = np.min(tar_box[:, 1])
        min_x = np.min(tar_box[:, 0])
    # Translation matrix T such that min_x and min_y go to zero.
    t = np.array([
        [1, 0, -min_x],
        [0, 1, -min_y],
        [0, 0, 1]
    ])
    # Rectified bounding box = translation applied to the warped box
    # (in homogeneous coordinates).
    tar_box = np.append(tar_box, np.ones((len(tar_box), 1)), axis=1)
    tar_box = (t @ tar_box.T)[:2].T
    # Inverse homography mapping the rectified bounding box back to the
    # original bounding box.
    Hinv = compute_H(tar_box, src_box)
    # Determine the shape of the output image.  BUG FIX: np.int was removed
    # from NumPy (>= 1.24); use the builtin int instead.
    if crop:
        # Second highest X and Y values of the rectified bounding box.
        max_y = int(sorted(tar_box[:, 1])[-2])
        max_x = int(sorted(tar_box[:, 0])[-2])
    else:
        # Maximum X and Y values of the rectified bounding box.
        max_y = int(np.max(tar_box[:, 1]))
        max_x = int(np.max(tar_box[:, 0]))
    # Rectify the image with the inverse homography and return the result.
    return warp_homography(image, (max_x, max_y, 3), Hinv)
def blend_with_mask(source, target, mask):
    """
    Blends the source image with the target image according to the mask.

    Pixels with value "1" are source pixels, "0" are target pixels, and
    intermediate values are interpolated linearly between the two.

    Args:
        source: The source image.
        target: The target image (assumed same shape as source — confirm).
        mask: The 3-channel mask to use; its greyscale value is the weight.

    Returns:
        A new image (same dtype as source) representing the linear
        combination of the mask (and its inverse) with source and target,
        respectively.
    """
    # BUG FIX: np.float was removed from NumPy (>= 1.24); cast with the
    # builtin float instead.
    mask = mask.astype(float)
    # Greyscale blend weight in [0, 1] via luma coefficients:
    # https://en.wikipedia.org/wiki/Grayscale
    weight = (mask[..., 0] * 0.2126
              + mask[..., 1] * 0.7152
              + mask[..., 2] * 0.0722) / 255.0
    # Linear blend, vectorized over all pixels; the weight broadcasts over
    # the channel axis.
    blended = (1 - weight[..., None]) * target + weight[..., None] * source
    # Convert the result to be the same type as source and return it.
    return blended.astype(source.dtype)
def composite_image(source, target, source_pts, target_pts, mask):
    """
    Composites a masked planar region of the source image onto a
    corresponding planar region of the target image via homography warping.

    Args:
        source: The source image to warp onto the target.
        target: The target image that the source image will be warped to.
        source_pts: The coordinates on the source image.
        target_pts: The corresponding coordinates on the target image.
        mask: A greyscale image representing the mask to use.
    """
    # Homography from the source points to the target points, computed with
    # the columns swapped into row/col order.
    H = compute_H(source_pts[:, [1, 0]], target_pts[:, [1, 0]])
    # warp_homography wants the inverse mapping (target -> source).
    Hinv = np.linalg.inv(H)
    target_frame = (target.shape[0], target.shape[1], 3)
    warped = warp_homography(source, target_frame, Hinv)
    # Blend the warped source into the target under the mask.
    return blend_with_mask(warped, target, mask)
def rectify(args):
    """
    The 'main' function for the rectify command.

    Args:
        args: Parsed command-line namespace with source, dst, input, output
            and crop attributes.
    """
    # Loads the source points into a 4-by-2 array.
    source_points = np.array(args.source).reshape(4, 2)
    # Load the destination points, or select some smart default ones if None.
    # BUG FIX: compare to None with `is`, not `==`.
    if args.dst is None:
        height = np.abs(
            np.max(source_points[:, 1]) - np.min(source_points[:, 1]))
        width = np.abs(
            np.max(source_points[:, 0]) - np.min(source_points[:, 0]))
        args.dst = [0.0, height, 0.0, 0.0, width, 0.0, width, height]
    target_points = np.array(args.dst).reshape(4, 2)
    # Load the input image (lazy %-style logging args).
    logging.info('Loading input image %s', args.input)
    inputImage = load_image(args.input)
    # Compute the rectified image.
    result = rectify_image(inputImage, source_points, target_points, args.crop)
    # Save the result.
    logging.info('Saving result to %s', args.output)
    imageio.imwrite(args.output, result)
def composite(args):
    """
    The 'main' function for the composite command.

    Args:
        args: Parsed command-line namespace with input, target, mask,
            source, dst and output attributes.
    """
    # Load the input image (lazy %-style logging args).
    logging.info('Loading input image %s', args.input)
    inputImage = load_image(args.input)
    # Load the target image.
    logging.info('Loading target image %s', args.target)
    targetImage = load_image(args.target)
    # Load the mask image.
    logging.info('Loading mask image %s', args.mask)
    maskImage = load_image(args.mask)
    # If omitted, default the source points to the whole input image.
    # BUG FIX: compare to None with `is`, not `==`.
    if args.source is None:
        (height, width, _) = inputImage.shape
        args.source = [0.0, height, 0.0, 0.0, width, 0.0, width, height]
    # Loads the source points into a 4-by-2 array.
    source_points = np.array(args.source).reshape(4, 2)
    # Loads the target points into a 4-by-2 array.
    target_points = np.array(args.dst).reshape(4, 2)
    # Compute the composite image.
    result = composite_image(inputImage, targetImage,
                             source_points, target_points, maskImage)
    # Save the result.
    logging.info('Saving result to %s', args.output)
    imageio.imwrite(args.output, result)
"""
The main function
"""
if __name__ == '__main__':
logging.basicConfig(
format='%(levelname)s: %(message)s', level=logging.INFO)
logging.basicConfig(
format='%(levelname)s: %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(
description='Warps an image by the computed homography between two rectangles.')
subparsers = parser.add_subparsers(help='sub-command help')
parser_rectify = subparsers.add_parser(
'rectify', help='Rectifies an image such that the input rectangle is front-parallel.')
parser_rectify.add_argument('input', type=str, help='The image to warp.',
default="example_rectify_input.jpg")
parser_rectify.add_argument('source', metavar='f', type=float, nargs=8,
help='A floating point value part of x1 y1 ... x4 y4',
default=[3, 481, 80, 0, 602, 215, 637, 475])
parser_rectify.add_argument(
'--crop', help='If true, the result image is cropped.', action='store_true', default=False)
parser_rectify.add_argument('--dst', metavar='x', type=float, nargs='+',
default=None, help='The four destination points in the output image.')
parser_rectify.add_argument(
'output', type=str, help='Where to save the result.',
default="myexample_rectify_output.jpg")
parser_rectify.set_defaults(func=rectify)
parser_composite = subparsers.add_parser(
'composite', help='Warps the input image onto the target points of the target image.')
parser_composite.add_argument(
'input', type=str, help='The source image to warp.')
parser_composite.add_argument(
'target', type=str, help='The target image to warp to.')
parser_composite.add_argument('dst', metavar='f', type=float, nargs=8,
help='A floating point value part of x1 y1 ... x4 y4 defining the box on the target image.')
parser_composite.add_argument(
'mask', type=str, help='A mask image the same size as the target image.')
parser_composite.add_argument('--source', metavar='x', type=float, nargs='+',
default=None, help='The four source points in the input image. If ommited, the whole image is used.')
parser_composite.add_argument(
'output', type=str, help='Where to save the result.')
parser_composite.set_defaults(func=composite)
args = parser.parse_args()
args.func(args)
| [
"numpy.zeros_like",
"argparse.ArgumentParser",
"logging.basicConfig",
"numpy.zeros",
"numpy.linalg.eig",
"PIL.Image.open",
"logging.info",
"numpy.min",
"numpy.max",
"numpy.int",
"numpy.array",
"numpy.linalg.inv",
"imageio.imwrite"
] | [((277, 297), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (287, 297), False, 'from PIL import Image\n'), ((309, 321), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (317, 321), True, 'import numpy as np\n'), ((1096, 1979), 'numpy.array', 'np.array', (['[[pts1[0, 0], pts1[0, 1], 1, 0, 0, 0, -pts2[0, 0] * pts1[0, 0], -pts2[0, 0] *\n pts1[0, 1], -pts2[0, 0]], [0, 0, 0, pts1[0, 0], pts1[0, 1], 1, -pts2[0,\n 1] * pts1[0, 0], -pts2[0, 1] * pts1[0, 1], -pts2[0, 1]], [pts1[1, 0],\n pts1[1, 1], 1, 0, 0, 0, -pts2[1, 0] * pts1[1, 0], -pts2[1, 0] * pts1[1,\n 1], -pts2[1, 0]], [0, 0, 0, pts1[1, 0], pts1[1, 1], 1, -pts2[1, 1] *\n pts1[1, 0], -pts2[1, 1] * pts1[1, 1], -pts2[1, 1]], [pts1[2, 0], pts1[2,\n 1], 1, 0, 0, 0, -pts2[2, 0] * pts1[2, 0], -pts2[2, 0] * pts1[2, 1], -\n pts2[2, 0]], [0, 0, 0, pts1[2, 0], pts1[2, 1], 1, -pts2[2, 1] * pts1[2,\n 0], -pts2[2, 1] * pts1[2, 1], -pts2[2, 1]], [pts1[3, 0], pts1[3, 1], 1,\n 0, 0, 0, -pts2[3, 0] * pts1[3, 0], -pts2[3, 0] * pts1[3, 1], -pts2[3, 0\n ]], [0, 0, 0, pts1[3, 0], pts1[3, 1], 1, -pts2[3, 1] * pts1[3, 0], -\n pts2[3, 1] * pts1[3, 1], -pts2[3, 1]]]'], {}), '([[pts1[0, 0], pts1[0, 1], 1, 0, 0, 0, -pts2[0, 0] * pts1[0, 0], -\n pts2[0, 0] * pts1[0, 1], -pts2[0, 0]], [0, 0, 0, pts1[0, 0], pts1[0, 1],\n 1, -pts2[0, 1] * pts1[0, 0], -pts2[0, 1] * pts1[0, 1], -pts2[0, 1]], [\n pts1[1, 0], pts1[1, 1], 1, 0, 0, 0, -pts2[1, 0] * pts1[1, 0], -pts2[1, \n 0] * pts1[1, 1], -pts2[1, 0]], [0, 0, 0, pts1[1, 0], pts1[1, 1], 1, -\n pts2[1, 1] * pts1[1, 0], -pts2[1, 1] * pts1[1, 1], -pts2[1, 1]], [pts1[\n 2, 0], pts1[2, 1], 1, 0, 0, 0, -pts2[2, 0] * pts1[2, 0], -pts2[2, 0] *\n pts1[2, 1], -pts2[2, 0]], [0, 0, 0, pts1[2, 0], pts1[2, 1], 1, -pts2[2,\n 1] * pts1[2, 0], -pts2[2, 1] * pts1[2, 1], -pts2[2, 1]], [pts1[3, 0],\n pts1[3, 1], 1, 0, 0, 0, -pts2[3, 0] * pts1[3, 0], -pts2[3, 0] * pts1[3,\n 1], -pts2[3, 0]], [0, 0, 0, pts1[3, 0], pts1[3, 1], 1, -pts2[3, 1] *\n pts1[3, 0], -pts2[3, 1] * pts1[3, 1], -pts2[3, 
1]]])\n', (1104, 1979), True, 'import numpy as np\n'), ((2667, 2685), 'numpy.linalg.eig', 'np.linalg.eig', (['AtA'], {}), '(AtA)\n', (2680, 2685), True, 'import numpy as np\n'), ((3494, 3503), 'numpy.int', 'np.int', (['x'], {}), '(x)\n', (3500, 3503), True, 'import numpy as np\n'), ((3512, 3521), 'numpy.int', 'np.int', (['y'], {}), '(y)\n', (3518, 3521), True, 'import numpy as np\n'), ((5584, 5626), 'numpy.zeros', 'np.zeros', (['target_shape'], {'dtype': 'source.dtype'}), '(target_shape, dtype=source.dtype)\n', (5592, 5626), True, 'import numpy as np\n'), ((7315, 7413), 'numpy.array', 'np.array', (['[[0, 0], [image.shape[0], 0], [0, image.shape[1]], [image.shape[0], image.\n shape[1]]]'], {}), '([[0, 0], [image.shape[0], 0], [0, image.shape[1]], [image.shape[0],\n image.shape[1]]])\n', (7323, 7413), True, 'import numpy as np\n'), ((7987, 8040), 'numpy.array', 'np.array', (['[[1, 0, -min_x], [0, 1, -min_y], [0, 0, 1]]'], {}), '([[1, 0, -min_x], [0, 1, -min_y], [0, 0, 1]])\n', (7995, 8040), True, 'import numpy as np\n'), ((9907, 9928), 'numpy.zeros_like', 'np.zeros_like', (['source'], {}), '(source)\n', (9920, 9928), True, 'import numpy as np\n'), ((11244, 11260), 'numpy.linalg.inv', 'np.linalg.inv', (['H'], {}), '(H)\n', (11257, 11260), True, 'import numpy as np\n'), ((12097, 12148), 'logging.info', 'logging.info', (["('Loading input image %s' % args.input)"], {}), "('Loading input image %s' % args.input)\n", (12109, 12148), False, 'import logging\n'), ((12333, 12382), 'logging.info', 'logging.info', (["('Saving result to %s' % args.output)"], {}), "('Saving result to %s' % args.output)\n", (12345, 12382), False, 'import logging\n'), ((12389, 12425), 'imageio.imwrite', 'imageio.imwrite', (['args.output', 'result'], {}), '(args.output, result)\n', (12404, 12425), False, 'import imageio\n'), ((12548, 12599), 'logging.info', 'logging.info', (["('Loading input image %s' % args.input)"], {}), "('Loading input image %s' % args.input)\n", (12560, 12599), False, 'import 
logging\n'), ((12675, 12728), 'logging.info', 'logging.info', (["('Loading target image %s' % args.target)"], {}), "('Loading target image %s' % args.target)\n", (12687, 12728), False, 'import logging\n'), ((12804, 12853), 'logging.info', 'logging.info', (["('Loading mask image %s' % args.mask)"], {}), "('Loading mask image %s' % args.mask)\n", (12816, 12853), False, 'import logging\n'), ((13514, 13563), 'logging.info', 'logging.info', (["('Saving result to %s' % args.output)"], {}), "('Saving result to %s' % args.output)\n", (13526, 13563), False, 'import logging\n'), ((13570, 13606), 'imageio.imwrite', 'imageio.imwrite', (['args.output', 'result'], {}), '(args.output, result)\n', (13585, 13606), False, 'import imageio\n'), ((13666, 13742), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)s: %(message)s', level=logging.INFO)\n", (13685, 13742), False, 'import logging\n'), ((13756, 13832), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)s: %(message)s', level=logging.INFO)\n", (13775, 13832), False, 'import logging\n'), ((13855, 13964), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Warps an image by the computed homography between two rectangles."""'}), "(description=\n 'Warps an image by the computed homography between two rectangles.')\n", (13878, 13964), False, 'import argparse\n'), ((7836, 7857), 'numpy.min', 'np.min', (['tar_box[:, 1]'], {}), '(tar_box[:, 1])\n', (7842, 7857), True, 'import numpy as np\n'), ((7873, 7894), 'numpy.min', 'np.min', (['tar_box[:, 0]'], {}), '(tar_box[:, 0])\n', (7879, 7894), True, 'import numpy as np\n'), ((8932, 8953), 'numpy.max', 'np.max', (['tar_box[:, 1]'], {}), '(tar_box[:, 1])\n', (8938, 8953), True, 'import numpy as np\n'), ((8977, 8998), 'numpy.max', 'np.max', (['tar_box[:, 0]'], {}), '(tar_box[:, 
0])\n', (8983, 8998), True, 'import numpy as np\n'), ((11611, 11632), 'numpy.array', 'np.array', (['args.source'], {}), '(args.source)\n', (11619, 11632), True, 'import numpy as np\n'), ((12032, 12050), 'numpy.array', 'np.array', (['args.dst'], {}), '(args.dst)\n', (12040, 12050), True, 'import numpy as np\n'), ((13188, 13209), 'numpy.array', 'np.array', (['args.source'], {}), '(args.source)\n', (13196, 13209), True, 'import numpy as np\n'), ((13295, 13313), 'numpy.array', 'np.array', (['args.dst'], {}), '(args.dst)\n', (13303, 13313), True, 'import numpy as np\n'), ((11787, 11814), 'numpy.max', 'np.max', (['source_points[:, 1]'], {}), '(source_points[:, 1])\n', (11793, 11814), True, 'import numpy as np\n'), ((11817, 11844), 'numpy.min', 'np.min', (['source_points[:, 1]'], {}), '(source_points[:, 1])\n', (11823, 11844), True, 'import numpy as np\n'), ((11882, 11909), 'numpy.max', 'np.max', (['source_points[:, 0]'], {}), '(source_points[:, 0])\n', (11888, 11909), True, 'import numpy as np\n'), ((11912, 11939), 'numpy.min', 'np.min', (['source_points[:, 0]'], {}), '(source_points[:, 0])\n', (11918, 11939), True, 'import numpy as np\n'), ((5857, 5875), 'numpy.array', 'np.array', (['[[x, y]]'], {}), '([[x, y]])\n', (5865, 5875), True, 'import numpy as np\n')] |
"""
Experiment to compute the fraction of unique filtration values for cells in an Alpha complex
"""
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from dmt.complexes import AlphaComplex
RESULT_PATH = os.path.join(os.path.dirname(__file__), "reducible.csv")
PLOT_PATH = os.path.join(os.path.dirname(__file__), "reducible_fractions.pdf")
def get_points(point_count, dim):
    """Draw point_count uniform random points in the unit dim-cube.

    Uniformly distributed points are used because the asymptotic results
    quoted in this module are only known for that distribution.
    """
    return np.random.random(size=(point_count, dim))
def asymptotic_obtuseness_probability():
    """Asymptotic probability that a Delaunay triangle is obtuse.

    Holds for uniformly distributed points; see
    https://math.stackexchange.com/a/2839303
    """
    return 1 / 2
def asymptotic_triangle_fraction():
    """Asymptotic fraction of cells that are triangles.

    Each triangle has three edges, and each inner edge borders two
    triangles; boundary edges are asymptotically negligible, so the ratio
    e:t tends to 3:2.  With the Euler characteristic v + t - e = 1 this
    gives v ~ t/2 asymptotically, hence triangles make up one third of all
    cells.
    """
    return 1 / 3
def asymptotic_reducible_cell_fraction():
    """Asymptotic fraction of reducible cells.

    Reductions depend only on the triangles, and each reduction reduces
    two cells — hence the factor of two.
    """
    triangle_fraction = asymptotic_triangle_fraction()
    obtuse_probability = asymptotic_obtuseness_probability()
    return 2 * triangle_fraction * obtuse_probability
def create_complexes(point_counts, dim, repeat):
    """Build Alpha complexes on nested point samples and tabulate cell stats.

    For each of `repeat` realizations, one point cloud of
    max(point_counts) points is drawn, and complexes are built on its
    prefixes — so the samples within a repeat are nested.

    Args:
        point_counts: iterable of sample sizes to build complexes for.
        dim: ambient dimension of the point samples.
        repeat: number of independent realizations.

    Returns:
        (results, complex_df): per-complex summary DataFrame (with derived
        fraction columns) and the concatenated per-cell DataFrame.
    """
    cplx_dfs = []
    results = []
    for r in range(repeat):
        # One point cloud per repeat; prefixes give nested samples.
        points = get_points(max(point_counts), dim)
        for point_count in point_counts:
            cplx = AlphaComplex(points[:point_count])
            # One row per cell: its filtration value and dimension.
            cplx_df = pd.DataFrame(data=dict(filtration=cplx.filtration, dim=cplx.cell_dimensions))
            # Attach, per cell, how often its filtration value occurs in the
            # whole complex (new column is named "filtration_count").
            cplx_df = cplx_df.join(cplx_df.filtration.value_counts(), on="filtration", rsuffix="_count")
            cplx_df["point_count"] = point_count
            cplx_df["repeat"] = r
            cplx_dfs.append(cplx_df)
            # A positive-dimensional cell counts as reducible when its
            # filtration value is shared with at least one other cell.
            results.append({"point_count": point_count,
                            "repeat": r,
                            "complex_size": len(cplx_df),
                            "0-cells": sum(cplx_df.dim == 0),
                            "1-cells": sum(cplx_df.dim == 1),
                            "2-cells": sum(cplx_df.dim == 2),
                            "reducible_cells": sum((cplx_df.dim > 0) & (cplx_df.filtration_count > 1)),
                            "reducible_2-cells": sum((cplx_df.dim == 2) & (cplx_df.filtration_count > 1))})
    results = pd.DataFrame(results)
    # Derived fractions consumed by plot_results.
    results["reducible_fraction"] = results["reducible_cells"] / results["complex_size"]
    results["reducible_2-cell_fraction"] = results["reducible_2-cells"] / results["2-cells"]
    results["2-cell_fraction"] = results["2-cells"] / results["complex_size"]
    complex_df = pd.concat(cplx_dfs)
    return results, complex_df
def plot_results(results):
    """Plot the measured cell fractions against point count, with asymptotes.

    Args:
        results: DataFrame with point_count, 2-cell_fraction,
            reducible_fraction and reducible_2-cell_fraction columns.
    """
    plot_df = pd.melt(results, id_vars=["point_count"],
                      value_vars=["2-cell_fraction", "reducible_fraction", "reducible_2-cell_fraction"],
                      value_name="Fraction",
                      var_name="Type")
    plot_df["Asymptotic"] = False
    point_counts = sorted(set(plot_df["point_count"]))
    # Append one constant (asymptotic) line per fraction type.  This replaces
    # three copy-pasted concat blocks; the order is preserved.
    asymptotes = [
        ("2-cell_fraction", asymptotic_triangle_fraction()),
        ("reducible_2-cell_fraction", asymptotic_obtuseness_probability()),
        ("reducible_fraction", asymptotic_reducible_cell_fraction()),
    ]
    for fraction_type, value in asymptotes:
        plot_df = pd.concat([plot_df, pd.DataFrame({"point_count": point_counts,
                                                  "Type": [fraction_type] * len(point_counts),
                                                  "Fraction": [value] * len(point_counts),
                                                  "Asymptotic": [True] * len(point_counts)})])
    ax = sns.lineplot(x="point_count", y="Fraction", hue="Type", style="Asymptotic", data=plot_df)
    ax.set_ylim(-0.05, 1)
    ax.set_xlabel("Number of points")
def main():
    """Load (or recompute) the experiment results and save the fraction plot."""
    use_cached = True
    dim = 2
    point_counts = [10, 20, 50, 100, 200, 500, 1000, 2000]
    repeat = 50
    if not use_cached:
        # Recompute and persist the summary table.
        results, complex_df = create_complexes(point_counts, dim, repeat)
        results.to_csv(RESULT_PATH)
    else:
        results = pd.read_csv(RESULT_PATH)
    plot_results(results)
    plt.savefig(PLOT_PATH, bbox_inches="tight")
if __name__ == '__main__':
    main()
| [
"pandas.DataFrame",
"seaborn.lineplot",
"dmt.complexes.AlphaComplex",
"pandas.read_csv",
"os.path.dirname",
"numpy.random.random",
"pandas.melt",
"pandas.concat",
"matplotlib.pyplot.savefig"
] | [((274, 299), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (289, 299), False, 'import os\n'), ((343, 368), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (358, 368), False, 'import os\n'), ((444, 480), 'numpy.random.random', 'np.random.random', (['(point_count, dim)'], {}), '((point_count, dim))\n', (460, 480), True, 'import numpy as np\n'), ((2402, 2423), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (2414, 2423), True, 'import pandas as pd\n'), ((2701, 2720), 'pandas.concat', 'pd.concat', (['cplx_dfs'], {}), '(cplx_dfs)\n', (2710, 2720), True, 'import pandas as pd\n'), ((2795, 2968), 'pandas.melt', 'pd.melt', (['results'], {'id_vars': "['point_count']", 'value_vars': "['2-cell_fraction', 'reducible_fraction', 'reducible_2-cell_fraction']", 'value_name': '"""Fraction"""', 'var_name': '"""Type"""'}), "(results, id_vars=['point_count'], value_vars=['2-cell_fraction',\n 'reducible_fraction', 'reducible_2-cell_fraction'], value_name=\n 'Fraction', var_name='Type')\n", (2802, 2968), True, 'import pandas as pd\n'), ((4291, 4384), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""point_count"""', 'y': '"""Fraction"""', 'hue': '"""Type"""', 'style': '"""Asymptotic"""', 'data': 'plot_df'}), "(x='point_count', y='Fraction', hue='Type', style='Asymptotic',\n data=plot_df)\n", (4303, 4384), True, 'import seaborn as sns\n'), ((4782, 4825), 'matplotlib.pyplot.savefig', 'plt.savefig', (['PLOT_PATH'], {'bbox_inches': '"""tight"""'}), "(PLOT_PATH, bbox_inches='tight')\n", (4793, 4825), True, 'import matplotlib.pyplot as plt\n'), ((4607, 4631), 'pandas.read_csv', 'pd.read_csv', (['RESULT_PATH'], {}), '(RESULT_PATH)\n', (4618, 4631), True, 'import pandas as pd\n'), ((1475, 1509), 'dmt.complexes.AlphaComplex', 'AlphaComplex', (['points[:point_count]'], {}), '(points[:point_count])\n', (1487, 1509), False, 'from dmt.complexes import AlphaComplex\n')] |
from FT.weighted_tracts import load_ft, nodes_labels_mega, nodes_by_index_mega
import matplotlib.pyplot as plt
from FT.all_subj import all_subj_names
from dipy.tracking import utils
import numpy as np
from dipy.tracking.streamline import values_from_volume
import nibabel as nib
import os
index_to_text_file = r'C:\Users\Admin\my_scripts\aal\megaatlas\megaatlas2nii.txt'
subj = all_subj_names
weight_by='pasiS'
# Build, for each subject, a non-weighted (streamline-count) and a
# weighted (mean 'pasiS' value per connection) mega-atlas connectivity
# matrix, save both, and finally plot histograms of the last subject's
# matrices.
for s in subj:
    folder_name = r'C:\Users\Admin\my_scripts\Ax3D_Pack\V5' + s
    tract_path = folder_name+ r'\streamlines' + s + '_wholebrain.trk'
    streamlines = load_ft(tract_path)
    # Find the subject's .bvec file; the matching .nii shares its basename.
    # NOTE(review): if several .bvec files exist the last one wins, and if
    # none exists this raises NameError on bvec_file below — confirm.
    for file in os.listdir(folder_name):
        if file.endswith(".bvec"):
            bvec_file = os.path.join(folder_name, file)
    nii_file = bvec_file[:-4:]+'nii'
    # Atlas label volume (with affine) and the node ordering of the atlas.
    lab_labels_index, affine = nodes_by_index_mega(folder_name)
    labels_headers, idx = nodes_labels_mega(index_to_text_file)
    # Streamline-count connectivity matrix plus the per-pair streamlines.
    m, grouping = utils.connectivity_matrix(streamlines, lab_labels_index, affine=affine,
                                            return_mapping=True,
                                            mapping_as_streamlines=True)
    # Drop the background label (row/column 0) and reorder by atlas index.
    mm = m[1:]
    mm = mm[:,1:]
    mm = mm[idx]
    mm = mm[:, idx]
    # Volume holding the per-voxel weighting metric (e.g. '..._pasiS.nii').
    weight_by_file = nii_file[:-4:] + '_' + weight_by + '.nii'
    weight_by_img = nib.load(weight_by_file)
    weight_by_data = weight_by_img.get_data()
    # NOTE(review): this overwrites the atlas affine with the weight image's
    # affine, which is then used by values_from_volume — presumably
    # intentional, but verify the two volumes share a space.
    affine = weight_by_img.affine
    m_weighted = np.zeros((len(idx),len(idx)), dtype='float64')
    # Average the weighting metric along every streamline of each node pair.
    for pair, tracts in grouping.items():
        if pair[0] == 0 or pair[1] == 0:
            # Skip connections involving the background label.
            continue
        else:
            mean_vol_per_tract = []
            vol_per_tract = values_from_volume(weight_by_data, tracts, affine=affine)
            # NOTE(review): this inner `s` shadows the subject loop variable.
            for s in vol_per_tract:
                mean_vol_per_tract.append(np.mean(s))
            # Mean over streamlines (ignoring NaNs) = connection weight.
            mean_path_vol = np.nanmean(mean_vol_per_tract)
            m_weighted[pair[0]-1, pair[1]-1] = mean_path_vol
            m_weighted[pair[1]-1, pair[0]-1] = mean_path_vol
    mm_weighted = m_weighted[idx]
    mm_weighted = mm_weighted[:, idx]
    np.save(folder_name + r'\non-weighted_non-norm', mm)
    np.save(folder_name + r'\weighted_non-norm', mm_weighted)
    # Flatten for the histograms below.  23409 = 153**2 — presumably the
    # mega-atlas node count squared; confirm against the atlas file.
    nw = np.reshape(mm,(23409,))
    w = np.reshape(mm_weighted, (23409,))
# NOTE(review): these histograms only show the last subject processed,
# since nw/w are overwritten on every loop iteration.
plt.figure(figsize=[12,6])
ax0 = plt.subplot(1,2,1)
ax0.set_title('# tracts')
ax0.hist(nw[nw>0], bins=30)
ax1 = plt.subplot(1,2,2)
ax1.set_title('Average Pasi')
ax1.hist(w[w>0], bins=30)
plt.show()
| [
"matplotlib.pyplot.subplot",
"FT.weighted_tracts.nodes_labels_mega",
"numpy.save",
"matplotlib.pyplot.show",
"nibabel.load",
"dipy.tracking.utils.connectivity_matrix",
"dipy.tracking.streamline.values_from_volume",
"matplotlib.pyplot.figure",
"numpy.mean",
"FT.weighted_tracts.nodes_by_index_mega",... | [((580, 599), 'FT.weighted_tracts.load_ft', 'load_ft', (['tract_path'], {}), '(tract_path)\n', (587, 599), False, 'from FT.weighted_tracts import load_ft, nodes_labels_mega, nodes_by_index_mega\n'), ((616, 639), 'os.listdir', 'os.listdir', (['folder_name'], {}), '(folder_name)\n', (626, 639), False, 'import os\n'), ((801, 833), 'FT.weighted_tracts.nodes_by_index_mega', 'nodes_by_index_mega', (['folder_name'], {}), '(folder_name)\n', (820, 833), False, 'from FT.weighted_tracts import load_ft, nodes_labels_mega, nodes_by_index_mega\n'), ((860, 897), 'FT.weighted_tracts.nodes_labels_mega', 'nodes_labels_mega', (['index_to_text_file'], {}), '(index_to_text_file)\n', (877, 897), False, 'from FT.weighted_tracts import load_ft, nodes_labels_mega, nodes_by_index_mega\n'), ((916, 1041), 'dipy.tracking.utils.connectivity_matrix', 'utils.connectivity_matrix', (['streamlines', 'lab_labels_index'], {'affine': 'affine', 'return_mapping': '(True)', 'mapping_as_streamlines': '(True)'}), '(streamlines, lab_labels_index, affine=affine,\n return_mapping=True, mapping_as_streamlines=True)\n', (941, 1041), False, 'from dipy.tracking import utils\n'), ((1280, 1304), 'nibabel.load', 'nib.load', (['weight_by_file'], {}), '(weight_by_file)\n', (1288, 1304), True, 'import nibabel as nib\n'), ((2038, 2090), 'numpy.save', 'np.save', (["(folder_name + '\\\\non-weighted_non-norm')", 'mm'], {}), "(folder_name + '\\\\non-weighted_non-norm', mm)\n", (2045, 2090), True, 'import numpy as np\n'), ((2095, 2152), 'numpy.save', 'np.save', (["(folder_name + '\\\\weighted_non-norm')", 'mm_weighted'], {}), "(folder_name + '\\\\weighted_non-norm', mm_weighted)\n", (2102, 2152), True, 'import numpy as np\n'), ((2162, 2186), 'numpy.reshape', 'np.reshape', (['mm', '(23409,)'], {}), '(mm, (23409,))\n', (2172, 2186), True, 'import numpy as np\n'), ((2194, 2227), 'numpy.reshape', 'np.reshape', (['mm_weighted', '(23409,)'], {}), '(mm_weighted, (23409,))\n', (2204, 
2227), True, 'import numpy as np\n'), ((2233, 2260), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[12, 6]'}), '(figsize=[12, 6])\n', (2243, 2260), True, 'import matplotlib.pyplot as plt\n'), ((2271, 2291), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2282, 2291), True, 'import matplotlib.pyplot as plt\n'), ((2364, 2384), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2375, 2384), True, 'import matplotlib.pyplot as plt\n'), ((2452, 2462), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2460, 2462), True, 'import matplotlib.pyplot as plt\n'), ((700, 731), 'os.path.join', 'os.path.join', (['folder_name', 'file'], {}), '(folder_name, file)\n', (712, 731), False, 'import os\n'), ((1631, 1688), 'dipy.tracking.streamline.values_from_volume', 'values_from_volume', (['weight_by_data', 'tracts'], {'affine': 'affine'}), '(weight_by_data, tracts, affine=affine)\n', (1649, 1688), False, 'from dipy.tracking.streamline import values_from_volume\n'), ((1807, 1837), 'numpy.nanmean', 'np.nanmean', (['mean_vol_per_tract'], {}), '(mean_vol_per_tract)\n', (1817, 1837), True, 'import numpy as np\n'), ((1767, 1777), 'numpy.mean', 'np.mean', (['s'], {}), '(s)\n', (1774, 1777), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as N
def main(dt=1e-1, R=4, T=1):
    """Simulate the linear SDE (geometric Brownian motion)

        dX(t) = λX(t) dt + μX(t) dW(t)

    with Euler-Maruyama, a one-shot large-step scheme, Milstein's method and
    the deterministic ODE, and plot them against the exact solution.

    Args:
        dt: Brownian-path time step.
        R: Step multiplier; the numerical schemes use Δt = R*dt.
        T: Final time.
    """
    # The number of points in the Wiener process we need.
    wN = int(N.ceil(T/dt))
    # Brownian increments dW ~ sqrt(dt)·N(0,1) and the cumulative path W.
    dWt = N.sqrt(dt) * N.random.randn(wN-1)
    W = N.cumsum(dWt)
    W = N.insert(W, 0, 0)
    ld = 2  # drift λ
    mu = 1  # diffusion μ
    # Euler-Maruyama solution:
    #   Xⱼ = Xⱼ₋₁ + Δt ⋆ (λ ⋆ Xⱼ₋₁) + μ ⋆ Xⱼ₋₁ ⋆ (Wⱼ - Wⱼ₋₁)
    #   Wⱼ = W[Δt⋆j⋆R]
    #   Δt = R⋆dt, this is done to make life easy for ourselves.
    Dt = R * dt
    X = 1   # Initial value X(0)
    Xm = 1  # Milstein state
    vso = [X]
    vsm = [Xm]
    vdt = [X]
    tso = [0]
    Xd = 1  # deterministic state
    vs = [Xd]
    for j in range(0, int(wN/R)):
        # Brownian increment over the coarse step: Wⱼ - Wⱼ₋₁.
        part_sum = sum(dWt[(j*R):((j+1)*R)])
        # Euler-Maruyama step.
        X = X + (Dt * (ld * X)) + (mu * X * part_sum)
        vso.append(X)
        # BUG FIX: the step ending here covers up to time (j+1)*Δt, not j*Δt.
        tso.append(Dt * (j + 1))
        # Independent large-step EM using Wⱼ - Wⱼ₋₁ = sqrt(Δt)·N(0,1).
        # BUG FIX: recurse on this scheme's own previous value (not the EM
        # state X) and draw from randn (standard normal), not rand (uniform).
        prev = vdt[-1]
        vdt.append(prev + (Dt * ld * prev) +
                   (mu * prev * N.sqrt(Dt) * N.random.randn()))
        # Milstein's method, with partial derivative.
        # BUG FIX: the drift term must use Xm, not the EM state X.
        Xm = (Xm + (Dt * (ld * Xm)) + (mu * Xm * part_sum)
              + 0.5*mu**2*Xm*((part_sum**2) - Dt))
        vsm.append(Xm)
        # Deterministic ODE dX = λX dt.
        Xd = Xd + Dt * Xd * ld
        vs.append(Xd)
    plot_dict = dict()
    plot_dict['t'] = tso
    plot_dict['vsm'] = vsm
    plot_dict['vdt'] = vdt
    # Exact closed-form solution: X(t) = exp((λ - μ²/2)·t + μ·W(t)).
    # BUG FIX: μW must be added to (λ - μ²/2)t, not multiplied inside it.
    Xtruet = N.arange(0, T, dt)
    Xtrue = N.exp((ld - 0.5*mu**2)*Xtruet + mu*W)
    plt.plot(Xtruet, Xtrue)
    plt.plot(tso, vso, marker='1')
    plt.plot(plot_dict['t'], plot_dict['vsm'], marker='2')
    plt.plot(plot_dict['t'], plot_dict['vdt'], marker='*')
    plt.plot(tso, vs, marker='3')
    plt.show()
plt.show()
if __name__ == '__main__':
# N.random.seed(100) # Remove this later on
for i in range(10):
main(dt=(1/2**8), R=4, T=2)
| [
"matplotlib.pyplot.show",
"numpy.ceil",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.insert",
"numpy.cumsum",
"numpy.arange",
"numpy.exp",
"numpy.random.rand",
"numpy.sqrt"
] | [((396, 409), 'numpy.cumsum', 'N.cumsum', (['dWt'], {}), '(dWt)\n', (404, 409), True, 'import numpy as N\n'), ((418, 435), 'numpy.insert', 'N.insert', (['W', '(0)', '(0)'], {}), '(W, 0, 0)\n', (426, 435), True, 'import numpy as N\n'), ((1616, 1634), 'numpy.arange', 'N.arange', (['(0)', 'T', 'dt'], {}), '(0, T, dt)\n', (1624, 1634), True, 'import numpy as N\n'), ((1647, 1694), 'numpy.exp', 'N.exp', (['((ld - 0.5 * mu ** 2) * (Xtruet + mu * W))'], {}), '((ld - 0.5 * mu ** 2) * (Xtruet + mu * W))\n', (1652, 1694), True, 'import numpy as N\n'), ((1687, 1710), 'matplotlib.pyplot.plot', 'plt.plot', (['Xtruet', 'Xtrue'], {}), '(Xtruet, Xtrue)\n', (1695, 1710), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1745), 'matplotlib.pyplot.plot', 'plt.plot', (['tso', 'vso'], {'marker': '"""1"""'}), "(tso, vso, marker='1')\n", (1723, 1745), True, 'import matplotlib.pyplot as plt\n'), ((1750, 1804), 'matplotlib.pyplot.plot', 'plt.plot', (["plot_dict['t']", "plot_dict['vsm']"], {'marker': '"""2"""'}), "(plot_dict['t'], plot_dict['vsm'], marker='2')\n", (1758, 1804), True, 'import matplotlib.pyplot as plt\n'), ((1809, 1863), 'matplotlib.pyplot.plot', 'plt.plot', (["plot_dict['t']", "plot_dict['vdt']"], {'marker': '"""*"""'}), "(plot_dict['t'], plot_dict['vdt'], marker='*')\n", (1817, 1863), True, 'import matplotlib.pyplot as plt\n'), ((1868, 1897), 'matplotlib.pyplot.plot', 'plt.plot', (['tso', 'vs'], {'marker': '"""3"""'}), "(tso, vs, marker='3')\n", (1876, 1897), True, 'import matplotlib.pyplot as plt\n'), ((1902, 1912), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1910, 1912), True, 'import matplotlib.pyplot as plt\n'), ((260, 274), 'numpy.ceil', 'N.ceil', (['(T / dt)'], {}), '(T / dt)\n', (266, 274), True, 'import numpy as N\n'), ((354, 364), 'numpy.sqrt', 'N.sqrt', (['dt'], {}), '(dt)\n', (360, 364), True, 'import numpy as N\n'), ((367, 389), 'numpy.random.randn', 'N.random.randn', (['(wN - 1)'], {}), '(wN - 1)\n', (381, 389), True, 'import numpy as N\n'), 
((1172, 1187), 'numpy.random.rand', 'N.random.rand', ([], {}), '()\n', (1185, 1187), True, 'import numpy as N\n'), ((1159, 1169), 'numpy.sqrt', 'N.sqrt', (['Dt'], {}), '(Dt)\n', (1165, 1169), True, 'import numpy as N\n')] |
import sys
import os
os.environ['ABACUS'] = os.environ['HOME'] + '/abacus'
sys.path.insert(0, os.environ['ABACUS'])
from pathlib import Path
from abacusnbody.data import read_abacus
from abacusnbody.data.compaso_halo_catalog import CompaSOHaloCatalog
import astropy.table
from astropy.table import Table
import numba as nb
import numpy as np
import matplotlib.pyplot as plt
import Corrfunc
# Default simulation box size used by plot_tsc below
# (presumably Mpc/h, matching AbacusSummit conventions -- TODO confirm)
box = 2000.
# Root directory holding the AbacusSummit simulation products
AbacusSummit = Path('/mnt/home/lgarrison/ceph/AbacusSummit/')
def count_pairs(t, domain, nthread=1,):
    """
    Count auto-correlation pairs (DD) for the particles in table *t*.

    Uses 2**4 * 3**4 = 1296 linear radial bins between 0.1 and 10.1, runs
    a non-periodic pair count, and returns the result as an astropy Table
    carrying the input metadata plus the bin midpoints, the particle count
    and the counting domain.
    """
    edges = np.linspace(0.1, 10.1, 2**4 * 3**4 + 1)
    print(f'Using {nthread=}')
    dd = Corrfunc.theory.DD(1, nthread, edges, *t['pos'].T, periodic=False, verbose=True, bin_type='lin')
    dd = Table(dd, meta=t.meta.copy())
    # midpoint of each radial bin, convenient for plotting
    dd['rmid'] = (dd['rmin'] + dd['rmax'])/2.
    dd.meta['N_DD'] = len(t)
    dd.meta['domain_DD'] = domain
    return dd
def cutout_sim_particles(sim, cen, width, nthread=1):
    """
    Load the A-subsample particles (slab 000, z=0.5) of simulation *sim*,
    count their pairs over the slab domain, and return the particles that
    fall inside the periodic cutout box of size *width* centred on *cen*,
    together with the pair-count table.
    """
    rv_files = sorted(sim.glob('halos/z0.500/*_rv_A/*_000.asdf'))
    t = astropy.table.vstack(
        [read_abacus.read_asdf(fn, load=('pos', 'pid')) for fn in rv_files]
    )
    pid_files = sorted(sim.glob('halos/z0.500/*_pid_A/*_000.asdf'))
    pid = astropy.table.vstack(
        [read_abacus.read_asdf(fn) for fn in pid_files]
    )
    t = astropy.table.hstack([t, pid])
    L = t.meta['BoxSize']
    # one slab out of 34 in x, full box in y and z
    domain = [L/34, L, L]
    pairs = count_pairs(t, domain, nthread=nthread)
    keep = argcutout(t['pos'], cen, width, t.meta['BoxSize'])
    return t[keep], pairs
def plot_tsc(p, cen, width, ngrid, box=box, tscbox=None):
    """
    Project a cutout of particles *p* around *cen* onto an ngrid x ngrid
    TSC density grid and show its log-density image.

    NOTE(review): the import of TSC below is commented out, so calling this
    function currently raises NameError at the TSC.BinParticlesFromMem line.
    Re-enable the import (requires the Abacus analysis package) before use.
    """
    #from Abacus.Analysis.PowerSpectrum import TSC
    if tscbox is None:
        tscbox = box
    subp = cutout(p, cen, width, box)
    # recenter on the cutout centre before gridding
    subp -= cen
    subp = np.roll(subp, -1, axis=1) # TSC only lets you project out the z axis
    dens = TSC.BinParticlesFromMem(subp, (ngrid,ngrid), tscbox, nthreads=4, inplace=True, norm=True)
    fig, ax = plt.subplots(dpi=144)
    ax.set_aspect('equal')
    # +2 keeps the argument of the log positive for empty cells
    ax.imshow(np.log(dens.T + 2), origin='lower')
@nb.njit
def cutout(p, c, w, box):
    """Return a copy of the particles of *p* lying inside the periodic box
    of full widths *w* centred on *c* (periodic wrap of size *box*)."""
    half = w/2
    kept = np.empty_like(p)
    n_kept = 0
    for i in range(len(p)):
        inside = True
        for k in range(3):
            d = np.abs(p[i][k] - c[k])
            # fold the separation back into the principal periodic image
            while d > box/2:
                d = np.abs(d - box)
            if d > half[k]:
                inside = False
                break
        if inside:
            for k in range(3):
                kept[n_kept][k] = p[i][k]
            n_kept += 1
    return kept[:n_kept]
@nb.njit
def argcutout(p, c, w, box):
    """Return the indices of the particles of *p* lying inside the periodic
    box of full widths *w* centred on *c* (periodic wrap of size *box*)."""
    half = w/2
    indices = np.empty(len(p), dtype=np.int64)
    count = 0
    for i in range(len(p)):
        inside = True
        for k in range(3):
            d = np.abs(p[i][k] - c[k])
            # fold the separation back into the principal periodic image
            while d > box/2:
                d = np.abs(d - box)
            if d > half[k]:
                inside = False
                break
        if inside:
            indices[count] = i
            count += 1
    return indices[:count]
| [
"numpy.abs",
"numpy.log",
"numpy.roll",
"numpy.empty_like",
"sys.path.insert",
"abacusnbody.data.read_abacus.read_asdf",
"pathlib.Path",
"numpy.linspace",
"Corrfunc.theory.DD",
"matplotlib.pyplot.subplots"
] | [((76, 116), 'sys.path.insert', 'sys.path.insert', (['(0)', "os.environ['ABACUS']"], {}), "(0, os.environ['ABACUS'])\n", (91, 116), False, 'import sys\n'), ((421, 467), 'pathlib.Path', 'Path', (['"""/mnt/home/lgarrison/ceph/AbacusSummit/"""'], {}), "('/mnt/home/lgarrison/ceph/AbacusSummit/')\n", (425, 467), False, 'from pathlib import Path\n'), ((520, 563), 'numpy.linspace', 'np.linspace', (['(0.1)', '(10.1)', '(2 ** 4 * 3 ** 4 + 1)'], {}), '(0.1, 10.1, 2 ** 4 * 3 ** 4 + 1)\n', (531, 563), True, 'import numpy as np\n'), ((602, 702), 'Corrfunc.theory.DD', 'Corrfunc.theory.DD', (['(1)', 'nthread', 'bins', "*t['pos'].T"], {'periodic': '(False)', 'verbose': '(True)', 'bin_type': '"""lin"""'}), "(1, nthread, bins, *t['pos'].T, periodic=False, verbose=\n True, bin_type='lin')\n", (620, 702), False, 'import Corrfunc\n'), ((1724, 1749), 'numpy.roll', 'np.roll', (['subp', '(-1)'], {'axis': '(1)'}), '(subp, -1, axis=1)\n', (1731, 1749), True, 'import numpy as np\n'), ((1914, 1935), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'dpi': '(144)'}), '(dpi=144)\n', (1926, 1935), True, 'import matplotlib.pyplot as plt\n'), ((2071, 2087), 'numpy.empty_like', 'np.empty_like', (['p'], {}), '(p)\n', (2084, 2087), True, 'import numpy as np\n'), ((1977, 1995), 'numpy.log', 'np.log', (['(dens.T + 2)'], {}), '(dens.T + 2)\n', (1983, 1995), True, 'import numpy as np\n'), ((1018, 1065), 'abacusnbody.data.read_abacus.read_asdf', 'read_abacus.read_asdf', (['pfn'], {'load': "('pos', 'pid')"}), "(pfn, load=('pos', 'pid'))\n", (1039, 1065), False, 'from abacusnbody.data import read_abacus\n'), ((1196, 1222), 'abacusnbody.data.read_abacus.read_asdf', 'read_abacus.read_asdf', (['pfn'], {}), '(pfn)\n', (1217, 1222), False, 'from abacusnbody.data import read_abacus\n'), ((2170, 2192), 'numpy.abs', 'np.abs', (['(p[i][k] - c[k])'], {}), '(p[i][k] - c[k])\n', (2176, 2192), True, 'import numpy as np\n'), ((2611, 2633), 'numpy.abs', 'np.abs', (['(p[i][k] - c[k])'], {}), '(p[i][k] - c[k])\n', 
(2617, 2633), True, 'import numpy as np\n'), ((2244, 2260), 'numpy.abs', 'np.abs', (['(dx - box)'], {}), '(dx - box)\n', (2250, 2260), True, 'import numpy as np\n'), ((2685, 2701), 'numpy.abs', 'np.abs', (['(dx - box)'], {}), '(dx - box)\n', (2691, 2701), True, 'import numpy as np\n')] |
#!/usr/bin/python3
"""
Sentiment analysis using NLTK package in Python
"""
__author__ = "Sunil"
__copyright__ = "Copyright (c) 2017 Sunil"
__license__ = "MIT License"
__email__ = "<EMAIL>"
__version__ = "0.1"
import random
import sys
import numpy as np
from nltk.corpus import movie_reviews as reviews
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sklearn import svm
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.metrics import (accuracy_score, classification_report,
precision_recall_fscore_support)
from sklearn.model_selection import KFold
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
class DocumentSanitizer(BaseEstimator, TransformerMixin):
    """
    Pass-through text pre-processing step for sklearn pipelines.

    Intended as the place for language cleanup (stop words, punctuation,
    numbers, stemming, lemmatization) before vectorization; currently it
    forwards the documents unchanged.
    """

    def __init__(self):
        pass

    def fit(self, X, Y):
        """No-op fit: nothing is learned from the data."""
        return self

    def transform(self, X):
        # Placeholder: stop-word removal, punctuation/number stripping,
        # stemming/lemmatization would go here.  N-grams are handled by
        # the downstream TfidfVectorizer, so no bigram logic is needed.
        return X
def main():
    """
    Train and evaluate a sentiment classifier on the NLTK movie_reviews
    corpus with 10-fold cross validation, printing the averaged accuracy,
    precision, recall and F1 over the folds.
    """
    # raw review texts and their (first) category labels
    X = np.array([reviews.raw(fileid) for fileid in reviews.fileids()])
    y = np.array([reviews.categories(fileid)[0] for fileid in reviews.fileids()])

    # Shuffle texts and labels together so their pairing is preserved.
    # ``np.object`` was removed in NumPy 1.24; the builtin ``object`` is
    # the supported spelling of the same dtype.
    data = np.array(list(zip(X, y)), dtype=np.dtype([('f1', object), ('f2', object)]))
    np.random.shuffle(data)
    X, y = zip(*data)

    X = np.array(X)
    y = np.array(y)

    labelTransformer = LabelEncoder()
    Y = labelTransformer.fit_transform(y)
    splitter = KFold(n_splits=10).split

    accuracies = []
    precisions = []
    recalls = []
    fmeasures = []

    def formatTo3Decimals(header, decimal):
        # "name:0.123" -- shared by all the summary prints below
        return "{0}:{1:.3f}".format(header, decimal)

    for train_index, test_index in splitter(X):
        Xtrain, Xtest = X[train_index], X[test_index]
        Ytrain, Ytest = Y[train_index], Y[test_index]
        pipeline = Pipeline([
            ("DocumentProcessor", DocumentSanitizer()),
            ("TfIdfVec", TfidfVectorizer(tokenizer=None, preprocessor=None, lowercase=False,
                                          ngram_range=(1,2))),
            # ("CountVec", CountVectorizer()),
            ("SGDclassifier", SGDClassifier())
            # Alternative classifiers kept for experimentation:
            # ("svc", svm.SVC(kernel='linear'))
            # ("logreg", LogisticRegression())
            # ("KNeighborsClassifier", KNeighborsClassifier(n_neighbors=3))
            # ("MLPClassifier", MLPClassifier())
            # ("DecisionTreeClassifier", DecisionTreeClassifier(max_depth=6))
            # ("GaussianNB", GaussianNB()),
            # ("RandomForestClassifier", RandomForestClassifier())
        ])
        model = pipeline.fit(Xtrain, Ytrain)
        Ypred = model.predict(Xtest)
        # print(classification_report(Ytest, Ypred, target_names=labelTransformer.classes_))
        precision, recall, f1, support = \
            precision_recall_fscore_support(Ytest, Ypred, beta = 1.0, average="macro")
        accuracies.append(accuracy_score(Ytest, Ypred))
        precisions.append(precision)
        recalls.append(recall)
        fmeasures.append(f1)

    # average the per-fold metrics
    accuracy = np.mean(accuracies)
    precision = np.mean(precisions)
    recall = np.mean(recalls)
    f1 = np.mean(fmeasures)
    print(formatTo3Decimals("Accuracy", accuracy))
    print("{0};{1};{2}".format(
        formatTo3Decimals("Precision", precision),
        formatTo3Decimals("Recall", recall),
        formatTo3Decimals("F1", f1)))
if __name__ == "__main__":
main() | [
"nltk.corpus.movie_reviews.raw",
"sklearn.linear_model.SGDClassifier",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.metrics.accuracy_score",
"numpy.dtype",
"sklearn.preprocessing.LabelEncoder",
"sklearn.model_selection.KFold",
"numpy.mean",
"numpy.array",
"nltk.corpus.movie_reviews.... | [((1926, 1949), 'numpy.random.shuffle', 'np.random.shuffle', (['data'], {}), '(data)\n', (1943, 1949), True, 'import numpy as np\n'), ((1981, 1992), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1989, 1992), True, 'import numpy as np\n'), ((2001, 2012), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2009, 2012), True, 'import numpy as np\n'), ((2037, 2051), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2049, 2051), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4042, 4061), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (4049, 4061), True, 'import numpy as np\n'), ((4078, 4097), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (4085, 4097), True, 'import numpy as np\n'), ((4111, 4127), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (4118, 4127), True, 'import numpy as np\n'), ((4137, 4155), 'numpy.mean', 'np.mean', (['fmeasures'], {}), '(fmeasures)\n', (4144, 4155), True, 'import numpy as np\n'), ((2110, 2128), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (2115, 2128), False, 'from sklearn.model_selection import KFold\n'), ((3426, 3498), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['Ytest', 'Ypred'], {'beta': '(1.0)', 'average': '"""macro"""'}), "(Ytest, Ypred, beta=1.0, average='macro')\n", (3457, 3498), False, 'from sklearn.metrics import accuracy_score, classification_report, precision_recall_fscore_support\n'), ((3521, 3549), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Ytest', 'Ypred'], {}), '(Ytest, Ypred)\n', (3535, 3549), False, 'from sklearn.metrics import accuracy_score, classification_report, precision_recall_fscore_support\n'), ((1692, 1711), 'nltk.corpus.movie_reviews.raw', 'reviews.raw', (['fileid'], {}), '(fileid)\n', (1703, 1711), True, 'from nltk.corpus import movie_reviews as reviews\n'), ((1872, 1920), 'numpy.dtype', 
'np.dtype', (["[('f1', np.object), ('f2', np.object)]"], {}), "([('f1', np.object), ('f2', np.object)])\n", (1880, 1920), True, 'import numpy as np\n'), ((1726, 1743), 'nltk.corpus.movie_reviews.fileids', 'reviews.fileids', ([], {}), '()\n', (1741, 1743), True, 'from nltk.corpus import movie_reviews as reviews\n'), ((1764, 1790), 'nltk.corpus.movie_reviews.categories', 'reviews.categories', (['fileid'], {}), '(fileid)\n', (1782, 1790), True, 'from nltk.corpus import movie_reviews as reviews\n'), ((1808, 1825), 'nltk.corpus.movie_reviews.fileids', 'reviews.fileids', ([], {}), '()\n', (1823, 1825), True, 'from nltk.corpus import movie_reviews as reviews\n'), ((2571, 2662), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'tokenizer': 'None', 'preprocessor': 'None', 'lowercase': '(False)', 'ngram_range': '(1, 2)'}), '(tokenizer=None, preprocessor=None, lowercase=False,\n ngram_range=(1, 2))\n', (2586, 2662), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((2754, 2769), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {}), '()\n', (2767, 2769), False, 'from sklearn.linear_model import LogisticRegression, SGDClassifier\n')] |
#!/usr/bin/env python
import argparse
import configparser
import os
import itertools
import pathlib
import random
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
from matplotlib import gridspec
from numpy.polynomial.polynomial import polyfit
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
def units_coef_calc():
    """
    Calculate the unit conversion coefficients from geometrized units
    (G = c = M_sun = 1) to physical/CGS units.

    Sets the module-level ``units`` dict (kept for backward compatibility
    with existing callers) and now also returns it, so the result can be
    used without touching globals.

    Returns
    -------
    dict
        "density"  : float -- density unit in g cm^-3
        "pressure" : float -- pressure unit in dyns cm^-3
        "R"        : float -- radial coordinate unit in km
        "J"        : float -- moment of inertia unit (CGS)
    """
    # mass of sun in kg
    const_msun = 1.9891e30
    # gravitational constant in m^3 kg^-1 s^-2
    const_g = 6.67384e-11
    # speed of light in m s^-1
    const_c = 299792458

    global units
    units = {
        # density in g cm^-3
        "density": 1e-3 * const_c**6 / (const_g**3 * const_msun**2),
        # pressure in dyns cm^-3
        "pressure": const_c**8 / (const_g**3 * const_msun**2) * 10,
        # radial coordinate in km (G*Msun/c^2, i.e. half the Sun's
        # Schwarzschild radius, ~1.4766 km)
        "R": 1e-3 * const_g * const_msun / const_c**2,
        # moment of inertia
        "J": 1e7 * const_g**2 * const_msun**3 / const_c**4,
    }
    return units
def map_ms_ls_c():
    """
    Populate the module-level style maps used by all plotting routines:
    marker style per EOS name (``map_ms``), line style per lambda value
    (``map_ls``) and line colour per m value (``map_c``).

    The previous version also built four local reference lists
    (all_marker_styles, all_line_styles, all_colors, EOS_max18) that were
    never read -- they have been removed as dead code.

    Returns
    -------
    tuple(dict, dict, dict)
        (map_ms, map_ls, map_c) -- the same dicts that are installed as
        globals; callers that ignore the return value are unaffected.
    """
    global map_ms, map_ls, map_c

    # map each EOS to a different marker style
    map_ms = {
        "SLy": "s", "APR2": "8", "APR3": "$\\bigotimes$", "APR4": "<", "FPS":
        "^", "WFF1": "v", "WFF2": "o", "WFF3": "X", "BBB2": "P",
        "ENG":"$\\bigoplus$", "MPA1": "D", "MS1": "H", "MS2": "h",
        "MS1b": "*", "GNH3": "$\\spadesuit$", "H3": "$\\bowtie$",
        "H4": "$\\clubsuit$", "ALF2": "$\\diamondsuit$", "ALF4": "$\\heartsuit$"
    }

    # map each lambda value to a different linestyle
    map_ls = {
        0: "--",
        1e-1: "-.",
        1e0: (0, (6, 1, 1, 1, 1, 1)),
        1e1: (0, (6, 1, 1, 1, 1, 1, 1, 1)),
        "GR": "-"
    }

    # map each m value to a different colour
    map_c = {
        0: "#e6194B", 5e-3: "#3cb44b", 5e-2: "#4363d8", "GR": "#f58231"
    }

    return map_ms, map_ls, map_c
def luminosity_color(color, amount=1.2):
    """
    Lighten (amount < 1) or darken (amount > 1) the given color by scaling
    (1 - luminosity) by *amount*.

    Input can be a matplotlib color string, hex string, or RGB tuple.

    Examples:
    >> luminosity_color('g', 0.3)
    >> luminosity_color('#F034A3', 0.6)
    >> luminosity_color((.3,.55,.1), 0.5)
    """
    import matplotlib.colors as mc
    import colorsys
    try:
        c = mc.cnames[color]
    except (KeyError, TypeError):
        # Not a named color (hex string, RGB tuple, unhashable input):
        # use the value as-is.  The previous bare ``except:`` also
        # swallowed KeyboardInterrupt/SystemExit.
        c = color
    h, l, s = colorsys.rgb_to_hls(*mc.to_rgb(c))
    return colorsys.hls_to_rgb(abs(h), abs(1 - amount * (1 - l)), abs(s))
def load_YvsX_config(config):
    """
    Convert a config given as a list of (key, value-string) tuples into a
    dict.

    Values of keys in the "eval" set are parsed as Python literals via
    eval(); values of keys in the "list" set are split on commas into
    lists (empty/whitespace-only pieces are dropped).  Anything else is
    kept as a stripped string.
    """
    # keys whose values are parsed with eval
    eval_keys = [ "beta", "m", "lambda", "x_col", "y_col" ]
    # keys whose values become lists
    list_keys = [ "eos_name", "beta", "m", "lambda" ]

    result = {}
    for entry in config:
        key, raw = entry[0], entry[1]
        parse = key in eval_keys
        if key in list_keys:
            pieces = [piece for piece in raw.split(",") if piece.strip()]
            result[key] = (
                [eval(piece) for piece in pieces] if parse
                else [piece.strip() for piece in pieces]
            )
        elif parse:
            result[key] = eval(raw)
        else:
            result[key] = raw.strip()
    return result
def get_YvsX_ax():
    """
    Apply the shared matplotlib rc configuration and return the axes of a
    freshly created, rasterized, non-tight-layout figure.
    """
    rc_settings = [
        ('xtick', dict(labelsize=16)),
        ('ytick', dict(labelsize=16)),
        ('font', dict(size=15)),
        ('figure', dict(figsize=(7.2, 4.8))),
        ('axes', dict(titlesize=16)),
        ('axes', dict(labelsize=16)),
        ('lines', dict(linewidth=2)),
        ('lines', dict(markersize=10)),
        ('legend', dict(fontsize=8)),
        ('text', dict(usetex=True)),
        ('mathtext', dict(fontset="stix")),
        ('font', dict(family='STIXGeneral')),
    ]
    for group, kwargs in rc_settings:
        plt.rc(group, **kwargs)

    fig, ax = plt.subplots()
    fig.set_rasterized(True)
    fig.set_tight_layout(False)
    return ax
def get_uniI_ax():
    """
    Apply the shared matplotlib rc configuration and return the (upper,
    lower) axes of a two-row figure sharing the x axis, with a 2:1 height
    ratio and no vertical gap between the panels.
    """
    rc_settings = [
        ('xtick', dict(labelsize=16)),
        ('ytick', dict(labelsize=16)),
        ('font', dict(size=15)),
        ('figure', dict(figsize=(7.2, 4.8))),
        ('axes', dict(titlesize=16)),
        ('axes', dict(labelsize=16)),
        ('lines', dict(linewidth=2)),
        ('lines', dict(markersize=10)),
        ('legend', dict(fontsize=8)),
        ('text', dict(usetex=True)),
        ('mathtext', dict(fontset="stix")),
        ('font', dict(family='STIXGeneral')),
    ]
    for group, kwargs in rc_settings:
        plt.rc(group, **kwargs)

    fig, (ax_up, ax_down) = plt.subplots(
        2, 1, sharex=True,
        gridspec_kw=dict(height_ratios=[2, 1])
    )
    fig.subplots_adjust(hspace=0)
    fig.set_rasterized(True)
    fig.set_tight_layout(False)
    return ax_up, ax_down
def get_uniI_data( fpath_main, fpath_fitting, fname, EOSname, EOSbeta, EOSm,
    EOSlambda, col_x, col_y, tilde ):
    """
    Load the two requested columns of the universal-I data file for one
    model and drop entries with compactness below 0.09.

    The file is located at
    ``fpath_main/EOSname/fpath_fitting/<fname>_<EOSname>_beta..._m..._lambda..._<tilde>``.

    Returns
    -------
    numpy.ndarray, shape (n, 2)
        column 0 is compactness (x), column 1 is the universal-I value (y).
    """
    EOSmodel = "_".join( [
        fname,
        EOSname,
        "beta{:.3e}".format(EOSbeta),
        "m{:.3e}".format(EOSm),
        "lambda{:.3e}".format(EOSlambda),
        tilde
    ] )
    fullPathModel = os.path.join( fpath_main, EOSname, fpath_fitting, EOSmodel)
    print("\n Will load data for: \n\t {} \n".format(fullPathModel))
    # TODO if file not exists what do
    # ndmin=2 keeps the result two-dimensional even for a single-row file;
    # without it np.loadtxt returns a 1-D array and the mask below raises
    # IndexError.
    data = np.loadtxt(fullPathModel, comments="#", delimiter=" ",
        usecols=(col_x, col_y), ndmin=2)

    min_compactness = 0.09
    data = data[~(data[:,0]<min_compactness), :]
    return data
def plotMarkers_getFits_uniI(
    config, pars, tilde, ax_up, ax_down, ax_in = None, fit_results = {}
):
    """
    Scatter per-EOS data markers on *ax_up*, fit a single universal
    I(compactness) relation through the combined data of all EOSs, draw the
    fit (plus its error bands for the GR case, pars[0] == 0), and plot the
    relative residuals on *ax_down*.  The fit coefficients and residual
    statistics are stored into *fit_results* keyed by the (beta, m, lambda)
    parameter string, and that dict is returned.

    ``tilde`` selects the fitted form: "tildeI" uses I/(M R^2) vs M/R,
    anything else uses the barI polynomial in inverse compactness.

    NOTE(review): ``fit_results = {}`` is a mutable default argument, so the
    same dict is shared across every call that omits it.  The callers in
    this file appear to rely on that accumulation across a loop -- do not
    "fix" it to ``None`` without auditing them.
    """
    def _get_polyfit_res_tildeI(xp, yp):
        # fit a_0 + a_1*x + a_4*x^4, weighted by sqrt(y)
        coef, rest = polyfit(
            x = xp, y = yp,
            deg = [ 0, 1, 4 ],
            w = np.sqrt(yp),
            full = True
        )
        #~ calcualte the chi reduce
        chi_red = rest[0][0]/(len(xp) - 3)
        p = lambda x: coef[0] + coef[1]*x + coef[4]*x**4
        return coef, chi_red, p
    def _get_polyfit_res_barI(xp, yp):
        # fit in inverse powers: the caller passes xp = compactness**-1,
        # and the returned polynom p() re-inverts its argument
        coef, rest = polyfit(
            x = xp, y = yp,
            deg = [ 1, 2, 3, 4 ],
            #~ w = np.sqrt(yp),
            full = True
        )
        #~ calcualte the chi reduce
        chi_red = rest[0][0]/(len(xp) - 4)
        p = lambda x: coef[1]*x**-1 + coef[2]*x**-2 + coef[3]*x**-3 + coef[4]*x**-4
        return coef, chi_red, p
    # TODO this choice is arbitrary, it should not be hardcoded
    # MS2 is soft; SLy is moderate; APR3 is stiff
    #~ PlaceMarkersForEOS = ["MS2", "SLy", "APR3"]
    plot_alpha = 0.5 # give some transperancy
    #~ fit_results = {} # to print what we have achieved at the end for the model
    # accumulate data for the model here, and use them to make the fit
    data_to_fit = np.empty(shape=(0,2))
    # fill the up plot with markers while gathering data for the fit
    for EOSname in config["eos_name"]:
        data = get_uniI_data(
            config["path_main"], config["path_fitting"],
            config["base_name"], EOSname, pars[0], pars[1], pars[2],
            config["x_col"], config["y_col"], tilde
        )
        data_to_fit = np.append(data_to_fit, data, axis=0)
        # lambda > 1 models contribute to the fit but are not drawn
        if pars[2] > 1:
            continue
        ax_up.plot(
            data[:,0], data[:,1],
            label = None,
            markevery = random.uniform(0.14,0.18),
            markersize = 6,
            linewidth = 0,
            marker = map_ms.get(EOSname, None),
            color = map_c.get(
                pars[1] if pars[0] else "GR",None
            ),
            linestyle = None,
            alpha = plot_alpha
        )
        # do not want any markers in the zoomed box
        if ax_in and False:
            ax_in.plot(
                data[:,0], data[:,1],
                label = None,
                markevery = None,
                markersize = 6,
                linewidth = 0,
                marker = map_ms.get(EOSname, None),
                color = map_c.get(pars[1],None),
                linestyle = None,
                alpha = plot_alpha
            )
    # get the coef in list, the Chi reduced score, and the polynom itself
    coef, chi_red, p = (
        _get_polyfit_res_tildeI(
            data_to_fit[:,0], data_to_fit[:,1]
        ) if tilde == "tildeI"
        else _get_polyfit_res_barI(
            data_to_fit[:,0]**-1, data_to_fit[:,1]
        )
    )
    # average over all EOS of all the residuals
    delta_all = 0
    n_all = 0
    # average over all EOS of largest residual per EOS
    n_all_max = 0
    delta_all_max = 0
    # the largest residual over all EOS
    delta_max = 0
    # fill the down plot with residiums while gathering data for avreages
    for EOSname in config["eos_name"]:
        # change the markevery a little bit random
        data = get_uniI_data(
            config["path_main"], config["path_fitting"],
            config["base_name"], EOSname, pars[0], pars[1], pars[2],
            config["x_col"], config["y_col"], tilde
        )
        # relative deviation of the data from the fitted relation
        _data = np.abs( 1 - data[:,1]/p(data[:,0]) )
        delta_all += np.sum(_data)
        n_all += _data.size
        delta_all_max += np.amax(_data)
        n_all_max += 1
        delta_max = delta_max if delta_max > np.amax(_data) else np.amax(_data)
        if pars[2] >= 1:
            continue
        ax_down.plot(
            data[:,0],
            _data,
            label = None,
            linewidth = 0,
            markersize = 6,
            markevery = random.uniform(0.14,0.18),
            marker = map_ms.get(EOSname, None),
            color = map_c.get(pars[1] if pars[0] else "GR", None),
            linestyle = None,
            alpha = plot_alpha,
            zorder = 90 if pars[0] == 0 else 10
        )
    # L1 average, average of per-EOS maxima, and the global worst residual
    avg_L_1 = delta_all/n_all
    avg_L_inf = delta_all_max/n_all_max
    L_inf_worst = delta_max
    strFormat_BetaMLambda = "beta {}, m = {}, lambda = {}"
    strFormat_tildeI_coef = (
        "a_0 &= {:.3e} \\\\\n"
        "a_1 &= {:.3e} \\\\\n"
        "a_4 &= {:.3e} \\\\\n"
        "{} &= {:.3e} \\\\\n\n"
    )
    strFormat_barI_coef = (
        "a_1 &= {:.3e} \\\\\n"
        "a_2 &= {:.3e} \\\\\n"
        "a_3 &= {:.3e} \\\\\n"
        "a_4 &= {:.3e} \\\\\n"
        "{} &= {:.3e} \\\\\n\n"
    )
    strFormat_L = (
        "<&L> = {:.3e} \\\\\n"
        "<&L_{}> = {:.3e} \\\\\n"
        "&L_{} = {:.3e} \\\\\n\n"
    )
    # record coefficients and residual stats as a LaTeX-ready string
    fit_results.update(
        {
            strFormat_BetaMLambda.format(pars[0], pars[1], pars[2]):
            "\n".join([strFormat_tildeI_coef, strFormat_L]).format(
                coef[0], coef[1], coef[4], "\chi_r^2", chi_red,
                avg_L_1, "\infty", avg_L_inf, "\infty", L_inf_worst,
            )
        } if tilde == "tildeI"
        else {
            strFormat_BetaMLambda.format(pars[0], pars[1], pars[2]):
            "\n".join([strFormat_barI_coef, strFormat_L]).format(
                coef[1], coef[2], coef[3], coef[4], "\chi_r^2", chi_red,
                avg_L_1, "\infty", avg_L_inf, "\infty", L_inf_worst,
            ) }
    )
    #give min and max compactnesses by hand
    #~ p_x = np.linspace(np.amin(data_to_fit[:,0]), np.amax(data_to_fit[:,0]), 100)
    # TODO maybe not hardcode the compactneses like that? now 0.09 to 0.35
    p_x = np.linspace(0.09, 0.35, 100)
    ax_up.plot(
        p_x,
        p(p_x),
        label = None,
        color = luminosity_color(map_c.get(pars[1] if pars[0] else "GR", None)),
        linestyle = map_ls.get(pars[2] if pars[0] else "GR", None),
        zorder = 90 if pars[0] == 0 else 85,
        linewidth = 2.5
    )
    if ax_in:
        ax_in.plot(
            p_x,
            p(p_x),
            label = None,
            color = luminosity_color(map_c.get(pars[1] if pars[0] else "GR", None)),
            linestyle = map_ls.get(pars[2] if pars[0] else "GR", None),
            zorder = 90 if pars[0] == 0 else 85,
            linewidth = 2.5
        )
    # beta = 0 means GR, then make the worst and not so worst fit
    if pars[0] == 0:
        # band at the averaged per-EOS maximum residual
        ax_up.fill_between(
            p_x,
            p(p_x)*(1 + avg_L_inf),
            p(p_x)*(1 - avg_L_inf),
            facecolor=map_c.get("GR", None),
            alpha= 0.4,
            zorder = 0
        )
        if ax_in:
            ax_in.fill_between(
                p_x,
                p(p_x)*(1 + avg_L_inf),
                p(p_x)*(1 - avg_L_inf),
                facecolor=map_c.get("GR", None),
                alpha= 0.4,
                zorder = 0
            )
        # wider band at the single worst residual over all EOSs
        ax_up.fill_between(
            p_x,
            p(p_x)*( 1 + L_inf_worst ),
            p(p_x)*( 1 - L_inf_worst ),
            facecolor=map_c.get("GR", None),
            alpha= 0.3,
            zorder = 0
        )
        if ax_in:
            ax_in.fill_between(
                p_x,
                p(p_x)*( 1 + L_inf_worst ),
                p(p_x)*( 1 - L_inf_worst ),
                facecolor=map_c.get("GR", None),
                alpha= 0.3,
                zorder = 0
            )
    return fit_results
def set_axYvsX_parms(ax, x_label = "", y_label = ""):
    """
    Apply the common axis cosmetics: inward-pointing ticks plus the x and
    y axis labels.  Tick label sizes and number formats are handled by the
    global rc settings, so nothing else is configured here.
    """
    ax.tick_params(direction="in")
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
def get_YvsX_data( fpath, fname, EOSname, EOSbeta, EOSm, EOSlambda, col_x, col_y):
    """
    Load the two requested columns of the model data file located at
    ``fpath/EOSname/<fname>_<EOSname>_beta..._m..._lambda...``.

    Returns
    -------
    numpy.ndarray, shape (n, 2)
        the (col_x, col_y) columns of the file.
    """
    EOSmodel = "_".join( [
        fname,
        EOSname,
        "beta{:.3e}".format(EOSbeta),
        "m{:.3e}".format(EOSm),
        "lambda{:.3e}".format(EOSlambda)
    ] )
    fullPathModel = os.path.join( fpath, EOSname, EOSmodel)
    print("\n Will load data for: \n\t {} \n".format(fullPathModel))
    # TODO if file not exists what do
    # ndmin=2 keeps the result two-dimensional even for a single-row file;
    # without it np.loadtxt returns a 1-D array and every caller that
    # slices data[:,1] would raise IndexError.
    data = np.loadtxt(fullPathModel, comments="#", delimiter=" ",
        usecols=(col_x, col_y), ndmin=2)
    return data
def convert_NumScientific(num):
    """
    Render *num* as a LaTeX scientific-notation string for legend labels.

    Non-zero values become " $m \\times 10^{e}$" with integer mantissa and
    exponent taken from the 2-decimal scientific representation; a zero
    mantissa collapses to " $0$".
    """
    mantissa_str, exponent_str = "{:.2e}".format(num).split("e")
    mantissa = float(mantissa_str)
    exponent = float(exponent_str)
    if mantissa:
        return " ${{{:.0f}}} \\times 10^{{{:.0f}}}$".format(mantissa, exponent)
    return " ${{{:.0f}}}$".format(mantissa)
def plot_MvsR_GR(config, GRonly = False):
    """
    Plot mass-vs-radius curves for every (EOS, beta, m, lambda) combination
    in *config*, always overlaying the corresponding GR (beta=m=lambda=0)
    curve, and save the figure as an EPS file.

    With GRonly=True only the GR curves are drawn (and the style legends
    for lambda/m are omitted); the output file name changes accordingly.
    Relies on the module-level ``units``, ``map_ms``, ``map_ls`` and
    ``map_c`` having been populated beforehand.
    """
    config = load_YvsX_config(config)
    ax = get_YvsX_ax()
    set_axYvsX_parms(ax, x_label = config["x_label"], y_label = config["y_label"])
    for model in itertools.product( config["eos_name"], config["beta"],
        config["m"], config["lambda"] ):
        min_mass = 0.5
        # TODO GRonly variable should go to the config file
        if not GRonly:
            data = get_YvsX_data(
                config["path"], config["base_name"], model[0],
                model[1], model[2], model[3], config["x_col"], config["y_col"]
            )
            #~ remove entries who have mass less than min_mass
            data = data[~(data[:,1]<min_mass), :]
            #~ include only "stable" masses, those before the max mass
            #~ data = np.delete(data, np.s_[np.argmax(data[:,1]):], axis=0)
            ax.plot(
                data[:,0]*units.get(config["x_unit"], 1),
                data[:,1]*units.get(config["y_unit"], 1),
                label = None,
                markevery = 0.1,
                marker = map_ms.get(model[0], None),
                color = map_c.get(model[2], None),
                linestyle = map_ls.get(model[3], None)
            )
        #~ NOW SAME PROCEDURE BUT FOR GR CASE
        data = get_YvsX_data(
            config["path"], config["base_name"], model[0],
            0, 0, 0, config["x_col"], config["y_col"]
        )
        data = data[~(data[:,1]<min_mass), :]
        #~ data = np.delete(data, np.s_[np.argmax(data[:,1]):], axis=0)
        # zorder=100 keeps the GR reference curve on top
        ax.plot(
            data[:,0]*units.get(config["x_unit"], 1),
            data[:,1]*units.get(config["y_unit"], 1),
            label = None,
            markevery = 0.1,
            marker = map_ms.get(model[0], None),
            color = map_c.get("GR", None),
            linestyle = map_ls.get("GR", None),
            zorder = 100
        )
    # legend proxies: one marker entry per EOS ...
    handle_EOSnames = [
        Line2D(
            [], [],
            color = "k",
            marker = map_ms.get( _, None),
            linewidth = 0,
            linestyle = None,
            label = _
        ) for _ in config["eos_name"]
    ]
    # ... one linestyle entry per lambda value (skipped for GR-only plots)
    handle_linestyles = [
        Line2D(
            [], [],
            color = "k",
            marker = None,
            linestyle = map_ls.get(_, None),
            label = (
                "$\lambda = {}$".format(_)
                if _ != "GR" else "GR" )
        ) for _ in [ *config["lambda"], "GR" ]
    ] if not GRonly else []
    # ... and one colour patch per m value (skipped for GR-only plots)
    handle_markers = [
        mpatches.Patch(
            color = map_c.get(_, None),
            label = (
                "$m= $ {}".format(convert_NumScientific(_))
                if _ != "GR" else "GR"
            )
        ) for _ in [ *config["m"], "GR" ]
    ] if not GRonly else []
    # lets add a legend for marker styles
    ax.add_artist( ax.legend(
        handles = [
            *handle_EOSnames, *handle_linestyles, *handle_markers
        ],
        loc = "lower right",
        ncol = 1,
        frameon = True,
        markerscale = 0.6,
        fancybox=True,
        framealpha = 0.5,
        handlelength = 4,
    ) )
    # hard-coded axis window: R in [7.75, 15.25] km, M in [0.25, 3] Msun
    ax.set_xlim(7.75, 15.25)
    ax.set_ylim(0.25, 3)
    plt.savefig(
        'MvsR_GR.eps' if GRonly else "MvsR_STT_GR.eps",
        format="eps",
        dpi=600,
        pad_inches=0,
        bbox_inches='tight',
        papertype = "a4"
    )
    plt.show()
    return
def create_uniI_data(config):
    """
    Read the (R, M, J) columns of every model file selected by *config*,
    filter out low-mass and unstable branches, and write the two universal
    moment-of-inertia data sets ("tildeI": I/(M R^2) and "barI": I/M^3,
    both versus compactness M/R) into a "Fitting" subdirectory next to the
    source data.
    """
    def _load_config(config):
        """
        the input is a list of tuples, each of them having as first element as
        entry key and the second a string, which should be converted to a list
        basically this takes the confg as a list of tuples and converts it to
        dictionary
        """
        #~ keys which values should be converted to numbers using eval
        eval_keys = [ "beta", "m", "lambda", "col_r", "col_m", "col_j" ]
        #~ keys which values should be lists
        list_keys = [ "eos_name", "beta", "m", "lambda" ]
        config_dict = {}
        for entry in config:
            if entry[0] in eval_keys and entry[0] in list_keys:
                config_dict.update(
                    { entry[0]: [
                        eval(_) for _ in entry[1].split(",") if _.strip()
                    ] }
                )
            elif entry[0] not in eval_keys and entry[0] in list_keys:
                config_dict.update(
                    { entry[0]: [
                        _.strip() for _ in entry[1].split(",") if _.strip()
                    ] }
                )
            elif entry[0] in eval_keys:
                config_dict.update(
                    { entry[0]: eval(entry[1]) }
                )
            else:
                config_dict.update(
                    { entry[0]: entry[1].strip() }
                )
        return config_dict
    def _get_data( fpath, fname, EOSname, EOSbeta, EOSm, EOSlambda, col_r,
        col_m, col_j ):
        """
        retrieve the data of the model
        """
        EOSmodel = "_".join( [
            fname,
            EOSname,
            "beta{:.3e}".format(EOSbeta),
            "m{:.3e}".format(EOSm),
            "lambda{:.3e}".format(EOSlambda)
        ] )
        fullPathModel = os.path.join( fpath, EOSname, EOSmodel)
        print("\n Will load data for: \n\t {} \n".format(fullPathModel))
        # TODO if file not exists what do
        data = np.loadtxt(fullPathModel, comments="#", delimiter=" ",
            usecols=(col_r, col_m, col_j))
        return data
    config = _load_config(config)
    for model in itertools.product( config["eos_name"], config["beta"],
        config["m"], config["lambda"] ):
        data = _get_data(
            config["path"], config["base_name"], model[0],
            model[1], model[2], model[3],
            config["col_r"], config["col_m"], config["col_j"]
        )
        #remove entries who have mass less than min_mass
        min_mass = 0.5
        data = data[~(data[:,1]<min_mass), :]
        #include only "stable" masses, those before the max mass
        data = np.delete(data, np.s_[np.argmax(data[:,1]):], axis=0)
        EOSmodel = "_".join( [
            config["base_name"],
            model[0],
            "beta{:.3e}".format(model[1]),
            "m{:.3e}".format(model[2]),
            "lambda{:.3e}".format(model[3]),
        ] )
        # output goes into <path>/<EOS>/Fitting, created on demand
        fullModelPath = os.path.join(
            config["path"], model[0], "Fitting"
        )
        pathlib.Path( fullModelPath ).mkdir(parents=True, exist_ok=True)
        # tildeI data set: compactness M/R vs I/(M R^2)
        target = os.path.join(fullModelPath, "_".join([EOSmodel, "tildeI"]))
        print("\n\t will convert universal I \n\t\t {} \n".format( target ) )
        np.savetxt(
            target,
            np.column_stack( (
                data[:,1]/data[:,0], data[:,2]/(data[:,1]*data[:,0]**2)
            ) ),
            delimiter = " ",
            newline = "\n",
            header = "M/R I/(MR**2)",
            comments="# "
        )
        # barI data set: compactness M/R vs I/M^3
        target = os.path.join(fullModelPath, "_".join([EOSmodel, "barI"]))
        print("\n\t will convert universal I \n\t\t {} \n".format( target ) )
        np.savetxt(
            target,
            np.column_stack( (
                data[:,1]/data[:,0], data[:,2]/(data[:,1]**3)
            ) ),
            delimiter = " ",
            header = "M/R I/(M**3)",
            comments="# "
        )
    return
def plot_tildeI_GR_zoomBox(config):
    """
    Produce the universal tildeI plot with a 3x zoomed inset: upper panel
    shows the per-EOS data and the fitted relations for every (beta, m,
    lambda) combination plus GR, the lower panel shows the relative
    residuals on a log scale.  Fit summaries are printed and the figure is
    saved as 'TildeI_all.eps'.

    Relies on the module-level style maps (map_ms/map_ls/map_c) being
    populated beforehand.
    """
    config = load_YvsX_config(config)
    ax_up, ax_down = get_uniI_ax()
    # 3x magnified inset, loc=8 -> lower centre of the upper panel
    ax_in = zoomed_inset_axes(ax_up, 3, loc=8)
    set_axYvsX_parms(ax_up, x_label = "", y_label = config["y_up_label"])
    set_axYvsX_parms(ax_down, x_label = config["x_label"], y_label = config["y_down_label"])
    ax_down.set_yscale("log")
    FitResults = {}
    for pars in itertools.product(config["beta"], config["m"], config["lambda"]):
        FitResults = plotMarkers_getFits_uniI(config, pars, "tildeI", ax_up, ax_down, ax_in)
    # add the GR reference (beta = m = lambda = 0)
    FitResults.update(plotMarkers_getFits_uniI(config, [0,0,0], "tildeI", ax_up, ax_down, ax_in))
    for k, v in FitResults.items():
        print("\n{}\n{}".format(k, v))
    # legend proxies: one marker entry per EOS
    handle_EOSnames = [
        Line2D(
            [], [],
            color = "k",
            marker = map_ms.get( _, None),
            linewidth = 0,
            linestyle = None,
            label = _
        ) for _ in config["eos_name"]
    ]
    # one linestyle entry per lambda value
    handle_linestyles = [
        Line2D(
            [], [],
            color = "k",
            marker = None,
            linestyle = map_ls.get(_, None),
            label = (
                "$\lambda = {}$".format(_)
                if _ != "GR" else "GR" )
        ) for _ in [ *config["lambda"], "GR" ]
    ]
    # colors will be presented as patches
    handle_colors = [
        mpatches.Patch(
            color = map_c.get(_, None),
            label = (
                "$m= $ {}".format(convert_NumScientific(_))
                if _ != "GR" else "GR"
            )
        ) for _ in [ *config["m"], "GR" ]
    ]
    # colors will be presented as solid lines
    #~ handle_colors = [
        #~ Line2D(
            #~ [], [],
            #~ color = map_c.get(_, None),
            #~ marker = None,
            #~ label = (
                #~ "$\lambda = {}$".format(_)
                #~ if _ != "GR" else "GR" )
        #~ ) for _ in [ *config["lambda"], "GR" ]
    #~ ]
    # three separate legends: EOS markers outside the axes, ...
    legend_markers = ax_up.legend(
        handles = [ *handle_EOSnames ],
        loc = 2,
        ncol = 1,
        frameon = False,
        markerscale = 0.6,
        fancybox=True,
        #~ framealpha = 0.5,
        handlelength = 4,
        bbox_to_anchor=(1, 1),
        #~ borderaxespad=0.1,
    )
    # ... line patterns (lambda values), ...
    legend_linestyles = ax_up.legend(
        handles = [ *handle_linestyles ],
        loc = 2,
        ncol = 1,
        frameon = True,
        markerscale = 0.6,
        fancybox=True,
        framealpha = 0.5,
        handlelength = 4,
        title = "Line patterns"
        #~ bbox_to_anchor=(0.79, 0.5),
        #~ borderaxespad=0.1
    )
    #~ depricated way of seting font size of tittle of legend
    #~ seem not the way any more in matplotlib 3
    #~ legend_linestyles.set_title('location', prop={"size":10})
    legend_linestyles.get_title().set_fontsize(10)
    # ... and line colours (m values)
    legend_colors = ax_up.legend(
        handles = [ *handle_colors ],
        loc = 4,
        ncol = 1,
        frameon = True,
        markerscale = 0.6,
        fancybox=True,
        framealpha = 0.5,
        handlelength = 4,
        title = "Line colours"
        #~ bbox_to_anchor=(0.99, 0.22),
        #~ borderaxespad=0.1
    )
    legend_colors.get_title().set_fontsize(10)
    # re-add the legends: each ax.legend() call replaces the previous one,
    # so they must be attached explicitly as artists to coexist
    ax_up.add_artist(legend_markers)
    ax_up.add_artist( legend_linestyles )
    ax_up.add_artist( legend_colors )
    ax_up.set_xlim(0.09, 0.325)
    ax_up.set_ylim(0.28, 0.55)
    # zoom window of the inset (ticks hidden, region marked on the parent)
    ax_in.set_xlim(0.255, 0.275)
    ax_in.set_ylim(0.43, 0.45)
    ax_in.xaxis.set_visible(False)
    ax_in.yaxis.set_visible(False)
    #~ ax_in.xaxis.set_tick_params(labelsize=8)
    #~ ax_in.yaxis.set_tick_params(labelsize=8)
    #~ ax_in.tick_params(axis="both",direction="in", pad=-10)
    mark_inset(
        ax_up, ax_in,
        loc1=2, loc2=4,
        fc="black", ec="black",
        zorder=110
    )
    ax_down.set_ylim(1e-3, 1.5e0)
    #~ ax_down.axhline(y=0.1, linewidth=2, color='r', alpha = 0.5)
    plt.savefig(
        'TildeI_all.eps', format="eps",
        dpi=600,
        pad_inches=0,
        bbox_inches='tight',
        papertype = "a4",
        bbox_extra_artists=(legend_markers,),
    )
    plt.show()
    return
def plot_tildeI_GR(config):
    """Plot the universal tilde-I relation for the configured models, GR included.

    Parameters
    ----------
    config : iterable of (key, value) pairs
        Raw config-section items; normalised by load_YvsX_config. Used keys:
        "beta", "m", "lambda", "eos_name", axis labels, and the data path
        consumed by plotMarkers_getFits_uniI.

    Side effects: draws on the shared up/down axes, prints the fit results,
    saves 'TildeI_lambda1e-1.eps' and shows the figure.
    """
    config = load_YvsX_config(config)
    ax_up, ax_down = get_uniI_ax()
    set_axYvsX_parms(ax_up, x_label = "", y_label = config["y_up_label"])
    set_axYvsX_parms(ax_down, x_label = config["x_label"], y_label = config["y_down_label"])
    ax_down.set_yscale("log")
    # BUG FIX: the loop previously rebound FitResults on every iteration, so
    # only the fits of the last (beta, m, lambda) combination survived to the
    # printout below; accumulate with update() instead.
    FitResults = {}
    for pars in itertools.product(config["beta"], config["m"], config["lambda"]):
        FitResults.update(plotMarkers_getFits_uniI(config, pars, "tildeI", ax_up, ax_down))
    FitResults.update(plotMarkers_getFits_uniI(config, [0,0,0], "tildeI", ax_up, ax_down))
    for k, v in FitResults.items():
        print("\n{}\n{}".format(k, v))
    # Proxy artists for the three independent legends (EOS marker shapes,
    # lambda line styles, m colours).
    handle_EOSnames = [
        Line2D(
            [], [],
            color = "k",
            marker = map_ms.get( _, None),
            linewidth = 0,
            linestyle = None,
            label = _
        ) for _ in config["eos_name"]
    ]
    handle_linestyles = [
        Line2D(
            [], [],
            color = "k",
            marker = None,
            linestyle = map_ls.get(_, None),
            label = (
                r"$\lambda = {}$".format(_)  # raw string: avoid invalid \l escape
                if _ != "GR" else "GR" )
        ) for _ in [ *config["lambda"], "GR" ]
    ]
    handle_colors = [
        mpatches.Patch(
            color = map_c.get(_, None),
            label = (
                "$m= $ {}".format(convert_NumScientific(_))
                if _ != "GR" else "GR"
            )
        ) for _ in [ *config["m"], "GR" ]
    ]
    legend_markers = ax_up.legend(
        handles = [ *handle_EOSnames ],
        loc = 2,
        ncol = 2,
        frameon = True,
        markerscale = 0.6,
        fancybox=True,
        framealpha = 0.5,
        handlelength = 4,
    )
    ax_up.add_artist(legend_markers)
    # A legend with only the GR entry plus one value carries no information,
    # hence the > 2 guards.
    if len(handle_linestyles) > 2:
        legend_linestyles = ax_up.legend(
            handles = [ *handle_linestyles ],
            loc = 4,
            ncol = 1,
            frameon = True,
            markerscale = 0.6,
            fancybox=True,
            framealpha = 0.5,
            handlelength = 4,
            title = r"$m_\varphi = 5\times10^{-3}$",
        )
        legend_linestyles.get_title().set_fontsize(10)
        ax_up.add_artist( legend_linestyles )
    if len(handle_colors) > 2:
        legend_colors = ax_up.legend(
            handles = [ *handle_colors ],
            loc = 4,
            ncol = 1,
            frameon = True,
            markerscale = 0.6,
            fancybox=True,
            framealpha = 0.5,
            handlelength = 4,
            title = r"$\lambda = 0.1$",
        )
        ax_up.add_artist( legend_colors )
        legend_colors.get_title().set_fontsize(10)
    ax_up.set_xlim(0.09, 0.325)
    ax_up.set_ylim(0.28, 0.55)
    ax_down.set_ylim(1e-3, 1.5e0)
    plt.savefig(
        'TildeI_lambda1e-1.eps', format="eps",
        dpi=600,
        pad_inches=0,
        bbox_inches='tight',
        papertype = "a4",  # NOTE(review): removed in matplotlib >= 3.9 -- drop when upgrading
        bbox_extra_artists=(legend_markers,)
    )
    plt.show()
    return
def plot_barI_GR_zoomBox(config):
    """Plot the universal bar-I relation with a zoomed inset box.

    Parameters
    ----------
    config : iterable of (key, value) pairs
        Raw config-section items; normalised by load_YvsX_config.

    Side effects: draws on the shared up/down axes plus a zoomed inset,
    prints the fit results, saves 'BarI_all.eps' and shows the figure.
    """
    config = load_YvsX_config(config)
    ax_up, ax_down = get_uniI_ax()
    # 5x magnification inset, anchored at the upper centre of ax_up.
    ax_in = zoomed_inset_axes(ax_up, 5, loc=9)
    set_axYvsX_parms(ax_up, x_label = "", y_label = config["y_up_label"])
    set_axYvsX_parms(ax_down, x_label = config["x_label"], y_label = config["y_down_label"])
    ax_down.set_yscale("log")
    # BUG FIX: the loop previously rebound FitResults on every iteration, so
    # only the fits of the last (beta, m, lambda) combination survived to the
    # printout below; accumulate with update() instead.
    FitResults = {}
    for pars in itertools.product(config["beta"], config["m"], config["lambda"]):
        FitResults.update(plotMarkers_getFits_uniI(config, pars, "barI", ax_up, ax_down, ax_in))
    FitResults.update(plotMarkers_getFits_uniI(config, [0,0,0], "barI", ax_up, ax_down, ax_in))
    for k, v in FitResults.items():
        print("\n{}\n{}".format(k, v))
    # Proxy artists for the three independent legends (EOS marker shapes,
    # lambda line styles, m colours as patches).
    handle_EOSnames = [
        Line2D(
            [], [],
            color = "k",
            marker = map_ms.get( _, None),
            linewidth = 0,
            linestyle = None,
            label = _
        ) for _ in config["eos_name"]
    ]
    handle_linestyles = [
        Line2D(
            [], [],
            color = "k",
            marker = None,
            linestyle = map_ls.get(_, None),
            label = (
                r"$\lambda = {}$".format(_)  # raw string: avoid invalid \l escape
                if _ != "GR" else "GR" )
        ) for _ in [ *config["lambda"], "GR" ]
    ]
    handle_colors = [
        mpatches.Patch(
            color = map_c.get(_, None),
            label = (
                "$m= $ {}".format(convert_NumScientific(_))
                if _ != "GR" else "GR"
            )
        ) for _ in [ *config["m"], "GR" ]
    ]
    legend_markers = ax_up.legend(
        handles = [ *handle_EOSnames ],
        loc = 2,
        ncol = 1,
        frameon = False,
        markerscale = 0.6,
        fancybox=True,
        handlelength = 4,
        bbox_to_anchor=(1, 1),
    )
    legend_linestyles = ax_up.legend(
        handles = [ *handle_linestyles ],
        loc = 1,
        ncol = 1,
        frameon = True,
        markerscale = 0.6,
        fancybox=True,
        framealpha = 0.5,
        handlelength = 4,
        title = "Line patterns",
    )
    # legend.set_title('...', prop={"size": 10}) is the deprecated way;
    # set the title font size on the title Text artist directly.
    legend_linestyles.get_title().set_fontsize(10)
    legend_colors = ax_up.legend(
        handles = [ *handle_colors ],
        loc = 3,
        ncol = 1,
        frameon = True,
        markerscale = 0.6,
        fancybox=True,
        framealpha = 0.5,
        handlelength = 4,
        title = "Line colours",
    )
    legend_colors.get_title().set_fontsize(10)
    # Keep this legend above the inset connector lines drawn at zorder 110.
    legend_colors.set_zorder(115)
    ax_up.add_artist(legend_markers)
    ax_up.add_artist( legend_linestyles )
    ax_up.add_artist( legend_colors )
    ax_up.set_xlim(0.09, 0.325)
    ax_up.set_ylim(4.75, 35)
    ax_in.set_xlim(0.185, 0.205)
    ax_in.set_ylim(9, 11.5)
    ax_in.xaxis.set_visible(False)
    ax_in.yaxis.set_visible(False)
    mark_inset(
        ax_up, ax_in,
        loc1=3, loc2=4,
        fc="black", ec="black",
        zorder=110
    )
    ax_down.set_ylim(1e-3, 1.5e0)
    plt.savefig(
        'BarI_all.eps', format="eps",
        dpi=600,
        pad_inches=0,
        bbox_inches='tight',
        papertype = "a4",  # NOTE(review): removed in matplotlib >= 3.9 -- drop when upgrading
        bbox_extra_artists=(legend_markers,),
    )
    plt.show()
    return
def plot_barI_GR(config):
    """Plot the universal bar-I relation for the configured models, GR included.

    Parameters
    ----------
    config : iterable of (key, value) pairs
        Raw config-section items; normalised by load_YvsX_config.

    Side effects: draws on the shared up/down axes, prints the fit results,
    saves 'BarI_lambda1e-1.eps' and shows the figure.
    """
    config = load_YvsX_config(config)
    ax_up, ax_down = get_uniI_ax()
    set_axYvsX_parms(ax_up, x_label = "", y_label = config["y_up_label"])
    set_axYvsX_parms(ax_down, x_label = config["x_label"], y_label = config["y_down_label"])
    ax_down.set_yscale("log")
    # BUG FIX: the loop previously rebound FitResults on every iteration, so
    # only the fits of the last (beta, m, lambda) combination survived to the
    # printout below; accumulate with update() instead.
    FitResults = {}
    for pars in itertools.product(config["beta"], config["m"], config["lambda"]):
        FitResults.update(plotMarkers_getFits_uniI(config, pars, "barI", ax_up, ax_down))
    FitResults.update(plotMarkers_getFits_uniI(config, [0,0,0], "barI", ax_up, ax_down))
    for k, v in FitResults.items():
        print("\n{}\n{}".format(k, v))
    # Proxy artists for the three independent legends (EOS marker shapes,
    # lambda line styles, m colours).
    handle_EOSnames = [
        Line2D(
            [], [],
            color = "k",
            marker = map_ms.get( _, None),
            linewidth = 0,
            linestyle = None,
            label = _
        ) for _ in config["eos_name"]
    ]
    handle_linestyles = [
        Line2D(
            [], [],
            color = "k",
            marker = None,
            linestyle = map_ls.get(_, None),
            label = (
                r"$\lambda = {}$".format(_)  # raw string: avoid invalid \l escape
                if _ != "GR" else "GR" )
        ) for _ in [ *config["lambda"], "GR" ]
    ]
    handle_colors = [
        mpatches.Patch(
            color = map_c.get(_, None),
            label = (
                "$m= $ {}".format(convert_NumScientific(_))
                if _ != "GR" else "GR"
            )
        ) for _ in [ *config["m"], "GR" ]
    ]
    legend_markers = ax_up.legend(
        handles = [ *handle_EOSnames ],
        loc = 9,
        ncol = 3,
        frameon = True,
        markerscale = 0.6,
        fancybox=True,
        framealpha = 0.5,
        handlelength = 4,
        bbox_to_anchor=(0.45, 1),
    )
    ax_up.add_artist(legend_markers)
    # A legend with only the GR entry plus one value carries no information,
    # hence the > 2 guards.
    if len(handle_linestyles) > 2:
        legend_linestyles = ax_up.legend(
            handles = [ *handle_linestyles ],
            loc = 3,
            ncol = 1,
            frameon = True,
            markerscale = 0.6,
            fancybox=True,
            framealpha = 0.5,
            handlelength = 4,
            title = r"$m_\varphi = 5\times10^{-3}$",
        )
        legend_linestyles.get_title().set_fontsize(10)
        ax_up.add_artist( legend_linestyles )
    if len(handle_colors) > 2:
        legend_colors = ax_up.legend(
            handles = [ *handle_colors ],
            loc = 1,
            ncol = 1,
            frameon = True,
            markerscale = 0.6,
            fancybox=True,
            framealpha = 0.5,
            handlelength = 4,
            title = r"$\lambda = 0.1$",
        )
        ax_up.add_artist( legend_colors )
        legend_colors.get_title().set_fontsize(10)
    ax_up.set_xlim(0.09, 0.325)
    ax_up.set_ylim(4.75, 35)
    ax_down.set_ylim(1e-3, 1.5e0)
    plt.savefig(
        'BarI_lambda1e-1.eps', format="eps",
        dpi=600,
        pad_inches=0,
        bbox_inches='tight',
        papertype = "a4",  # NOTE(review): removed in matplotlib >= 3.9 -- drop when upgrading
        bbox_extra_artists=(legend_markers,)
    )
    plt.show()
    return
if __name__ == "__main__":
    def _parser_init():
        """Build the command-line parser.

        Every ``--<regime>`` flag uses ``store_const`` into the same
        ``ConfigSection`` destination, so the flags override one another
        (the last one given wins) and exactly one regime is selected.
        """
        parser = argparse.ArgumentParser(
            prog = "My personal result plotter",
            description="""
            Quick plotting tool to suit my needs. Basically it has limited regimes of
            work, each corresponding to different keyword and thus different
            configuration settings and output.
            """
        )
        # the config file
        parser.add_argument(
            "--config",
            action = "store",
            nargs = "?",
            type = str,
            default = "result_plotter.config",
            const = "result_plotter.config",
            metavar = "path to config file",
            dest = "ConfigFile",
            help = "path to configuration file for each option, presented as args"
        )
        # MvsR with GR included
        parser.add_argument(
            "--MvsR_GR",
            action = "store_const",
            const = "MvsR_GR",
            dest = "ConfigSection",
            required = False,
            help = """
            if called, it will use MvsR_GR section in config file to produce
            a plot
            """
        )
        # creating all uni I in separated directories
        parser.add_argument(
            "--create_uniI_data",
            action = "store_const",
            const = "create_uniI_data",
            dest = "ConfigSection",
            required = False,
            help = """
            if called, it will use just create all uni I files needed to
            create the universality plots and read path to results from
            create_uniI_data section of the config file
            """
        )
        # create tildeI_GR
        parser.add_argument(
            "--tildeI_GR",
            action = "store_const",
            const = "tildeI_GR",
            dest = "ConfigSection",
            required = False,
            help = """
            if called, it will use tildeI_GR section in config file to produce
            a plot
            """
        )
        # create tildeI_GR_zoomBox
        parser.add_argument(
            "--tildeI_GR_zoomBox",
            action = "store_const",
            const = "tildeI_GR_zoomBox",
            dest = "ConfigSection",
            required = False,
            help = """
            it will use the tilde I section
            """
        )
        # create barI_GR_zoomBox
        parser.add_argument(
            "--barI_GR_zoomBox",
            action = "store_const",
            const = "barI_GR_zoomBox",
            dest = "ConfigSection",
            required = False,
            help = """
            it will use the bar I section
            """
        )
        # create barI_GR
        parser.add_argument(
            "--barI_GR",
            action = "store_const",
            const = "barI_GR",
            dest = "ConfigSection",
            required = False,
            help = """
            if called, it will use barI_GR section in config file to produce
            a plot
            """
        )
        return parser
    def _get_config(ConfigFile):
        """Parse *ConfigFile*; raise FileNotFoundError when it cannot be read.

        ConfigParser.read() silently returns an empty list for missing or
        unreadable files, which previously let the script continue with empty
        sections (the old TODO about missing input control).
        """
        config = configparser.ConfigParser()
        if not config.read(ConfigFile):
            raise FileNotFoundError(
                "cannot read config file: {}".format(ConfigFile)
            )
        return config
    units_coef_calc()
    map_ms_ls_c()
    args = _parser_init().parse_args()
    config = _get_config(args.ConfigFile)
    # Map each CLI regime to its handler and the config section it reads.
    # Note: both zoomBox regimes deliberately reuse the non-zoom sections.
    _dispatch = {
        "MvsR_GR": (plot_MvsR_GR, "MvsR_GR"),
        "create_uniI_data": (create_uniI_data, "create_uniI_data"),
        "tildeI_GR": (plot_tildeI_GR, "tildeI_GR"),
        #TODO Documentation for the zoomBox regimes needed
        "tildeI_GR_zoomBox": (plot_tildeI_GR_zoomBox, "tildeI_GR"),
        "barI_GR_zoomBox": (plot_barI_GR_zoomBox, "barI_GR"),
        "barI_GR": (plot_barI_GR, "barI_GR"),
    }
    if args.ConfigSection in _dispatch:
        _handler, _section = _dispatch[args.ConfigSection]
        _handler(config.items(_section))
    else:
        # BUG FIX: the section name was passed as a second print() argument,
        # so the "{}" placeholder was printed literally; format it instead.
        print("\n {} unknown, terminating... \n".format(args.ConfigSection))
| [
"numpy.sum",
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.empty",
"pathlib.Path",
"mpl_toolkits.axes_grid1.inset_locator.mark_inset",
"os.path.join",
"numpy.append",
"numpy.loadtxt",
"matplotlib.pyplot.rc",
"numpy.linspace",
"itertools.product",
"configparser.ConfigParser",
"matplotli... | [((5320, 5349), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(16)'}), "('xtick', labelsize=16)\n", (5326, 5349), True, 'import matplotlib.pyplot as plt\n'), ((5354, 5383), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(16)'}), "('ytick', labelsize=16)\n", (5360, 5383), True, 'import matplotlib.pyplot as plt\n'), ((5389, 5412), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(15)'}), "('font', size=15)\n", (5395, 5412), True, 'import matplotlib.pyplot as plt\n'), ((5418, 5454), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'figsize': '(7.2, 4.8)'}), "('figure', figsize=(7.2, 4.8))\n", (5424, 5454), True, 'import matplotlib.pyplot as plt\n'), ((5459, 5487), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': '(16)'}), "('axes', titlesize=16)\n", (5465, 5487), True, 'import matplotlib.pyplot as plt\n'), ((5492, 5520), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': '(16)'}), "('axes', labelsize=16)\n", (5498, 5520), True, 'import matplotlib.pyplot as plt\n'), ((5526, 5554), 'matplotlib.pyplot.rc', 'plt.rc', (['"""lines"""'], {'linewidth': '(2)'}), "('lines', linewidth=2)\n", (5532, 5554), True, 'import matplotlib.pyplot as plt\n'), ((5559, 5589), 'matplotlib.pyplot.rc', 'plt.rc', (['"""lines"""'], {'markersize': '(10)'}), "('lines', markersize=10)\n", (5565, 5589), True, 'import matplotlib.pyplot as plt\n'), ((5595, 5623), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': '(8)'}), "('legend', fontsize=8)\n", (5601, 5623), True, 'import matplotlib.pyplot as plt\n'), ((5629, 5656), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (5635, 5656), True, 'import matplotlib.pyplot as plt\n'), ((5661, 5695), 'matplotlib.pyplot.rc', 'plt.rc', (['"""mathtext"""'], {'fontset': '"""stix"""'}), "('mathtext', fontset='stix')\n", (5667, 5695), True, 'import matplotlib.pyplot as plt\n'), ((5739, 5775), 
'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""STIXGeneral"""'}), "('font', family='STIXGeneral')\n", (5745, 5775), True, 'import matplotlib.pyplot as plt\n'), ((5791, 5805), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5803, 5805), True, 'import matplotlib.pyplot as plt\n'), ((5981, 6010), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(16)'}), "('xtick', labelsize=16)\n", (5987, 6010), True, 'import matplotlib.pyplot as plt\n'), ((6015, 6044), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(16)'}), "('ytick', labelsize=16)\n", (6021, 6044), True, 'import matplotlib.pyplot as plt\n'), ((6050, 6073), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(15)'}), "('font', size=15)\n", (6056, 6073), True, 'import matplotlib.pyplot as plt\n'), ((6079, 6115), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'figsize': '(7.2, 4.8)'}), "('figure', figsize=(7.2, 4.8))\n", (6085, 6115), True, 'import matplotlib.pyplot as plt\n'), ((6120, 6148), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': '(16)'}), "('axes', titlesize=16)\n", (6126, 6148), True, 'import matplotlib.pyplot as plt\n'), ((6153, 6181), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': '(16)'}), "('axes', labelsize=16)\n", (6159, 6181), True, 'import matplotlib.pyplot as plt\n'), ((6187, 6215), 'matplotlib.pyplot.rc', 'plt.rc', (['"""lines"""'], {'linewidth': '(2)'}), "('lines', linewidth=2)\n", (6193, 6215), True, 'import matplotlib.pyplot as plt\n'), ((6220, 6250), 'matplotlib.pyplot.rc', 'plt.rc', (['"""lines"""'], {'markersize': '(10)'}), "('lines', markersize=10)\n", (6226, 6250), True, 'import matplotlib.pyplot as plt\n'), ((6256, 6284), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': '(8)'}), "('legend', fontsize=8)\n", (6262, 6284), True, 'import matplotlib.pyplot as plt\n'), ((6290, 6317), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': 
'(True)'}), "('text', usetex=True)\n", (6296, 6317), True, 'import matplotlib.pyplot as plt\n'), ((6322, 6356), 'matplotlib.pyplot.rc', 'plt.rc', (['"""mathtext"""'], {'fontset': '"""stix"""'}), "('mathtext', fontset='stix')\n", (6328, 6356), True, 'import matplotlib.pyplot as plt\n'), ((6400, 6436), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""STIXGeneral"""'}), "('font', family='STIXGeneral')\n", (6406, 6436), True, 'import matplotlib.pyplot as plt\n'), ((7060, 7118), 'os.path.join', 'os.path.join', (['fpath_main', 'EOSname', 'fpath_fitting', 'EOSmodel'], {}), '(fpath_main, EOSname, fpath_fitting, EOSmodel)\n', (7072, 7118), False, 'import os\n'), ((7240, 7318), 'numpy.loadtxt', 'np.loadtxt', (['fullPathModel'], {'comments': '"""#"""', 'delimiter': '""" """', 'usecols': '(col_x, col_y)'}), "(fullPathModel, comments='#', delimiter=' ', usecols=(col_x, col_y))\n", (7250, 7318), True, 'import numpy as np\n'), ((8669, 8691), 'numpy.empty', 'np.empty', ([], {'shape': '(0, 2)'}), '(shape=(0, 2))\n', (8677, 8691), True, 'import numpy as np\n'), ((13163, 13191), 'numpy.linspace', 'np.linspace', (['(0.09)', '(0.35)', '(100)'], {}), '(0.09, 0.35, 100)\n', (13174, 13191), True, 'import numpy as np\n'), ((15916, 15954), 'os.path.join', 'os.path.join', (['fpath', 'EOSname', 'EOSmodel'], {}), '(fpath, EOSname, EOSmodel)\n', (15928, 15954), False, 'import os\n'), ((16076, 16154), 'numpy.loadtxt', 'np.loadtxt', (['fullPathModel'], {'comments': '"""#"""', 'delimiter': '""" """', 'usecols': '(col_x, col_y)'}), "(fullPathModel, comments='#', delimiter=' ', usecols=(col_x, col_y))\n", (16086, 16154), True, 'import numpy as np\n'), ((16736, 16825), 'itertools.product', 'itertools.product', (["config['eos_name']", "config['beta']", "config['m']", "config['lambda']"], {}), "(config['eos_name'], config['beta'], config['m'], config[\n 'lambda'])\n", (16753, 16825), False, 'import itertools\n'), ((19735, 19872), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(["('MvsR_GR.eps' if GRonly else 'MvsR_STT_GR.eps')"], {'format': '"""eps"""', 'dpi': '(600)', 'pad_inches': '(0)', 'bbox_inches': '"""tight"""', 'papertype': '"""a4"""'}), "('MvsR_GR.eps' if GRonly else 'MvsR_STT_GR.eps', format='eps',\n dpi=600, pad_inches=0, bbox_inches='tight', papertype='a4')\n", (19746, 19872), True, 'import matplotlib.pyplot as plt\n'), ((19930, 19940), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19938, 19940), True, 'import matplotlib.pyplot as plt\n'), ((22134, 22223), 'itertools.product', 'itertools.product', (["config['eos_name']", "config['beta']", "config['m']", "config['lambda']"], {}), "(config['eos_name'], config['beta'], config['m'], config[\n 'lambda'])\n", (22151, 22223), False, 'import itertools\n'), ((24076, 24110), 'mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes', 'zoomed_inset_axes', (['ax_up', '(3)'], {'loc': '(8)'}), '(ax_up, 3, loc=8)\n', (24093, 24110), False, 'from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes\n'), ((24347, 24411), 'itertools.product', 'itertools.product', (["config['beta']", "config['m']", "config['lambda']"], {}), "(config['beta'], config['m'], config['lambda'])\n", (24364, 24411), False, 'import itertools\n'), ((27680, 27756), 'mpl_toolkits.axes_grid1.inset_locator.mark_inset', 'mark_inset', (['ax_up', 'ax_in'], {'loc1': '(2)', 'loc2': '(4)', 'fc': '"""black"""', 'ec': '"""black"""', 'zorder': '(110)'}), "(ax_up, ax_in, loc1=2, loc2=4, fc='black', ec='black', zorder=110)\n", (27690, 27756), False, 'from mpl_toolkits.axes_grid1.inset_locator import mark_inset\n'), ((27902, 28047), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""TildeI_all.eps"""'], {'format': '"""eps"""', 'dpi': '(600)', 'pad_inches': '(0)', 'bbox_inches': '"""tight"""', 'papertype': '"""a4"""', 'bbox_extra_artists': '(legend_markers,)'}), "('TildeI_all.eps', format='eps', dpi=600, pad_inches=0,\n bbox_inches='tight', papertype='a4', bbox_extra_artists=(legend_markers,))\n", (27913, 28047), 
True, 'import matplotlib.pyplot as plt\n'), ((28106, 28116), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28114, 28116), True, 'import matplotlib.pyplot as plt\n'), ((28469, 28533), 'itertools.product', 'itertools.product', (["config['beta']", "config['m']", "config['lambda']"], {}), "(config['beta'], config['m'], config['lambda'])\n", (28486, 28533), False, 'import itertools\n'), ((31584, 31736), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""TildeI_lambda1e-1.eps"""'], {'format': '"""eps"""', 'dpi': '(600)', 'pad_inches': '(0)', 'bbox_inches': '"""tight"""', 'papertype': '"""a4"""', 'bbox_extra_artists': '(legend_markers,)'}), "('TildeI_lambda1e-1.eps', format='eps', dpi=600, pad_inches=0,\n bbox_inches='tight', papertype='a4', bbox_extra_artists=(legend_markers,))\n", (31595, 31736), True, 'import matplotlib.pyplot as plt\n'), ((31794, 31804), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31802, 31804), True, 'import matplotlib.pyplot as plt\n'), ((31940, 31974), 'mpl_toolkits.axes_grid1.inset_locator.zoomed_inset_axes', 'zoomed_inset_axes', (['ax_up', '(5)'], {'loc': '(9)'}), '(ax_up, 5, loc=9)\n', (31957, 31974), False, 'from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes\n'), ((32211, 32275), 'itertools.product', 'itertools.product', (["config['beta']", "config['m']", "config['lambda']"], {}), "(config['beta'], config['m'], config['lambda'])\n", (32228, 32275), False, 'import itertools\n'), ((35569, 35645), 'mpl_toolkits.axes_grid1.inset_locator.mark_inset', 'mark_inset', (['ax_up', 'ax_in'], {'loc1': '(3)', 'loc2': '(4)', 'fc': '"""black"""', 'ec': '"""black"""', 'zorder': '(110)'}), "(ax_up, ax_in, loc1=3, loc2=4, fc='black', ec='black', zorder=110)\n", (35579, 35645), False, 'from mpl_toolkits.axes_grid1.inset_locator import mark_inset\n'), ((35791, 35934), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""BarI_all.eps"""'], {'format': '"""eps"""', 'dpi': '(600)', 'pad_inches': '(0)', 'bbox_inches': '"""tight"""', 
'papertype': '"""a4"""', 'bbox_extra_artists': '(legend_markers,)'}), "('BarI_all.eps', format='eps', dpi=600, pad_inches=0,\n bbox_inches='tight', papertype='a4', bbox_extra_artists=(legend_markers,))\n", (35802, 35934), True, 'import matplotlib.pyplot as plt\n'), ((35993, 36003), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36001, 36003), True, 'import matplotlib.pyplot as plt\n'), ((36354, 36418), 'itertools.product', 'itertools.product', (["config['beta']", "config['m']", "config['lambda']"], {}), "(config['beta'], config['m'], config['lambda'])\n", (36371, 36418), False, 'import itertools\n'), ((39463, 39613), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""BarI_lambda1e-1.eps"""'], {'format': '"""eps"""', 'dpi': '(600)', 'pad_inches': '(0)', 'bbox_inches': '"""tight"""', 'papertype': '"""a4"""', 'bbox_extra_artists': '(legend_markers,)'}), "('BarI_lambda1e-1.eps', format='eps', dpi=600, pad_inches=0,\n bbox_inches='tight', papertype='a4', bbox_extra_artists=(legend_markers,))\n", (39474, 39613), True, 'import matplotlib.pyplot as plt\n'), ((39671, 39681), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (39679, 39681), True, 'import matplotlib.pyplot as plt\n'), ((7950, 7998), 'numpy.polynomial.polynomial.polyfit', 'polyfit', ([], {'x': 'xp', 'y': 'yp', 'deg': '[1, 2, 3, 4]', 'full': '(True)'}), '(x=xp, y=yp, deg=[1, 2, 3, 4], full=True)\n', (7957, 7998), False, 'from numpy.polynomial.polynomial import polyfit\n'), ((9042, 9078), 'numpy.append', 'np.append', (['data_to_fit', 'data'], {'axis': '(0)'}), '(data_to_fit, data, axis=0)\n', (9051, 9078), True, 'import numpy as np\n'), ((10993, 11006), 'numpy.sum', 'np.sum', (['_data'], {}), '(_data)\n', (10999, 11006), True, 'import numpy as np\n'), ((11061, 11075), 'numpy.amax', 'np.amax', (['_data'], {}), '(_data)\n', (11068, 11075), True, 'import numpy as np\n'), ((21794, 21832), 'os.path.join', 'os.path.join', (['fpath', 'EOSname', 'EOSmodel'], {}), '(fpath, EOSname, EOSmodel)\n', (21806, 
21832), False, 'import os\n'), ((21966, 22055), 'numpy.loadtxt', 'np.loadtxt', (['fullPathModel'], {'comments': '"""#"""', 'delimiter': '""" """', 'usecols': '(col_r, col_m, col_j)'}), "(fullPathModel, comments='#', delimiter=' ', usecols=(col_r,\n col_m, col_j))\n", (21976, 22055), True, 'import numpy as np\n'), ((22940, 22989), 'os.path.join', 'os.path.join', (["config['path']", 'model[0]', '"""Fitting"""'], {}), "(config['path'], model[0], 'Fitting')\n", (22952, 22989), False, 'import os\n'), ((39765, 40076), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""My personal result plotter"""', 'description': '"""\n Quick plotting tool to suit my needs. Basically it has limited regimes of\n work, each corresponding to different keyword and thus different\n configuration settings and output.\n """'}), '(prog=\'My personal result plotter\', description=\n """\n Quick plotting tool to suit my needs. Basically it has limited regimes of\n work, each corresponding to different keyword and thus different\n configuration settings and output.\n """\n )\n', (39788, 40076), False, 'import argparse\n'), ((43742, 43769), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (43767, 43769), False, 'import configparser\n'), ((3846, 3858), 'matplotlib.colors.to_rgb', 'mc.to_rgb', (['c'], {}), '(c)\n', (3855, 3858), True, 'import matplotlib.colors as mc\n'), ((11165, 11179), 'numpy.amax', 'np.amax', (['_data'], {}), '(_data)\n', (11172, 11179), True, 'import numpy as np\n'), ((23294, 23385), 'numpy.column_stack', 'np.column_stack', (['(data[:, 1] / data[:, 0], data[:, 2] / (data[:, 1] * data[:, 0] ** 2))'], {}), '((data[:, 1] / data[:, 0], data[:, 2] / (data[:, 1] * data[:,\n 0] ** 2)))\n', (23309, 23385), True, 'import numpy as np\n'), ((23739, 23811), 'numpy.column_stack', 'np.column_stack', (['(data[:, 1] / data[:, 0], data[:, 2] / data[:, 1] ** 3)'], {}), '((data[:, 1] / data[:, 0], data[:, 2] / data[:, 1] ** 3))\n', (23754, 23811), 
True, 'import numpy as np\n'), ((7671, 7682), 'numpy.sqrt', 'np.sqrt', (['yp'], {}), '(yp)\n', (7678, 7682), True, 'import numpy as np\n'), ((9229, 9255), 'random.uniform', 'random.uniform', (['(0.14)', '(0.18)'], {}), '(0.14, 0.18)\n', (9243, 9255), False, 'import random\n'), ((11145, 11159), 'numpy.amax', 'np.amax', (['_data'], {}), '(_data)\n', (11152, 11159), True, 'import numpy as np\n'), ((11397, 11423), 'random.uniform', 'random.uniform', (['(0.14)', '(0.18)'], {}), '(0.14, 0.18)\n', (11411, 11423), False, 'import random\n'), ((23021, 23048), 'pathlib.Path', 'pathlib.Path', (['fullModelPath'], {}), '(fullModelPath)\n', (23033, 23048), False, 'import pathlib\n'), ((22656, 22677), 'numpy.argmax', 'np.argmax', (['data[:, 1]'], {}), '(data[:, 1])\n', (22665, 22677), True, 'import numpy as np\n')] |
from __future__ import print_function
from retinanet.dataloader2 import UnNormalizer
import numpy as np
import json
import os
import matplotlib
import matplotlib.pyplot as plt
import torch
import cv2
import pandas as pd
def compute_overlap(a, b):
    """Pairwise intersection-over-union between two sets of boxes.

    Parameters
    ----------
    a: (N, 4) ndarray of float, boxes as (x1, y1, x2, y2)
    b: (K, 4) ndarray of float, boxes as (x1, y1, x2, y2)
    Returns
    -------
    overlaps: (N, K) ndarray with IoU(a[i], b[j]) at position [i, j]
    """
    # Corners of the intersection rectangle, broadcast N against K.
    ix1 = np.maximum(a[:, 0][:, np.newaxis], b[:, 0])
    iy1 = np.maximum(a[:, 1][:, np.newaxis], b[:, 1])
    ix2 = np.minimum(a[:, 2][:, np.newaxis], b[:, 2])
    iy2 = np.minimum(a[:, 3][:, np.newaxis], b[:, 3])
    # Clip negative extents to zero for disjoint boxes.
    intersection = np.maximum(ix2 - ix1, 0) * np.maximum(iy2 - iy1, 0)
    area_a = ((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]))[:, np.newaxis]
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    # Guard against division by zero for degenerate boxes.
    union = np.maximum(area_a + area_b - intersection, np.finfo(float).eps)
    return intersection / union
def compute_ignore_overlap(a, b):
    """Intersection area normalised by the area of the boxes in *a*.

    Unlike IoU, the denominator is only a's own area, i.e. the returned
    value is the fraction of each a-box covered by each b-box.

    Parameters
    ----------
    a: (N, 4) ndarray of float, boxes as (x1, y1, x2, y2)
    b: (K, 4) ndarray of float, boxes as (x1, y1, x2, y2)
    Returns:
    --------
    overlaps: (N, K) ndarray of intersection(a[i], b[j]) / area(a[i])
    """
    # Corners of the intersection rectangle, broadcast N against K.
    ix1 = np.maximum(a[:, 0][:, np.newaxis], b[:, 0])
    iy1 = np.maximum(a[:, 1][:, np.newaxis], b[:, 1])
    ix2 = np.minimum(a[:, 2][:, np.newaxis], b[:, 2])
    iy2 = np.minimum(a[:, 3][:, np.newaxis], b[:, 3])
    # Clip negative extents to zero for disjoint boxes.
    intersection = np.maximum(ix2 - ix1, 0) * np.maximum(iy2 - iy1, 0)
    area_a = ((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]))[:, np.newaxis]
    # Guard against division by zero for degenerate boxes.
    area_a = np.maximum(area_a, np.finfo(float).eps)
    return intersection / area_a
def _compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Code originally from https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall: The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # Sentinel points so the curve spans the full recall range [0, 1].
    mrec = np.concatenate(([0.0], recall, [1.0]))
    mpre = np.concatenate(([0.0], precision, [0.0]))
    # Precision envelope: every point becomes the maximum precision seen at
    # any recall >= its own (a running maximum taken from the right).
    mpre = np.maximum.accumulate(mpre[::-1])[::-1]
    # Integrate (Delta recall) * precision only where recall changes value.
    steps = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[steps + 1] - mrec[steps]) * mpre[steps + 1])
def _get_detections(dataset, retinanet, C, score_threshold=0.05, max_detections=100, save_path=None):
    """ Get the detections from the retinanet using the generator.
    The result is a list of lists such that the size is:
        all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]
    # Arguments
        dataset         : The generator used to run images through the retinanet.
        retinanet       : The retinanet to run on the images.
        C               : Config object; C.channels_ind feeds the UnNormalizer.
        score_threshold : The score confidence threshold to use.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save the images with visualized detections to
                          (visualisation itself is currently disabled).
    # Returns
        A list of lists containing the detections for each image in the generator.
    """
    num_images = len(dataset)
    num_classes = dataset.num_classes()
    all_detections = [[None] * num_classes for _ in range(num_images)]
    # Kept for the (currently disabled) visualisation of detections.
    unnormalize = UnNormalizer(C.channels_ind)
    retinanet.eval()
    with torch.no_grad():
        for img_idx in range(num_images):
            sample = dataset[img_idx]
            img_scale = sample['scale']
            # HWC -> 1CHW float batch; move to GPU when available.
            batch = sample['img'].permute(2, 0, 1).float().unsqueeze(dim=0)
            if torch.cuda.is_available():
                batch = batch.cuda()
            scores, labels, boxes = retinanet(batch)
            scores = scores.cpu().numpy()
            labels = labels.cpu().numpy()
            boxes = boxes.cpu().numpy()
            # Undo the preprocessing resize so boxes live in original image coords.
            boxes /= img_scale
            keep = np.where(scores > score_threshold)[0]
            if keep.shape[0] == 0:
                for cls in range(num_classes):
                    all_detections[img_idx][cls] = np.zeros((0, 5))
            else:
                kept_scores = scores[keep]
                # Highest scores first, capped at max_detections.
                order = np.argsort(-kept_scores)[:max_detections]
                top_boxes = boxes[keep[order], :]
                top_scores = kept_scores[order]
                top_labels = labels[keep[order]]
                dets = np.concatenate(
                    [top_boxes, np.expand_dims(top_scores, axis=1), np.expand_dims(top_labels, axis=1)],
                    axis=1)
                # Split detections per class; drop the trailing label column.
                for cls in range(num_classes):
                    all_detections[img_idx][cls] = dets[dets[:, -1] == cls, :-1]
            print('{}/{}'.format(img_idx + 1, num_images), end='\r')
    return all_detections
def _get_annotations(generator):
""" Get the ground truth annotations from the generator.
The result is a list of lists such that the size is:
all_detections[num_images][num_classes] = annotations[num_detections, 5]
# Arguments
generator : The generator used to retrieve ground truth annotations.
# Returns
A list of lists containing the annotations for each image in the generator.
"""
all_annotations = [[None for i in range(generator.num_classes())] for j in range(len(generator))]
for i in range(len(generator)):
# load the annotations
annotations = generator.load_annotations(i)
# copy detections to all_annotations
for label in range(generator.num_classes()):
all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
print('{}/{}'.format(i + 1, len(generator)), end='\r')
return all_annotations
def evaluate(
    generator,
    retinanet,
    C, # config
    channel_cut=None,  # unused here; kept for backward compatibility (was a mutable [] default)
    iou_threshold=0.5,
    score_threshold=0.05,
    max_detections=100,
    save_path=None,
    ignore_class=False
):
    """ Evaluate a given dataset using a given retinanet.
    # Arguments
    generator : The generator that represents the dataset to evaluate.
    retinanet : The retinanet to evaluate.
    C : Config object; only its ``ID`` attribute is read here (output naming).
    channel_cut : Unused by this function; kept for API compatibility.
    iou_threshold : The threshold used to consider when a detection is positive or negative.
    score_threshold : The score confidence threshold to use for detections.
    max_detections : The maximum number of detections to use per image.
    save_path : The path to save precision recall curves and CSV summaries.
        If None, nothing is written to disk (previously this crashed with a
        TypeError because save_path was dereferenced unconditionally).
    ignore_class : If True, the last class index is treated as "ignore regions":
        detections overlapping an ignore region (overlap >= 0.75) count neither
        as true nor false positives.
    # Returns
    A dict mapping class labels to (average precision, num annotations) tuples.
    """
    # gather all detections and annotations
    all_detections = _get_detections(generator, retinanet, C, score_threshold=score_threshold, max_detections=max_detections, save_path=save_path)
    all_annotations = _get_annotations(generator)
    total_instances = []
    average_precisions = {}
    recalls = {}
    precisions = {}
    TPs = {}
    FPs = {}
    # the last class index is reserved for ignore regions and is not evaluated
    nb_classes = generator.num_classes() - 1
    ignore_index = nb_classes
    for label in range(nb_classes):
        false_positives = np.zeros((0,))
        true_positives = np.zeros((0,))
        scores = np.zeros((0,))
        num_annotations = 0.0
        for i in range(len(generator)):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []
            ignore_annotations = all_annotations[i][ignore_index]
            for d in detections:
                if not ignore_class:
                    scores = np.append(scores, d[4])
                    # no ignore regions: every detection is either a TP or a FP
                    if annotations.shape[0] == 0:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)
                        continue
                    overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                    assigned_annotation = np.argmax(overlaps, axis=1)
                    max_overlap = overlaps[0, assigned_annotation]
                    if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
                        false_positives = np.append(false_positives, 0)
                        true_positives = np.append(true_positives, 1)
                        detected_annotations.append(assigned_annotation)
                    else:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)
                else:
                    # overlap of the detection with the target-class annotations
                    if annotations.shape[0] != 0:
                        overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                        assigned_annotation = np.argmax(overlaps, axis=1)
                        max_overlap = overlaps[0, assigned_annotation]
                    else:
                        assigned_annotation = None
                        max_overlap = 0
                    # overlap of the detection with the ignore regions
                    if ignore_annotations.shape[0] != 0:
                        overlaps_ignore = compute_ignore_overlap(np.expand_dims(d, axis=0), ignore_annotations)
                        assigned_ignore_annotation = np.argmax(overlaps_ignore, axis=1)
                        max_ignore_overlap = overlaps_ignore[0, assigned_ignore_annotation]
                    else:
                        max_ignore_overlap = 0
                    if max_overlap >= iou_threshold:
                        # detection close to a class annotation
                        scores = np.append(scores, d[4])
                        if assigned_annotation not in detected_annotations:
                            # annotation not matched yet => true positive
                            false_positives = np.append(false_positives, 0)
                            true_positives = np.append(true_positives, 1)
                            detected_annotations.append(assigned_annotation)
                        else:
                            # annotation already matched => false positive
                            false_positives = np.append(false_positives, 1)
                            true_positives = np.append(true_positives, 0)
                    elif max_ignore_overlap >= 0.75:
                        # detection inside an ignore region: skip it entirely
                        continue
                    else:
                        # no matching annotation => false positive
                        scores = np.append(scores, d[4])
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)
        total_instances.append(num_annotations)
        # no annotations -> AP for this class is 0 (is this correct?)
        # no detections -> AP for this class is 0
        if num_annotations == 0 or len(scores) == 0:
            average_precisions[label] = 0, num_annotations
            precisions[label] = [0.0]
            recalls[label] = [0.0]
            TPs[label] = np.array([0])
            FPs[label] = np.array([0])
            continue
        # sort by score
        indices = np.argsort(-scores)
        false_positives = false_positives[indices]
        true_positives = true_positives[indices]
        # compute cumulative false positives and true positives
        false_positives = np.cumsum(false_positives)
        true_positives = np.cumsum(true_positives)
        # compute recall and precision
        recall = true_positives / num_annotations
        precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
        # compute average precision
        average_precision = _compute_ap(recall, precision)
        average_precisions[label] = average_precision, num_annotations
        recalls[label] = recall
        precisions[label] = precision
        TPs[label] = true_positives
        FPs[label] = false_positives
    all_precisions = []
    ID = C.ID
    if save_path is not None:
        os.makedirs(save_path + C.ID + '//', exist_ok=True)
    print('\nmAP:')
    for label in range(nb_classes):
        label_name = generator.label_to_name(label)
        print("---------")
        print('{}: {}'.format(label_name, average_precisions[label][0]))
        print("Precision: ", precisions[label][-1])
        print("Recall: ", recalls[label][-1])
        print("Num annotations: {}".format(average_precisions[label][1]))
        print("Num detections: {}".format(len(TPs[label])))
        print("TP: {}".format(TPs[label].max()))
        print("FP: {}\n".format(FPs[label].max()))
        all_precisions.append(average_precisions[label][0])
        if save_path is not None:
            matplotlib.use('Agg')
            plt.figure()
            plt.xlim((0, 1.1))
            plt.ylim((0, 1))
            plt.plot(recalls[label], precisions[label])
            # naming the x axis
            plt.xlabel('Recall')
            # naming the y axis
            plt.ylabel('Precision')
            # giving a title to my graph
            plt.title('Precision Recall curve')
            # save the plot for this label
            plt.savefig(save_path + C.ID + '//' + label_name + '_precision_recall.jpg')
    print("Debug : ")
    print("All precisions : {}".format(all_precisions))
    print("Total instances : {}".format(total_instances))
    # per-class AP plus overall mAP (averaged over classes that have instances)
    mAPs = pd.DataFrame([['{:.4f}'.format(ap[0]) for ap in average_precisions.values()] + ['{:.4f}'.format(sum(all_precisions) / sum(x > 0 for x in total_instances))]], columns=[generator.label_to_name(ap) for ap in average_precisions.keys()] + ['mAP'])
    if save_path is not None:
        mAPs.to_csv(save_path + C.ID + '//' + ID + '.csv', sep='\t')
        precision_recall_ped = pd.DataFrame([precisions[0], recalls[0]])
        precision_recall_ped.to_csv(save_path + C.ID + '//precision_recall.csv', sep='\t')
    return average_precisions
| [
"matplotlib.pyplot.title",
"numpy.sum",
"numpy.maximum",
"numpy.argmax",
"numpy.argsort",
"matplotlib.pyplot.figure",
"torch.no_grad",
"pandas.DataFrame",
"numpy.cumsum",
"numpy.finfo",
"numpy.append",
"matplotlib.pyplot.ylim",
"retinanet.dataloader2.UnNormalizer",
"matplotlib.use",
"tor... | [((738, 755), 'numpy.maximum', 'np.maximum', (['iw', '(0)'], {}), '(iw, 0)\n', (748, 755), True, 'import numpy as np\n'), ((765, 782), 'numpy.maximum', 'np.maximum', (['ih', '(0)'], {}), '(ih, 0)\n', (775, 782), True, 'import numpy as np\n'), ((1454, 1471), 'numpy.maximum', 'np.maximum', (['iw', '(0)'], {}), '(iw, 0)\n', (1464, 1471), True, 'import numpy as np\n'), ((1481, 1498), 'numpy.maximum', 'np.maximum', (['ih', '(0)'], {}), '(ih, 0)\n', (1491, 1498), True, 'import numpy as np\n'), ((1511, 1576), 'numpy.expand_dims', 'np.expand_dims', (['((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]))'], {'axis': '(1)'}), '((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1)\n', (1525, 1576), True, 'import numpy as np\n'), ((2150, 2188), 'numpy.concatenate', 'np.concatenate', (['([0.0], recall, [1.0])'], {}), '(([0.0], recall, [1.0]))\n', (2164, 2188), True, 'import numpy as np\n'), ((2198, 2239), 'numpy.concatenate', 'np.concatenate', (['([0.0], precision, [0.0])'], {}), '(([0.0], precision, [0.0]))\n', (2212, 2239), True, 'import numpy as np\n'), ((2562, 2607), 'numpy.sum', 'np.sum', (['((mrec[i + 1] - mrec[i]) * mpre[i + 1])'], {}), '((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n', (2568, 2607), True, 'import numpy as np\n'), ((3555, 3583), 'retinanet.dataloader2.UnNormalizer', 'UnNormalizer', (['C.channels_ind'], {}), '(C.channels_ind)\n', (3567, 3583), False, 'from retinanet.dataloader2 import UnNormalizer\n'), ((16325, 16376), 'os.makedirs', 'os.makedirs', (["(save_path + C.ID + '//')"], {'exist_ok': '(True)'}), "(save_path + C.ID + '//', exist_ok=True)\n", (16336, 16376), False, 'import os\n'), ((17993, 18034), 'pandas.DataFrame', 'pd.DataFrame', (['[precisions[0], recalls[0]]'], {}), '([precisions[0], recalls[0]])\n', (18005, 18034), True, 'import pandas as pd\n'), ((2340, 2372), 'numpy.maximum', 'np.maximum', (['mpre[i - 1]', 'mpre[i]'], {}), '(mpre[i - 1], mpre[i])\n', (2350, 2372), True, 'import numpy as np\n'), ((2480, 2511), 'numpy.where', 'np.where', 
(['(mrec[1:] != mrec[:-1])'], {}), '(mrec[1:] != mrec[:-1])\n', (2488, 2511), True, 'import numpy as np\n'), ((3708, 3723), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3721, 3723), False, 'import torch\n'), ((8862, 8876), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (8870, 8876), True, 'import numpy as np\n'), ((8903, 8917), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (8911, 8917), True, 'import numpy as np\n'), ((8944, 8958), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (8952, 8958), True, 'import numpy as np\n'), ((15287, 15306), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (15297, 15306), True, 'import numpy as np\n'), ((15564, 15590), 'numpy.cumsum', 'np.cumsum', (['false_positives'], {}), '(false_positives)\n', (15573, 15590), True, 'import numpy as np\n'), ((15617, 15642), 'numpy.cumsum', 'np.cumsum', (['true_positives'], {}), '(true_positives)\n', (15626, 15642), True, 'import numpy as np\n'), ((524, 555), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 2]'], {'axis': '(1)'}), '(a[:, 2], axis=1)\n', (538, 555), True, 'import numpy as np\n'), ((579, 605), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 0]', '(1)'], {}), '(a[:, 0], 1)\n', (593, 605), True, 'import numpy as np\n'), ((636, 667), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 3]'], {'axis': '(1)'}), '(a[:, 3], axis=1)\n', (650, 667), True, 'import numpy as np\n'), ((691, 717), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 1]', '(1)'], {}), '(a[:, 1], 1)\n', (705, 717), True, 'import numpy as np\n'), ((793, 858), 'numpy.expand_dims', 'np.expand_dims', (['((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]))'], {'axis': '(1)'}), '((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1)\n', (807, 858), True, 'import numpy as np\n'), ((901, 916), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (909, 916), True, 'import numpy as np\n'), ((1240, 1271), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 2]'], {'axis': '(1)'}), '(a[:, 2], axis=1)\n', 
(1254, 1271), True, 'import numpy as np\n'), ((1295, 1321), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 0]', '(1)'], {}), '(a[:, 0], 1)\n', (1309, 1321), True, 'import numpy as np\n'), ((1352, 1383), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 3]'], {'axis': '(1)'}), '(a[:, 3], axis=1)\n', (1366, 1383), True, 'import numpy as np\n'), ((1407, 1433), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 1]', '(1)'], {}), '(a[:, 1], 1)\n', (1421, 1433), True, 'import numpy as np\n'), ((1606, 1621), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1614, 1621), True, 'import numpy as np\n'), ((3878, 3903), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3901, 3903), False, 'import torch\n'), ((15100, 15113), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (15108, 15113), True, 'import numpy as np\n'), ((15139, 15152), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (15147, 15152), True, 'import numpy as np\n'), ((17012, 17033), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (17026, 17033), False, 'import matplotlib\n'), ((17046, 17058), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17056, 17058), True, 'import matplotlib.pyplot as plt\n'), ((17071, 17089), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, 1.1)'], {}), '((0, 1.1))\n', (17079, 17089), True, 'import matplotlib.pyplot as plt\n'), ((17101, 17117), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1)'], {}), '((0, 1))\n', (17109, 17117), True, 'import matplotlib.pyplot as plt\n'), ((17129, 17172), 'matplotlib.pyplot.plot', 'plt.plot', (['recalls[label]', 'precisions[label]'], {}), '(recalls[label], precisions[label])\n', (17137, 17172), True, 'import matplotlib.pyplot as plt\n'), ((17217, 17237), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (17227, 17237), True, 'import matplotlib.pyplot as plt\n'), ((17284, 17307), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", 
(17294, 17307), True, 'import matplotlib.pyplot as plt\n'), ((17364, 17399), 'matplotlib.pyplot.title', 'plt.title', (['"""Precision Recall curve"""'], {}), "('Precision Recall curve')\n", (17373, 17399), True, 'import matplotlib.pyplot as plt\n'), ((17454, 17529), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_path + C.ID + '//' + label_name + '_precision_recall.jpg')"], {}), "(save_path + C.ID + '//' + label_name + '_precision_recall.jpg')\n", (17465, 17529), True, 'import matplotlib.pyplot as plt\n'), ((4428, 4462), 'numpy.where', 'np.where', (['(scores > score_threshold)'], {}), '(scores > score_threshold)\n', (4436, 4462), True, 'import numpy as np\n'), ((4676, 4695), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (4686, 4695), True, 'import numpy as np\n'), ((5483, 5499), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {}), '((0, 5))\n', (5491, 5499), True, 'import numpy as np\n'), ((9411, 9434), 'numpy.append', 'np.append', (['scores', 'd[4]'], {}), '(scores, d[4])\n', (9420, 9434), True, 'import numpy as np\n'), ((9847, 9874), 'numpy.argmax', 'np.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (9856, 9874), True, 'import numpy as np\n'), ((15936, 15956), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (15944, 15956), True, 'import numpy as np\n'), ((4999, 5035), 'numpy.expand_dims', 'np.expand_dims', (['image_scores'], {'axis': '(1)'}), '(image_scores, axis=1)\n', (5013, 5035), True, 'import numpy as np\n'), ((5037, 5073), 'numpy.expand_dims', 'np.expand_dims', (['image_labels'], {'axis': '(1)'}), '(image_labels, axis=1)\n', (5051, 5073), True, 'import numpy as np\n'), ((9572, 9601), 'numpy.append', 'np.append', (['false_positives', '(1)'], {}), '(false_positives, 1)\n', (9581, 9601), True, 'import numpy as np\n'), ((9644, 9672), 'numpy.append', 'np.append', (['true_positives', '(0)'], {}), '(true_positives, 0)\n', (9653, 9672), True, 'import numpy as np\n'), ((9765, 9790), 'numpy.expand_dims', 
'np.expand_dims', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (9779, 9790), True, 'import numpy as np\n'), ((10098, 10127), 'numpy.append', 'np.append', (['false_positives', '(0)'], {}), '(false_positives, 0)\n', (10107, 10127), True, 'import numpy as np\n'), ((10170, 10198), 'numpy.append', 'np.append', (['true_positives', '(1)'], {}), '(true_positives, 1)\n', (10179, 10198), True, 'import numpy as np\n'), ((10340, 10369), 'numpy.append', 'np.append', (['false_positives', '(1)'], {}), '(false_positives, 1)\n', (10349, 10369), True, 'import numpy as np\n'), ((10412, 10440), 'numpy.append', 'np.append', (['true_positives', '(0)'], {}), '(true_positives, 0)\n', (10421, 10440), True, 'import numpy as np\n'), ((10772, 10799), 'numpy.argmax', 'np.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (10781, 10799), True, 'import numpy as np\n'), ((11296, 11330), 'numpy.argmax', 'np.argmax', (['overlaps_ignore'], {'axis': '(1)'}), '(overlaps_ignore, axis=1)\n', (11305, 11330), True, 'import numpy as np\n'), ((11673, 11696), 'numpy.append', 'np.append', (['scores', 'd[4]'], {}), '(scores, d[4])\n', (11682, 11696), True, 'import numpy as np\n'), ((10686, 10711), 'numpy.expand_dims', 'np.expand_dims', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (10700, 10711), True, 'import numpy as np\n'), ((11196, 11221), 'numpy.expand_dims', 'np.expand_dims', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (11210, 11221), True, 'import numpy as np\n'), ((11899, 11928), 'numpy.append', 'np.append', (['false_positives', '(0)'], {}), '(false_positives, 0)\n', (11908, 11928), True, 'import numpy as np\n'), ((11974, 12002), 'numpy.append', 'np.append', (['true_positives', '(1)'], {}), '(true_positives, 1)\n', (11983, 12002), True, 'import numpy as np\n'), ((12229, 12258), 'numpy.append', 'np.append', (['false_positives', '(1)'], {}), '(false_positives, 1)\n', (12238, 12258), True, 'import numpy as np\n'), ((12304, 12332), 'numpy.append', 'np.append', (['true_positives', '(0)'], {}), 
'(true_positives, 0)\n', (12313, 12332), True, 'import numpy as np\n'), ((12602, 12625), 'numpy.append', 'np.append', (['scores', 'd[4]'], {}), '(scores, d[4])\n', (12611, 12625), True, 'import numpy as np\n'), ((12667, 12696), 'numpy.append', 'np.append', (['false_positives', '(1)'], {}), '(false_positives, 1)\n', (12676, 12696), True, 'import numpy as np\n'), ((12738, 12766), 'numpy.append', 'np.append', (['true_positives', '(0)'], {}), '(true_positives, 0)\n', (12747, 12766), True, 'import numpy as np\n')] |
from operator import matmul
from matplotlib import image
import numpy as np
import cv2 # OpenCV
import math
from matplotlib import pyplot as plt
import os
from ..utils.process_text_file import ProcessTextFile
class Image:
    """Thin wrapper around a single grayscale image with display helpers."""

    def __init__(self):
        # grayscale pixel data; empty until load_image() is called
        self.gs = []

    def load_image(self, image_fname, display=False):
        """Read an image from disk and keep only its grayscale version."""
        bgr = cv2.imread(image_fname)
        self.gs = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
        if display:
            Image.imshow(self.gs)
        return

    def imresize(self, scale):
        """Rescale the stored image by *scale* using bicubic interpolation."""
        new_w = int(scale * self.gs.shape[1])
        new_h = int(scale * self.gs.shape[0])
        self.gs = cv2.resize(self.gs, dsize=(new_w, new_h), interpolation=cv2.INTER_CUBIC)
        return

    def display_image(self):
        """Show the stored image without axis ticks (non-blocking)."""
        plt.imshow(self.gs)
        plt.xticks([]), plt.yticks([])  # hide tick values on X and Y axis
        plt.show(block=False)
        return

    @staticmethod
    def imagesc(img):
        """Display *img* rescaled to [0, 1], MATLAB imagesc-style."""
        Image.imshow(Image.normalize(img))
        return

    @staticmethod
    def normalize(img):
        """Return *img* linearly rescaled to [0, 1] (epsilon avoids 0/0)."""
        return (img - img.min()) / (img.max() - img.min() + 1e-5)

    @staticmethod
    def imshow(img):
        """Show *img* without axis ticks (non-blocking)."""
        plt.imshow(img)
        plt.xticks([]), plt.yticks([])  # hide tick values on X and Y axis
        plt.show(block=False)
        return

    @staticmethod
    def meshgrid(x_dim, y_dim):
        """Return a (y_dim, x_dim, 3) stack of homogeneous pixel coordinates."""
        xs = np.linspace(0, x_dim - 1, x_dim)
        ys = np.linspace(0, y_dim - 1, y_dim)
        mesh_x, mesh_y = np.meshgrid(xs, ys)
        return np.dstack((mesh_x, mesh_y, np.ones(mesh_x.shape)))
"matplotlib.pyplot.show",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"numpy.ones",
"cv2.imread",
"numpy.linspace",
"matplotlib.pyplot.xticks"
] | [((346, 369), 'cv2.imread', 'cv2.imread', (['image_fname'], {}), '(image_fname)\n', (356, 369), False, 'import cv2\n'), ((388, 433), 'cv2.cvtColor', 'cv2.cvtColor', (['color_image', 'cv2.COLOR_BGR2GRAY'], {}), '(color_image, cv2.COLOR_BGR2GRAY)\n', (400, 433), False, 'import cv2\n'), ((731, 750), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.gs'], {}), '(self.gs)\n', (741, 750), True, 'from matplotlib import pyplot as plt\n'), ((837, 858), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (845, 858), True, 'from matplotlib import pyplot as plt\n'), ((1194, 1209), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1204, 1209), True, 'from matplotlib import pyplot as plt\n'), ((1296, 1317), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (1304, 1317), True, 'from matplotlib import pyplot as plt\n'), ((759, 773), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (769, 773), True, 'from matplotlib import pyplot as plt\n'), ((775, 789), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (785, 789), True, 'from matplotlib import pyplot as plt\n'), ((1218, 1232), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1228, 1232), True, 'from matplotlib import pyplot as plt\n'), ((1234, 1248), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1244, 1248), True, 'from matplotlib import pyplot as plt\n'), ((1422, 1454), 'numpy.linspace', 'np.linspace', (['(0)', '(x_dim - 1)', 'x_dim'], {}), '(0, x_dim - 1, x_dim)\n', (1433, 1454), True, 'import numpy as np\n'), ((1454, 1486), 'numpy.linspace', 'np.linspace', (['(0)', '(y_dim - 1)', 'y_dim'], {}), '(0, y_dim - 1, y_dim)\n', (1465, 1486), True, 'import numpy as np\n'), ((1529, 1550), 'numpy.ones', 'np.ones', (['mesh_x.shape'], {}), '(mesh_x.shape)\n', (1536, 1550), True, 'import numpy as np\n')] |
# Copyright 2019 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################
def timeaverage(xds, width=1, timespan='state', timebin=None):
    """
    Average data across time axis
    Parameters
    ----------
    xds : xarray.core.dataset.Dataset
        input Visibility Dataset
    width : int
        number of adjacent times to average (fast), used when timebin is None. Default=1 (no change)
    timespan : str
        Span of the timebin. Allowed values are 'none', 'scan', 'state' or 'both'. Default is 'state' (meaning all states in a scan)
    timebin (future) : float
        time bin width to averaging (in seconds) (slow - requires interpolation). Default None uses width parameter
    Returns
    -------
    xarray.core.dataset.Dataset
        New Visibility Dataset
    """
    import xarray
    import numpy as np
    # NOTE: timebin is accepted but not used anywhere below yet -- only
    # width-based coarsening is implemented (matches the "future" doc tag).
    # save names of coordinates, then reset them all to variables
    # (so every coordinate participates in the variable-wise averaging below;
    # they are restored as coordinates at the very end)
    coords = [cc for cc in list(xds.coords) if cc not in xds.dims]
    xds = xds.reset_coords()
    # find all variables with time dimension (vwtd)
    vwtds = [dv for dv in xds.data_vars if 'time' in xds[dv].dims]
    # find all remaining coordinates and variables without a time dimension
    remaining = [dv for dv in list(xds.data_vars) + list(xds.coords) if 'time' not in xds[dv].dims]
    # create list of single-span datasets from this parent dataset
    # (averaging is done per span so samples are never mixed across the
    # scan/state boundaries the caller asked to respect)
    ssds = []
    if timespan == 'none':
        scans = [xds.where(xds.scan == ss, drop=True) for ss in np.unique(xds.scan.values)]
        for scan in scans:
            ssds += [scan.where(scan.state == ss, drop=True) for ss in np.unique(scan.state.values)]
    elif timespan == 'scan': # span across scans by separating out states
        ssds = [xds.where(xds.state == ss, drop=True) for ss in np.unique(xds.state.values)]
    elif timespan == 'state': # span across state by separating out scans
        ssds = [xds.where(xds.scan == ss, drop=True) for ss in np.unique(xds.scan.values)]
    else: # span across both
        ssds = [xds]
    # loop over each single-span dataset and average within that span
    # build up a list of new averaged single span datasets
    # only time-dependent variables are included here, non-time variables will be added back later
    ndss = [] # list of new datasets
    for ss in ssds:
        xdas = {}
        for dv in ss.data_vars:
            xda = ss.data_vars[dv].astype(xds[dv].dtype)
            # apply time averaging to compatible variables
            if (dv in vwtds) and (xda.dtype.type != np.str_):
                # DATA/CORRECTED_DATA get a weighted average (weights derived
                # from SIGMA/WEIGHT columns); everything else is a plain mean.
                # NOTE(review): for 1/sigma^2 weighting the weighted mean is
                # sum(DATA/sigma^2) / sum(1/sigma^2); the two sigma branches
                # below multiply by sum(sigma^2) instead -- confirm this is
                # the intended normalization.
                if (dv == 'DATA') and ('SIGMA_SPECTRUM' in ss.data_vars):
                    xda = (ss.DATA / ss.SIGMA_SPECTRUM**2).coarsen(time=width, boundary='trim').sum()
                    xdas[dv] = xda * (ss.SIGMA_SPECTRUM**2).coarsen(time=width, boundary='trim').sum()
                elif (dv == 'CORRECTED_DATA') and ('WEIGHT_SPECTRUM' in ss.data_vars):
                    xda = (ss.CORRECTED_DATA * ss.WEIGHT_SPECTRUM).coarsen(time=width, boundary='trim').sum()
                    xdas[dv] = xda / ss.WEIGHT_SPECTRUM.coarsen(time=width, boundary='trim').sum()
                elif (dv == 'DATA') and ('SIGMA' in ss.data_vars):
                    xda = (ss.DATA / ss.SIGMA**2).coarsen(time=width, boundary='trim').sum()
                    xdas[dv] = xda * (ss.SIGMA**2).coarsen(time=width, boundary='trim').sum()
                elif (dv == 'CORRECTED_DATA') and ('WEIGHT' in ss.data_vars):
                    xda = (ss.CORRECTED_DATA * ss.WEIGHT).coarsen(time=width, boundary='trim').sum()
                    xdas[dv] = xda / ss.WEIGHT.coarsen(time=width, boundary='trim').sum()
                else:
                    # plain boxcar mean: sum over the window divided by width
                    xdas[dv] = (xda.coarsen(time=width, boundary='trim').sum() / width).astype(ss.data_vars[dv].dtype)
            # decimate variables with string types
            elif dv in vwtds:
                xdas[dv] = xda.thin(width)
        ndss += [xarray.Dataset(xdas)]
    # concatenate back to a single dataset of all scans/states
    # then merge with a dataset of non-time dependent variables
    new_xds = xarray.concat(ndss, dim='time', coords='all')
    new_xds = xarray.merge([new_xds, xds[remaining]])
    new_xds = new_xds.assign_attrs(xds.attrs)
    new_xds = new_xds.set_coords(coords)
    return new_xds
| [
"xarray.merge",
"numpy.unique",
"xarray.Dataset",
"xarray.concat"
] | [((4658, 4703), 'xarray.concat', 'xarray.concat', (['ndss'], {'dim': '"""time"""', 'coords': '"""all"""'}), "(ndss, dim='time', coords='all')\n", (4671, 4703), False, 'import xarray\n'), ((4718, 4757), 'xarray.merge', 'xarray.merge', (['[new_xds, xds[remaining]]'], {}), '([new_xds, xds[remaining]])\n', (4730, 4757), False, 'import xarray\n'), ((4494, 4514), 'xarray.Dataset', 'xarray.Dataset', (['xdas'], {}), '(xdas)\n', (4508, 4514), False, 'import xarray\n'), ((2055, 2081), 'numpy.unique', 'np.unique', (['xds.scan.values'], {}), '(xds.scan.values)\n', (2064, 2081), True, 'import numpy as np\n'), ((2181, 2209), 'numpy.unique', 'np.unique', (['scan.state.values'], {}), '(scan.state.values)\n', (2190, 2209), True, 'import numpy as np\n'), ((2350, 2377), 'numpy.unique', 'np.unique', (['xds.state.values'], {}), '(xds.state.values)\n', (2359, 2377), True, 'import numpy as np\n'), ((2517, 2543), 'numpy.unique', 'np.unique', (['xds.scan.values'], {}), '(xds.scan.values)\n', (2526, 2543), True, 'import numpy as np\n')] |
import numpy as np
from uq360.utils.batch_features.histogram_feature import SingleHistogramFeature
from uq360.utils.transformers.confidence_delta import ConfidenceDeltaTransformer
from uq360.utils.transformers.confidence_top import ConfidenceTopTransformer
from uq360.utils.transformers.confidence_entropy import ConfidenceEntropyTransformer
from uq360.utils.transformers.class_frequency import ClassFrequencyTransformer
class BasicPointwiseHistogramDistance(SingleHistogramFeature):
    """Common base for single-histogram batch-distance features.

    Marks itself as already fitted by default and deliberately ignores any
    externally supplied pointwise transformer: each subclass installs its own
    via ``set_transformer`` in its constructor.
    """

    def __init__(self, bins=10):
        super().__init__(bins)
        self.fit_status = True  # no fitting needed by default

    def set_pointwise_transformer(self, pointwise_transformer):
        """Intentionally a no-op; subclasses own their transformer."""
        pass
# Top Confidence feature
class BatchConfidenceTop(BasicPointwiseHistogramDistance):
    """Histogram distance over the top (highest) class confidence."""

    def __init__(self):
        super().__init__()
        self.set_transformer('confidence_top', ConfidenceTopTransformer())

    @classmethod
    def name(cls):
        return ('confidence_top_distance')

    def extract_histogram(self, vec):
        """Return a normalized histogram of top confidences.

        Bin edges are coarse (width 0.1) up to 0.9 and fine (width 0.01)
        from 0.91 to 1.0, where top confidences tend to concentrate.
        """
        coarse = np.linspace(0, 0.9, num=10)
        fine = np.linspace(0.91, 1.0, num=10)
        edges = np.concatenate([coarse, fine])
        self.histogram_edges = edges
        counts, _ = np.histogram(vec, bins=edges)
        return np.divide(counts, float(len(vec)))
# Confidence Delta feature
class BatchConfidenceDelta(BasicPointwiseHistogramDistance):
    """Histogram distance over the gap between the two highest confidences."""

    def __init__(self):
        super().__init__()
        self.set_transformer('confidence_delta', ConfidenceDeltaTransformer())

    @classmethod
    def name(cls):
        return ('confidence_delta_distance')
# Confidence Entropy feature
class BatchConfidenceEntropy(BasicPointwiseHistogramDistance):
    """Histogram distance over the entropy of the confidence vector."""

    def __init__(self):
        super().__init__()
        self.set_transformer('confidence_entropy', ConfidenceEntropyTransformer())
        # tri-state sentinel for the fallback-binning logic below:
        # None = untouched, 'false' = default bins were kept on a prior call
        self.changed_histogram = None
    @classmethod
    def name(cls):
        return ('confidence_entropy_distance')
    # Construct a single histogram of entropy values, normalized by count.
    def extract_histogram(self, vec):
        epsilon = 0.001
        # fine bins (width 0.01) on [0, 0.1], coarse (width 0.1) on [0.2, 3.0]
        bins = np.concatenate([np.linspace(0,0.1,num=11), np.linspace(0.2,3.0,num=29)])
        # Safety check in case your histogram misses.
        # fraction of values falling outside the default bin range
        too_high = np.mean([vec >= max(bins)])
        too_low = np.mean([vec <= min(bins)])
        if too_high > 0.5 or too_low > 0.5:
            # NOTE(review): the sentinel is compared against the string
            # 'false' but only ever *set* to 'false' and never consulted
            # before that first set -- the rebuild branch therefore runs on
            # the first out-of-range call; confirm intended call order
            # (test batch before prod batch).
            if self.changed_histogram != 'false':
                # Don't change prod if test wasn't changed
                bins = np.linspace(min(vec) - epsilon, max(vec)+epsilon, num=25)
                print("Fixing too high, new histogram is ", bins)
            else:
                self.changed_histogram = 'false'
        self.histogram_edges = bins
        hist , _ = np.histogram(vec, bins=bins)
        hist = np.divide(hist, float(len(vec)))
        return hist
# Predicted class frequency ratio
class BatchClassFrequency(BasicPointwiseHistogramDistance):
    """Histogram distance over the training frequency of the predicted class."""

    def __init__(self):
        super().__init__()
        self.set_transformer('class_frequency', ClassFrequencyTransformer())
        # the underlying transformer needs the training-set class frequencies
        self.fit_status = False

    @classmethod
    def name(cls):
        return ('class_frequency_distance')

    def fit(self, x, y):
        """Fit the class-frequency transformer once; later calls are no-ops."""
        if not self.fit_status:
            self.pointwise_transformer.fit(x, y)
            self.fit_status = True

    def extract_histogram(self, vec):
        """Histogram with one bin centered on each observed class frequency."""
        ordered = sorted(self.pointwise_transformer.class_frequencies)
        # edges: one below the smallest frequency, the midpoint between each
        # pair of neighbouring frequencies, and one above the largest
        edges = [ordered[0] - 1]
        edges.extend(0.5 * (lo + hi) for lo, hi in zip(ordered, ordered[1:]))
        edges.append(ordered[-1] + 1)
        self.histogram_edges = edges
        counts, _ = np.histogram(vec, bins=edges, density=False)
        return np.divide(counts, float(len(vec)))
| [
"uq360.utils.transformers.confidence_delta.ConfidenceDeltaTransformer",
"numpy.histogram",
"numpy.linspace",
"uq360.utils.transformers.class_frequency.ClassFrequencyTransformer",
"uq360.utils.transformers.confidence_top.ConfidenceTopTransformer",
"uq360.utils.transformers.confidence_entropy.ConfidenceEntr... | [((1175, 1203), 'numpy.histogram', 'np.histogram', (['vec'], {'bins': 'bins'}), '(vec, bins=bins)\n', (1187, 1203), True, 'import numpy as np\n'), ((2672, 2700), 'numpy.histogram', 'np.histogram', (['vec'], {'bins': 'bins'}), '(vec, bins=bins)\n', (2684, 2700), True, 'import numpy as np\n'), ((3775, 3818), 'numpy.histogram', 'np.histogram', (['vec'], {'bins': 'bins', 'density': '(False)'}), '(vec, bins=bins, density=False)\n', (3787, 3818), True, 'import numpy as np\n'), ((849, 875), 'uq360.utils.transformers.confidence_top.ConfidenceTopTransformer', 'ConfidenceTopTransformer', ([], {}), '()\n', (873, 875), False, 'from uq360.utils.transformers.confidence_top import ConfidenceTopTransformer\n'), ((1462, 1490), 'uq360.utils.transformers.confidence_delta.ConfidenceDeltaTransformer', 'ConfidenceDeltaTransformer', ([], {}), '()\n', (1488, 1490), False, 'from uq360.utils.transformers.confidence_delta import ConfidenceDeltaTransformer\n'), ((1770, 1800), 'uq360.utils.transformers.confidence_entropy.ConfidenceEntropyTransformer', 'ConfidenceEntropyTransformer', ([], {}), '()\n', (1798, 1800), False, 'from uq360.utils.transformers.confidence_entropy import ConfidenceEntropyTransformer\n'), ((2964, 2991), 'uq360.utils.transformers.class_frequency.ClassFrequencyTransformer', 'ClassFrequencyTransformer', ([], {}), '()\n', (2989, 2991), False, 'from uq360.utils.transformers.class_frequency import ClassFrequencyTransformer\n'), ((1062, 1089), 'numpy.linspace', 'np.linspace', (['(0)', '(0.9)'], {'num': '(10)'}), '(0, 0.9, num=10)\n', (1073, 1089), True, 'import numpy as np\n'), ((1089, 1119), 'numpy.linspace', 'np.linspace', (['(0.91)', '(1.0)'], {'num': '(10)'}), '(0.91, 1.0, num=10)\n', (1100, 1119), True, 'import numpy as np\n'), ((2053, 2080), 'numpy.linspace', 'np.linspace', (['(0)', '(0.1)'], {'num': '(11)'}), '(0, 0.1, num=11)\n', (2064, 2080), True, 'import numpy as np\n'), ((2080, 2109), 'numpy.linspace', 
'np.linspace', (['(0.2)', '(3.0)'], {'num': '(29)'}), '(0.2, 3.0, num=29)\n', (2091, 2109), True, 'import numpy as np\n')] |
"""Utils for the command line tool."""
# Standard library
import datetime as dt
import logging
import os
import pickle
import sys
from pathlib import Path
# Third-party
import matplotlib.path as mpath
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xarray as xr
# from ipdb import set_trace
def count_to_log_level(count: int) -> int:
    """Map occurrence of the command line option verbose to the log level."""
    # three and more occurrences of -v fall through to DEBUG
    levels = {0: logging.ERROR, 1: logging.WARNING, 2: logging.INFO}
    return levels.get(count, logging.DEBUG)
def extract_tqc(grib_file, out_dir, date_str, lt):
    """Extract tqc from model file using fieldextra.

    Args:
        grib_file (str): Grib file
        out_dir (str): Output directory
        date_str (str): date
        lt (int): leadtime
    """
    logging.debug(f"Apply fxfilter to: {grib_file}.")
    # destination of the filtered grib file
    target = Path(out_dir, f"tqc_{date_str}_{lt:03}.grb2")
    logging.info(f"Creating: {str(target)}.")
    if target.is_file():
        # nothing to do: the filtered file is already on disk
        logging.info(f"  ...exists already!")
        return
    # run fieldextra's fxfilter to keep only the TQC field
    command = f"fxfilter -o {target} -s TQC {grib_file}"
    logging.debug(f"Will run: {command}")
    os.system(command)
    return
def retrieve_cosmo_files(start, end, interval, out_dir, max_lt):
    """Retrieve COSMO files.

    Args:
        start (datetime): start
        end (datetime): end
        interval (int): interval between simulations
        out_dir (str): output directory for tqc-files
        max_lt (int): maximum leadtime
    """
    cosmo_dir = f"/store/s83/osm/COSMO-1E/"  # FCST{start.strftime('%y')}"
    logging.info(f"Retrieving COSMO-files from {cosmo_dir}")
    # make sure the output directory exists
    Path(out_dir).mkdir(parents=True, exist_ok=True)
    # one entry per simulation initialisation time
    for date in pd.date_range(start, end, freq=f"{interval}H"):
        date_str = date.strftime("%y%m%d%H")
        # collect one grib file per leadtime
        for lt in range(0, max_lt + 1, 1):
            pattern = f"{date_str}_???/grib/c1effsurf{lt:03}_000"
            matches = list(
                Path(cosmo_dir, f"FCST{date.strftime('%y')}").glob(pattern)
            )
            if not matches:
                logging.warning(f"No file found for {date_str}: +{lt}h.")
            elif len(matches) > 1:
                print(f"Model file description ambiguous.")
                sys.exit(1)
            else:
                # apply fxfilter
                extract_tqc(matches[0], out_dir, date_str, lt)
def get_fls_fractions(in_dir):
    """Retrieve dataframe containing FLS fractions.

    Args:
        in_dir (str): input directory

    Note:
        Placeholder for a planned public API entry point; not implemented yet.
    """
    # TODO: implement loading of FLS fractions from ``in_dir``
    pass
def get_ml_mask(lats, lons):
    """Retrieve mask of Swiss Plateau (Mittelland).

    Args:
        lats (array): latitudes
        lons (array): longitudes

    Returns:
        mask (bool array, same shape as ``lats``): True inside the polygon
    """
    # polygon vertices as (lat, lon) pairs outlining the Swiss Plateau;
    # the first vertex is repeated at the end to close the polygon
    vertices = [
        (46.12, 5.89),  # lower-left corner
        (46.06, 6.10),
        (46.33, 6.78),
        (46.55, 7.01),
        (46.64, 7.31),
        (46.65, 7.65),
        (46.62, 7.79),
        (46.81, 8.33),
        (47.09, 9.78),
        (47.82, 10.02),
        (47.81, 8.34),
        (47.38, 7.87),
        (47.29, 7.68),
        (47.25, 7.45),
        (47.13, 7.06),
        (47.07, 6.87),
        (46.73, 6.36),
        (46.59, 6.30),
        (46.18, 5.86),
        (46.12, 5.89),  # close polygon
    ]
    # NOTE: use mpath.Path directly instead of the previous local alias
    # ``Path = mpath.Path``, which shadowed pathlib.Path imported at module top.
    codes = (
        [mpath.Path.MOVETO]
        + [mpath.Path.LINETO] * (len(vertices) - 2)
        + [mpath.Path.CLOSEPOLY]
    )
    polygon = mpath.Path(vertices, codes)
    # store original shape: contains_points only handles point vectors,
    # so we ravel the grids and reshape the boolean result afterwards
    shape = lats.shape
    latlon = np.column_stack((lats.ravel(), lons.ravel()))
    mask = np.reshape(polygon.contains_points(latlon), shape)
    return mask
def save_as_pickle(obj, path):
    """Save object as pickled object.

    Args:
        obj (python object): usually dataframe
        path (Path): full destination path
    """
    # create parent if not existing yet
    print(path.parents[0])
    path.parents[0].mkdir(parents=True, exist_ok=True)
    # dump object; the context manager guarantees the handle is closed
    # (previously ``pickle.dump(obj, open(path, "wb"))`` leaked the handle)
    with open(path, "wb") as handle:
        pickle.dump(obj, handle)
    logging.info(f"Saved {path}")
    return
def calc_fls_fractions(
    start,
    end,
    interval,
    in_dir_obs,
    in_dir_model,
    out_dir_fls,
    max_lt,
    extend_previous,
    threshold,
):
    """Calculate FLS fractions in Swiss Plateau for OBS and FCST.

    Args:
        start (datetime): start
        end (datetime): end
        interval (int): interval between simulations
        in_dir_obs (str): dir with sat data
        in_dir_model (str): dir with model data
        out_dir_fls (str): dir with fls fractions
        max_lt (int): maximum leadtime
        extend_previous (bool): load previous obs and fcst dataframes
        threshold (float): threshold for low stratus confidence level

    Returns:
        obs (dataframe)
        fcst (dataframe)
    """
    # determine init and valid timestamps
    ini_times = pd.date_range(start=start, end=end, freq=f"{interval}H")
    valid_times = pd.date_range(
        start=start, end=end + dt.timedelta(hours=max_lt), freq="1H"
    )
    # retrieve OBS dataframe (reuse pickled result only when extend_previous)
    obs_path = Path(out_dir_fls, "obs.p")
    if obs_path.is_file() and extend_previous:
        obs = pickle.load(open(obs_path, "rb"))
        logging.info("Loaded obs from pickled object.")
    else:
        # create dataframe
        obs = pd.DataFrame(columns=["fls_frac", "high_clouds"], index=valid_times)
    # retrieve FCST dataframe (one column per leadtime 0..max_lt)
    fcst_path = Path(out_dir_fls, "fcst.p")
    if fcst_path.is_file() and extend_previous:
        fcst = pickle.load(open(fcst_path, "rb"))
        logging.info("Loaded fcst from pickled object.")
    else:
        # create dataframe
        fcst = pd.DataFrame(columns=np.arange(max_lt + 1), index=valid_times)
    # initiate variables; mask is derived lazily from the first readable obs file
    ml_mask = None
    ml_size = None
    for valid_time in valid_times:
        valid_time_str = valid_time.strftime("%y%m%d%H")
        # A) extract FLS fraction from OBS
        ##################################
        # timestamp from sat images: -15min
        obs_timestamp = (valid_time - dt.timedelta(minutes=15)).strftime("%y%m%d%H%M")
        logging.debug(f"SAT timestamp: {obs_timestamp}")
        # obs filename
        obs_file = Path(in_dir_obs, f"MSG_lscl-cosmo1eqc3km_{obs_timestamp}_c1e.nc")
        logging.debug(f"SAT file: {obs_file}")
        # load obs file; skip this valid time if the satellite file is missing
        try:
            ds = xr.open_dataset(obs_file).squeeze()
        except FileNotFoundError:
            logging.warning(f"No sat file for {obs_timestamp}.")
            logging.debug(f"  -> {obs_file}")
            continue
        if ml_mask is None:
            ml_mask = get_ml_mask(ds.lat_1.values, ds.lon_1.values)
            ml_size = np.sum(ml_mask)
            logging.debug(f"{ml_size} grid points in ML.")
        # lscl = low stratus confidence level (diagnosed)
        lscl = ds.LSCL.values
        lscl_ml = lscl[ml_mask]
        # count nan-values (=high clouds)
        n_high_clouds = np.sum(np.isnan(lscl_ml))
        # count values larger than threshold (=FLS)
        n_fls = np.sum(lscl_ml > threshold)
        # fill into dataframe
        obs.loc[valid_time]["fls_frac"] = n_fls / ml_size
        obs.loc[valid_time]["high_clouds"] = n_high_clouds / ml_size
        # B) extract FLS fraction from FCST
        ###################################
        # every forecast whose (ini_time, lt) verifies at this valid_time
        for lt in range(max_lt + 1):
            ini_time = valid_time - dt.timedelta(hours=lt)
            ini_time_str = ini_time.strftime("%y%m%d%H")
            fcst_file = Path(in_dir_model, f"tqc_{ini_time_str}_{lt:03}.grb2")
            if fcst_file.is_file():
                logging.debug(f"Loading {fcst_file}")
                ds2 = xr.open_dataset(fcst_file, engine="cfgrib").squeeze()
            else:
                logging.debug(f" but no {fcst_file}")
                continue
            tqc = ds2.unknown.values
            # overwrite grid points covered by high clouds with nan
            # (lscl comes from the obs file loaded above)
            tqc[np.isnan(lscl)] = np.nan
            # mask swiss plateau
            tqc_ml = tqc[ml_mask]
            # count grid points with liquid water path > 0.1 g/m2
            n_fls = np.sum(tqc_ml > 0.0001)
            # fill into dataframe
            # NOTE(review): chained indexing (.loc[...][...]) may assign to a
            # copy under pandas copy-on-write -- consider .loc[valid_time, lt]
            fcst.loc[valid_time][lt] = n_fls / ml_size
    save_as_pickle(obs, obs_path)
    save_as_pickle(fcst, fcst_path)
    return obs, fcst
# plot mask
# plt.pcolormesh(ml_mask)
# plt.savefig("/scratch/swester/tmp/ml_mask.png")
def load_obs_fcst(fls_path):
    """Load obs and fcst from existing pickled dataframes.

    Args:
        fls_path (str or Path): directory containing ``obs.p`` and ``fcst.p``

    Returns:
        2 dataframes: obs, fcst
    """
    # context managers close the handles reliably (previously
    # ``pickle.load(open(...))`` leaked both file handles)
    with open(Path(fls_path, "obs.p"), "rb") as handle:
        obs = pickle.load(handle)
    with open(Path(fls_path, "fcst.p"), "rb") as handle:
        fcst = pickle.load(handle)
    return obs, fcst
| [
"pandas.DataFrame",
"logging.debug",
"pandas.date_range",
"numpy.sum",
"logging.warning",
"xarray.open_dataset",
"os.system",
"numpy.isnan",
"logging.info",
"pathlib.Path",
"matplotlib.path.Path",
"datetime.timedelta",
"numpy.arange",
"sys.exit"
] | [((895, 944), 'logging.debug', 'logging.debug', (['f"""Apply fxfilter to: {grib_file}."""'], {}), "(f'Apply fxfilter to: {grib_file}.')\n", (908, 944), False, 'import logging\n'), ((980, 1025), 'pathlib.Path', 'Path', (['out_dir', 'f"""tqc_{date_str}_{lt:03}.grb2"""'], {}), "(out_dir, f'tqc_{date_str}_{lt:03}.grb2')\n", (984, 1025), False, 'from pathlib import Path\n'), ((1293, 1326), 'logging.debug', 'logging.debug', (['f"""Will run: {cmd}"""'], {}), "(f'Will run: {cmd}')\n", (1306, 1326), False, 'import logging\n'), ((1331, 1345), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (1340, 1345), False, 'import os\n'), ((1780, 1836), 'logging.info', 'logging.info', (['f"""Retrieving COSMO-files from {cosmo_dir}"""'], {}), "(f'Retrieving COSMO-files from {cosmo_dir}')\n", (1792, 1836), False, 'import logging\n'), ((1973, 2019), 'pandas.date_range', 'pd.date_range', (['start', 'end'], {'freq': 'f"""{interval}H"""'}), "(start, end, freq=f'{interval}H')\n", (1986, 2019), True, 'import pandas as pd\n'), ((4307, 4331), 'matplotlib.path.Path', 'mpath.Path', (['verts', 'codes'], {}), '(verts, codes)\n', (4317, 4331), True, 'import matplotlib.path as mpath\n'), ((5034, 5063), 'logging.info', 'logging.info', (['f"""Saved {path}"""'], {}), "(f'Saved {path}')\n", (5046, 5063), False, 'import logging\n'), ((5933, 5989), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start', 'end': 'end', 'freq': 'f"""{interval}H"""'}), "(start=start, end=end, freq=f'{interval}H')\n", (5946, 5989), True, 'import pandas as pd\n'), ((6143, 6169), 'pathlib.Path', 'Path', (['out_dir_fls', '"""obs.p"""'], {}), "(out_dir_fls, 'obs.p')\n", (6147, 6169), False, 'from pathlib import Path\n'), ((6488, 6515), 'pathlib.Path', 'Path', (['out_dir_fls', '"""fcst.p"""'], {}), "(out_dir_fls, 'fcst.p')\n", (6492, 6515), False, 'from pathlib import Path\n'), ((1159, 1196), 'logging.info', 'logging.info', (['f""" ...exists already!"""'], {}), "(f' ...exists already!')\n", (1171, 1196), False, 'import 
logging\n'), ((6273, 6320), 'logging.info', 'logging.info', (['"""Loaded obs from pickled object."""'], {}), "('Loaded obs from pickled object.')\n", (6285, 6320), False, 'import logging\n'), ((6372, 6440), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['fls_frac', 'high_clouds']", 'index': 'valid_times'}), "(columns=['fls_frac', 'high_clouds'], index=valid_times)\n", (6384, 6440), True, 'import pandas as pd\n'), ((6622, 6670), 'logging.info', 'logging.info', (['"""Loaded fcst from pickled object."""'], {}), "('Loaded fcst from pickled object.')\n", (6634, 6670), False, 'import logging\n'), ((7171, 7219), 'logging.debug', 'logging.debug', (['f"""SAT timestamp: {obs_timestamp}"""'], {}), "(f'SAT timestamp: {obs_timestamp}')\n", (7184, 7219), False, 'import logging\n'), ((7263, 7328), 'pathlib.Path', 'Path', (['in_dir_obs', 'f"""MSG_lscl-cosmo1eqc3km_{obs_timestamp}_c1e.nc"""'], {}), "(in_dir_obs, f'MSG_lscl-cosmo1eqc3km_{obs_timestamp}_c1e.nc')\n", (7267, 7328), False, 'from pathlib import Path\n'), ((7337, 7375), 'logging.debug', 'logging.debug', (['f"""SAT file: {obs_file}"""'], {}), "(f'SAT file: {obs_file}')\n", (7350, 7375), False, 'import logging\n'), ((8109, 8136), 'numpy.sum', 'np.sum', (['(lscl_ml > threshold)'], {}), '(lscl_ml > threshold)\n', (8115, 8136), True, 'import numpy as np\n'), ((1872, 1885), 'pathlib.Path', 'Path', (['out_dir'], {}), '(out_dir)\n', (1876, 1885), False, 'from pathlib import Path\n'), ((7751, 7766), 'numpy.sum', 'np.sum', (['ml_mask'], {}), '(ml_mask)\n', (7757, 7766), True, 'import numpy as np\n'), ((7779, 7825), 'logging.debug', 'logging.debug', (['f"""{ml_size} grid points in ML."""'], {}), "(f'{ml_size} grid points in ML.')\n", (7792, 7825), False, 'import logging\n'), ((8021, 8038), 'numpy.isnan', 'np.isnan', (['lscl_ml'], {}), '(lscl_ml)\n', (8029, 8038), True, 'import numpy as np\n'), ((8562, 8616), 'pathlib.Path', 'Path', (['in_dir_model', 'f"""tqc_{ini_time_str}_{lt:03}.grb2"""'], {}), "(in_dir_model, 
f'tqc_{ini_time_str}_{lt:03}.grb2')\n", (8566, 8616), False, 'from pathlib import Path\n'), ((9186, 9209), 'numpy.sum', 'np.sum', (['(tqc_ml > 0.0001)'], {}), '(tqc_ml > 0.0001)\n', (9192, 9209), True, 'import numpy as np\n'), ((9735, 9758), 'pathlib.Path', 'Path', (['fls_path', '"""obs.p"""'], {}), "(fls_path, 'obs.p')\n", (9739, 9758), False, 'from pathlib import Path\n'), ((9795, 9819), 'pathlib.Path', 'Path', (['fls_path', '"""fcst.p"""'], {}), "(fls_path, 'fcst.p')\n", (9799, 9819), False, 'from pathlib import Path\n'), ((2480, 2537), 'logging.warning', 'logging.warning', (['f"""No file found for {date_str}: +{lt}h."""'], {}), "(f'No file found for {date_str}: +{lt}h.')\n", (2495, 2537), False, 'import logging\n'), ((6054, 6080), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': 'max_lt'}), '(hours=max_lt)\n', (6066, 6080), True, 'import datetime as dt\n'), ((6744, 6765), 'numpy.arange', 'np.arange', (['(max_lt + 1)'], {}), '(max_lt + 1)\n', (6753, 6765), True, 'import numpy as np\n'), ((7513, 7565), 'logging.warning', 'logging.warning', (['f"""No sat file for {obs_timestamp}."""'], {}), "(f'No sat file for {obs_timestamp}.')\n", (7528, 7565), False, 'import logging\n'), ((7578, 7610), 'logging.debug', 'logging.debug', (['f""" -> {obs_file}"""'], {}), "(f' -> {obs_file}')\n", (7591, 7610), False, 'import logging\n'), ((8458, 8480), 'datetime.timedelta', 'dt.timedelta', ([], {'hours': 'lt'}), '(hours=lt)\n', (8470, 8480), True, 'import datetime as dt\n'), ((8670, 8707), 'logging.debug', 'logging.debug', (['f"""Loading {fcst_file}"""'], {}), "(f'Loading {fcst_file}')\n", (8683, 8707), False, 'import logging\n'), ((8819, 8857), 'logging.debug', 'logging.debug', (['f""" but no {fcst_file}"""'], {}), "(f' but no {fcst_file}')\n", (8832, 8857), False, 'import logging\n'), ((9006, 9020), 'numpy.isnan', 'np.isnan', (['lscl'], {}), '(lscl)\n', (9014, 9020), True, 'import numpy as np\n'), ((2652, 2663), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2660, 2663), 
False, 'import sys\n'), ((7114, 7138), 'datetime.timedelta', 'dt.timedelta', ([], {'minutes': '(15)'}), '(minutes=15)\n', (7126, 7138), True, 'import datetime as dt\n'), ((7431, 7456), 'xarray.open_dataset', 'xr.open_dataset', (['obs_file'], {}), '(obs_file)\n', (7446, 7456), True, 'import xarray as xr\n'), ((8730, 8773), 'xarray.open_dataset', 'xr.open_dataset', (['fcst_file'], {'engine': '"""cfgrib"""'}), "(fcst_file, engine='cfgrib')\n", (8745, 8773), True, 'import xarray as xr\n')] |
#! /usr/bin/env python3
"""Test collapse."""
# --- import -------------------------------------------------------------------------------------
import WrightTools as wt
import numpy as np
# --- tests --------------------------------------------------------------------------------------
def test_gradient():
    """Check Data.gradient along v1 and v2 (central differences on channel ch)."""
    d = wt.Data()
    v1 = np.arange(0, 6)
    d.create_variable("v1", v1)
    d.create_variable("v2", v1 * 2)
    d.create_channel("ch", np.array([1, 2, 4, 7, 11, 16]))
    for axis in ("v1", "v2"):
        d.transform(axis)
        d.gradient(axis)
    expected = np.array([1.0, 1.5, 2.5, 3.5, 4.5, 5.0])
    assert d.ch_v1_gradient.shape == (6,)
    assert np.allclose(d.ch_v1_gradient.points, expected)
    # v2 spans twice the range, so its gradient is half as large
    assert d.ch_v2_gradient.shape == (6,)
    assert np.allclose(d.ch_v2_gradient.points, expected / 2)
# --- run -----------------------------------------------------------------------------------------
# allow running this test module directly, outside of a pytest run
if __name__ == "__main__":
    test_gradient()
| [
"WrightTools.Data",
"numpy.arange",
"numpy.array"
] | [((327, 336), 'WrightTools.Data', 'wt.Data', ([], {}), '()\n', (334, 336), True, 'import WrightTools as wt\n'), ((368, 383), 'numpy.arange', 'np.arange', (['(0)', '(6)'], {}), '(0, 6)\n', (377, 383), True, 'import numpy as np\n'), ((467, 497), 'numpy.array', 'np.array', (['[1, 2, 4, 7, 11, 16]'], {}), '([1, 2, 4, 7, 11, 16])\n', (475, 497), True, 'import numpy as np\n'), ((694, 734), 'numpy.array', 'np.array', (['[1.0, 1.5, 2.5, 3.5, 4.5, 5.0]'], {}), '([1.0, 1.5, 2.5, 3.5, 4.5, 5.0])\n', (702, 734), True, 'import numpy as np\n'), ((832, 876), 'numpy.array', 'np.array', (['[0.5, 0.75, 1.25, 1.75, 2.25, 2.5]'], {}), '([0.5, 0.75, 1.25, 1.75, 2.25, 2.5])\n', (840, 876), True, 'import numpy as np\n'), ((416, 431), 'numpy.arange', 'np.arange', (['(0)', '(6)'], {}), '(0, 6)\n', (425, 431), True, 'import numpy as np\n')] |
import random
from typing import Tuple
import numpy as np
from maenv.core import World, Agent, Team
from maenv.exceptions.scenario_exceptions import ScenarioNotSymmetricError
from maenv.interfaces.scenario import BaseTeamScenario
from maenv.utils.colors import generate_colors
class TeamsScenario(BaseTeamScenario):
    # Symmetric team-vs-team scenario: every team must field the same number
    # of units; spawns can be fixed, randomized once, or regenerated per episode.
    def __init__(self,
                 match_build_plan: dict,
                 grid_size: int = 10,
                 bounds: Tuple[int, int] = (1280, 720),
                 ai="basic", ai_config=None,
                 random_spawns: bool = False,
                 stochastic_spawns: bool = False,
                 attack_range_only: bool = False,
                 **kwargs):
        """
        Constructor for a team scenario.
        @param match_build_plan: Plan to setup the match and therefore team composition and possible AI`s.
        n_agents: How many agents per team
        n_teams: How many teams
        """
        self.match_build_plan = match_build_plan
        assert match_build_plan is not None, "Cannot build scenario from empty build plan."
        self.grid_size = grid_size
        self.bounds = np.array(bounds)
        self.random_spawns = random_spawns
        self.stochastic_spawns = stochastic_spawns
        self.ai = ai
        self.ai_config = ai_config
        self.attack_range_only = attack_range_only
        self.teams_n = len(match_build_plan)
        self.agents_n = [len(team["units"]) for team in match_build_plan]
        self.is_symmetric = self.agents_n.count(self.agents_n[0]) == len(self.agents_n)  # each agent n must be the same
        self.team_mixing_factor = 8  # build_plan["tmf"] if "tmf" in build_plan["tmf"] else 5
        if not self.is_symmetric:
            raise ScenarioNotSymmetricError(self.agents_n, self.teams_n)
        self.team_spawns = None
        # fixed spawn coordinates may be provided directly in the build plan
        if "agent_spawns" in self.match_build_plan:
            self.agent_spawns = self.match_build_plan["agent_spawns"]
        else:
            self.agent_spawns = [None] * self.teams_n

    def _make_world(self):
        """
        A teams scenario creates a world with two equally sized teams with either a fixed spawn scheme or
        a random generated spawn scheme. Spawns can be regenerated every episode or kept constant.
        @param grid_size:
        @return:
        """
        total_n_agents = sum(self.agents_n)
        world = World(n_agents=total_n_agents, n_teams=self.teams_n, grid_size=self.grid_size, ai=self.ai,
                      ai_config=self.ai_config, attack_range_only=self.attack_range_only, bounds=self.bounds)
        colors = generate_colors(self.teams_n)
        agent_count = 0
        for tid in range(self.teams_n):
            is_scripted = self.match_build_plan[tid]["is_scripted"]
            members = [
                Agent(
                    id=aid,  # is not reset per team. aid identifying all units globally
                    tid=tid,
                    color=colors[tid],
                    build_plan=self.match_build_plan[tid]["units"][index],
                    is_scripted=is_scripted,
                ) for index, aid in  # index is the team internal identifier
                enumerate(range(agent_count, agent_count + self.agents_n[tid]))
            ]
            agent_count += self.agents_n[tid]
            world.agents += members
            team = Team(tid=tid, members=members, is_scripted=is_scripted)
            world.teams.append(team)
        return world

    def reset_world(self, world: World):
        # How far should team spawns and agents be spread
        agent_spread = world.grid_size * sum(self.agents_n) / self.team_mixing_factor
        team_spread = self.teams_n * agent_spread
        # random team spawns
        if self.stochastic_spawns or self.team_spawns is None:  # if spawns already exist do not generate
            self.team_spawns = world.spg.generate_team_spawns(randomize=self.random_spawns, radius=team_spread)
            # randomize which team gets which of the two spawn locations
            if random.random() < 0.5:
                self.team_spawns[0], self.team_spawns[1] = self.team_spawns[1], self.team_spawns[0]
        if self.stochastic_spawns or any([spawn is None for spawn in self.agent_spawns]):
            # take first teams size since symmetric for spawn generation
            agent_spawns = world.spg.generate(randomize=self.random_spawns, mean_radius=1, sigma_radius=agent_spread)
            # mirror spawns
            self.agent_spawns[0] = agent_spawns + self.team_spawns[0]
            self.agent_spawns[1] = (- agent_spawns) + self.team_spawns[1]
        # place every agent at its per-team spawn position
        for team, team_spawn in zip(world.teams, self.team_spawns):
            for team_intern_id, agent in enumerate(team.members):
                spawn = self.agent_spawns[team.tid][team_intern_id]
                world.connect(agent, spawn)
        world.init()  # Init after all agents added

    def reward(self, agent: Agent, world: World):
        # shaped reward: damage dealt (normalized by attack damage) plus kills
        reward = 0
        reward += agent.stats.dmg_dealt / agent.attack_damage * 2
        reward += agent.stats.kills * 10
        return reward

    def done(self, team: Team, world: World):
        if np.all(world.wiped_teams):  # if all teams are wiped simultaneously -> done
            return True
        # if only one team is not wiped and this team is the team under testing -> winner winner chicken dinner
        return not world.wiped_teams[team.tid] and world.wiped_teams.count(False) == 1

    def observation(self, agent: Agent, world: World):
        # observation = flattened view of other agents + the agent's own state
        other_obs = world.obs[agent.id].flatten()
        return np.concatenate((other_obs, agent.self_observation))
| [
"maenv.utils.colors.generate_colors",
"maenv.core.Agent",
"maenv.core.Team",
"numpy.all",
"random.random",
"maenv.core.World",
"numpy.array",
"maenv.exceptions.scenario_exceptions.ScenarioNotSymmetricError",
"numpy.concatenate"
] | [((1140, 1156), 'numpy.array', 'np.array', (['bounds'], {}), '(bounds)\n', (1148, 1156), True, 'import numpy as np\n'), ((2382, 2570), 'maenv.core.World', 'World', ([], {'n_agents': 'total_n_agents', 'n_teams': 'self.teams_n', 'grid_size': 'self.grid_size', 'ai': 'self.ai', 'ai_config': 'self.ai_config', 'attack_range_only': 'self.attack_range_only', 'bounds': 'self.bounds'}), '(n_agents=total_n_agents, n_teams=self.teams_n, grid_size=self.\n grid_size, ai=self.ai, ai_config=self.ai_config, attack_range_only=self\n .attack_range_only, bounds=self.bounds)\n', (2387, 2570), False, 'from maenv.core import World, Agent, Team\n'), ((2601, 2630), 'maenv.utils.colors.generate_colors', 'generate_colors', (['self.teams_n'], {}), '(self.teams_n)\n', (2616, 2630), False, 'from maenv.utils.colors import generate_colors\n'), ((5107, 5132), 'numpy.all', 'np.all', (['world.wiped_teams'], {}), '(world.wiped_teams)\n', (5113, 5132), True, 'import numpy as np\n'), ((5527, 5578), 'numpy.concatenate', 'np.concatenate', (['(other_obs, agent.self_observation)'], {}), '((other_obs, agent.self_observation))\n', (5541, 5578), True, 'import numpy as np\n'), ((1743, 1797), 'maenv.exceptions.scenario_exceptions.ScenarioNotSymmetricError', 'ScenarioNotSymmetricError', (['self.agents_n', 'self.teams_n'], {}), '(self.agents_n, self.teams_n)\n', (1768, 1797), False, 'from maenv.exceptions.scenario_exceptions import ScenarioNotSymmetricError\n'), ((3359, 3414), 'maenv.core.Team', 'Team', ([], {'tid': 'tid', 'members': 'members', 'is_scripted': 'is_scripted'}), '(tid=tid, members=members, is_scripted=is_scripted)\n', (3363, 3414), False, 'from maenv.core import World, Agent, Team\n'), ((2803, 2929), 'maenv.core.Agent', 'Agent', ([], {'id': 'aid', 'tid': 'tid', 'color': 'colors[tid]', 'build_plan': "self.match_build_plan[tid]['units'][index]", 'is_scripted': 'is_scripted'}), "(id=aid, tid=tid, color=colors[tid], build_plan=self.match_build_plan[\n tid]['units'][index], 
is_scripted=is_scripted)\n", (2808, 2929), False, 'from maenv.core import World, Agent, Team\n'), ((3974, 3989), 'random.random', 'random.random', ([], {}), '()\n', (3987, 3989), False, 'import random\n')] |
import random
import numbers
import torchvision.transforms.functional as F
from PIL import Image
import torch
import scipy.ndimage as ndimage
import numpy as np
#import cv2
from skimage.transform import rescale
class Resize(object):
    """Rescales the inputs and target arrays to the given 'size'.

    'size' will be the size of the smaller edge. For example, if
    height > width, then image will be rescaled to
    (size * height / width, size).

    Args:
        size: size of the smaller edge
        order: interpolation order. Default: 2 (bilinear) -- kept for API
            compatibility; skimage's ``rescale`` default order is used.
    """

    def __init__(self, size, order=2):
        self.size = size
        self.order = order

    def __call__(self, frames, flows):
        # all frames/flows of one sample share the same spatial dims
        h, w, _ = frames[0].shape
        # already at target size on the smaller edge: nothing to do
        if (w <= h and w == self.size) or (h <= w and h == self.size):
            return frames, flows
        ratio = self.size / w if w < h else self.size / h
        # BUGFIX: only flow vectors are scaled by the resize ratio; the
        # previous code also multiplied image pixel values by the ratio
        # (copy-paste from the flow branch).
        out_frames = [
            rescale(frame, (ratio, ratio, 1), anti_aliasing=False) for frame in frames
        ]
        out_flows = []
        for flo in flows:
            flo = rescale(flo, (ratio, ratio, 1), anti_aliasing=False)
            # flow components are pixel displacements -> scale with the image
            flo *= ratio
            out_flows.append(flo)
        return out_frames, out_flows
class RandomCrop(object):
    """Crops the given PIL.Image at a random location to have a region of
    the given size. size can be a tuple (target_height, target_width)
    or an integer, in which case the target will be of a square shape (size, size)
    """

    def __init__(self, size):
        # a single number means a square crop
        self.size = (int(size), int(size)) if isinstance(size, numbers.Number) else size

    def __call__(self, frames, flows):
        # crop offsets are drawn once so frames and flows stay aligned
        h, w, _ = frames[0].shape
        th, tw = self.size
        x = random.randint(0, w - tw)
        y = random.randint(0, h - th)
        if w == tw and h == th:
            # already at target size: keep elements unchanged
            return [frame for frame in frames], [flo for flo in flows]
        out_frames = [frame[y: y + th, x: x + tw] for frame in frames]
        out_flows = [flo[y: y + th, x: x + tw] for flo in flows]
        return out_frames, out_flows
class CenterCrop(object):
    """Crops the given inputs and target arrays at the center to have a region of
    the given size. size can be a tuple (target_height, target_width)
    or an integer, in which case the target will be of a square shape (size, size)
    Careful, img1 and img2 may not be the same size
    """

    def __init__(self, size):
        # a single number means a square crop
        self.size = (int(size), int(size)) if isinstance(size, numbers.Number) else size

    def __call__(self, frames, flows):
        # center offsets computed once from the first frame's dims
        h, w, _ = frames[0].shape
        th, tw = self.size
        x = int(round((w - tw) / 2.))
        y = int(round((h - th) / 2.))
        out_frames = [frame[y: y + th, x: x + tw] for frame in frames]
        out_flows = [flo[y: y + th, x: x + tw] for flo in flows]
        return out_frames, out_flows
class RandomHorizontalFlip(object):
    """Randomly horizontally flips the given sample with a probability of 0.5.

    BUGFIX versus the previous implementation:
    * the coin is flipped ONCE per sample, so all frames and all flows of a
      clip are flipped consistently (previously each element drew its own
      coin, leaving frames and flows misaligned);
    * only the flow x-component is negated under a flip -- the old code also
      negated channel 0 of the image frames (copy-paste from the flow branch).
    """

    def __call__(self, frames, flows):
        # single draw decides the flip for the whole sample
        if random.random() >= 0.5:
            return frames, flows
        out_frames = [np.copy(np.fliplr(frame)) for frame in frames]
        out_flows = []
        for flo in flows:
            flo = np.copy(np.fliplr(flo))
            # horizontal displacement changes sign under a horizontal flip
            flo[:, :, 0] *= -1
            out_flows.append(flo)
        return out_frames, out_flows
class ToTensor(object):
    """Convert a list of ``PIL Image`` or ``numpy.ndarray`` to tensor.

    Converts each PIL Image or numpy.ndarray (H x W x C) in the range
    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range
    [0.0, 1.0].
    """

    def __call__(self, frames, flows):
        """
        Args:
            frames: a list of (PIL Image or numpy.ndarray).
        Returns:
            a list of Tensor: Converted images.
        """
        converted_frames = [F.to_tensor(frame) for frame in frames]
        converted_flows = [F.to_tensor(flo) for flo in flows]
        return converted_frames, converted_flows
class ArrayToTensor(object):
    """Converts a numpy.ndarray (H x W x C) to a torch.FloatTensor of shape (C x H x W)."""

    def __call__(self, frames, flows):
        def _to_chw_tensor(arr):
            # put it from HWC to CHW format before wrapping as a float tensor
            assert isinstance(arr, np.ndarray)
            chw = np.transpose(arr, (2, 0, 1))
            return torch.from_numpy(chw).float()

        out_frames = [_to_chw_tensor(frame) for frame in frames]
        out_flows = [_to_chw_tensor(flo) for flo in flows]
        return out_frames, out_flows
class Compose(object):
    """ Composes several co_transforms together.
    For example:
    >>> co_transforms.Compose([
    >>>     co_transforms.CenterCrop(10),
    >>>     co_transforms.ToTensor(),
    >>>  ])
    """

    def __init__(self, co_transforms):
        self.co_transforms = co_transforms

    def __call__(self, frames, flows):
        # thread the (frames, flows) pair through every transform in order
        for transform in self.co_transforms:
            frames, flows = transform(frames, flows)
        return frames, flows
class Normalize(object):
    """Channel-wise normalization of frame tensors.

    Flows are passed through F.normalize with zero mean and unit std,
    which leaves their values unchanged.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, frames, flows):
        """
        Args:
            frames: a list of Tensor image of size (C, H, W) to be normalized.
        Returns:
            a list of Tensor: a list of normalized Tensor images.
        """
        normalized_frames = [F.normalize(frame, self.mean, self.std) for frame in frames]
        normalized_flows = [F.normalize(flo, [0, 0], [1, 1]) for flo in flows]
        return normalized_frames, normalized_flows
class Stack(object):
    """Stack a list of (C, H, W) tensors into one (C, L, H, W) video tensor."""

    def __init__(self, dim=1):
        # dim=1 inserts the temporal axis right after the channel axis
        self.dim = dim

    def __call__(self, frames, flows):
        """
        Args:
            frames: a list of (L) Tensor image of size (C, H, W).
        Returns:
            Tensor: a video Tensor of size (C, L, H, W).
        """
        stacked_frames = torch.stack(frames, dim=self.dim)
        stacked_flows = torch.stack(flows, dim=self.dim)
        return stacked_frames, stacked_flows
| [
"torch.stack",
"random.randint",
"skimage.transform.rescale",
"torchvision.transforms.functional.to_tensor",
"numpy.transpose",
"random.random",
"numpy.fliplr",
"torchvision.transforms.functional.normalize",
"torch.from_numpy"
] | [((3069, 3094), 'random.randint', 'random.randint', (['(0)', '(w - tw)'], {}), '(0, w - tw)\n', (3083, 3094), False, 'import random\n'), ((3107, 3132), 'random.randint', 'random.randint', (['(0)', '(h - th)'], {}), '(0, h - th)\n', (3121, 3132), False, 'import random\n'), ((1322, 1376), 'skimage.transform.rescale', 'rescale', (['frame', '(ratio, ratio, 1)'], {'anti_aliasing': '(False)'}), '(frame, (ratio, ratio, 1), anti_aliasing=False)\n', (1329, 1376), False, 'from skimage.transform import rescale\n'), ((2163, 2215), 'skimage.transform.rescale', 'rescale', (['flo', '(ratio, ratio, 1)'], {'anti_aliasing': '(False)'}), '(flo, (ratio, ratio, 1), anti_aliasing=False)\n', (2170, 2215), False, 'from skimage.transform import rescale\n'), ((6979, 7009), 'numpy.transpose', 'np.transpose', (['frame', '(2, 0, 1)'], {}), '(frame, (2, 0, 1))\n', (6991, 7009), True, 'import numpy as np\n'), ((7037, 7060), 'torch.from_numpy', 'torch.from_numpy', (['frame'], {}), '(frame)\n', (7053, 7060), False, 'import torch\n'), ((7228, 7256), 'numpy.transpose', 'np.transpose', (['flo', '(2, 0, 1)'], {}), '(flo, (2, 0, 1))\n', (7240, 7256), True, 'import numpy as np\n'), ((7282, 7303), 'torch.from_numpy', 'torch.from_numpy', (['flo'], {}), '(flo)\n', (7298, 7303), False, 'import torch\n'), ((8898, 8931), 'torch.stack', 'torch.stack', (['frames'], {'dim': 'self.dim'}), '(frames, dim=self.dim)\n', (8909, 8931), False, 'import torch\n'), ((8933, 8965), 'torch.stack', 'torch.stack', (['flows'], {'dim': 'self.dim'}), '(flows, dim=self.dim)\n', (8944, 8965), False, 'import torch\n'), ((5570, 5585), 'random.random', 'random.random', ([], {}), '()\n', (5583, 5585), False, 'import random\n'), ((5780, 5795), 'random.random', 'random.random', ([], {}), '()\n', (5793, 5795), False, 'import random\n'), ((6540, 6558), 'torchvision.transforms.functional.to_tensor', 'F.to_tensor', (['frame'], {}), '(frame)\n', (6551, 6558), True, 'import torchvision.transforms.functional as F\n'), ((6638, 6654), 
'torchvision.transforms.functional.to_tensor', 'F.to_tensor', (['flo'], {}), '(flo)\n', (6649, 6654), True, 'import torchvision.transforms.functional as F\n'), ((8400, 8439), 'torchvision.transforms.functional.normalize', 'F.normalize', (['frame', 'self.mean', 'self.std'], {}), '(frame, self.mean, self.std)\n', (8411, 8439), True, 'import torchvision.transforms.functional as F\n'), ((8519, 8551), 'torchvision.transforms.functional.normalize', 'F.normalize', (['flo', '[0, 0]', '[1, 1]'], {}), '(flo, [0, 0], [1, 1])\n', (8530, 8551), True, 'import torchvision.transforms.functional as F\n'), ((5625, 5641), 'numpy.fliplr', 'np.fliplr', (['frame'], {}), '(frame)\n', (5634, 5641), True, 'import numpy as np\n'), ((5833, 5847), 'numpy.fliplr', 'np.fliplr', (['flo'], {}), '(flo)\n', (5842, 5847), True, 'import numpy as np\n')] |
# Copyright (C) 2020 University of Oxford
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import pickle
import netCDF4
import numpy as np
import pandas as pd
from requests import get
# opening netCDF4 files via url is not reliable
# (it requires the package to be built with OPenDAP support)
# we dowload and write to disk the file before opening it
def download_MET_file(url, file_name):
try:
os.remove(file_name)
except:
pass
# dowload the file from url and save it on disk
# get request
response = get(url)
if response.status_code != 200:
return False
# open in binary mode
with open(file_name, "wb") as file:
# write to file
file.write(response.content)
file.close()
return True
def load_local_data():
# load the variables dict
with open("plugins/WEATHER/input/weather_indicators.json", "r") as read_file:
weather_indicators = json.load(read_file)
# load grid to GADM level 2 dict
with open('plugins/WEATHER/input/adm_2_to_grid.pkl', 'rb') as handle:
adm_2_to_grid = pickle.load(handle)
return weather_indicators, adm_2_to_grid
# dowload the weather data for a single variable for all days in daterange
# use the adm_2_to_grid to assign each point in the grid to the right GID
# returns a pandas dataframe
def create_aggr_df(indicator, day, variables, adm_2_to_grid, logger):
days = []
country = []
avg = []
std = []
region = []
city = []
logger.debug("downloading data for {} for {}".format(indicator, day.strftime('%Y-%m-%d')))
URL = "https://metdatasa.blob.core.windows.net/covid19-response/metoffice_global_daily/"
temp_file = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'netCDF4_file.nc')
if not download_MET_file("{}{}/{}{}.nc".format(URL, variables[indicator]['folder'], variables[indicator]['file'],
day.strftime('%Y%m%d')), file_name=temp_file):
return None
nc = netCDF4.Dataset(temp_file)
data = nc.variables[variables[indicator]['variable']][:].data.reshape(-1)
for area_0 in adm_2_to_grid:
for area_1 in adm_2_to_grid[area_0]:
for area_2 in adm_2_to_grid[area_0][area_1]:
idx_list = [point[0] for point in adm_2_to_grid[area_0][area_1][area_2]]
to_avg = [data[idx] for idx in idx_list]
days.append(day.strftime('%Y-%m-%d'))
country.append(area_0)
region.append(area_1)
city.append(area_2)
avg.append(np.mean(to_avg))
std.append(np.std(to_avg))
try:
os.remove(temp_file)
except:
pass
d = {'day': days, 'country': country, 'region': region, 'city': city,
indicator + '_avg': avg, indicator + '_std': std}
return pd.DataFrame(data=d)
| [
"netCDF4.Dataset",
"pandas.DataFrame",
"os.remove",
"json.load",
"numpy.std",
"os.path.dirname",
"pickle.load",
"numpy.mean",
"requests.get"
] | [((1061, 1069), 'requests.get', 'get', (['url'], {}), '(url)\n', (1064, 1069), False, 'from requests import get\n'), ((2547, 2573), 'netCDF4.Dataset', 'netCDF4.Dataset', (['temp_file'], {}), '(temp_file)\n', (2562, 2573), False, 'import netCDF4\n'), ((3400, 3420), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'd'}), '(data=d)\n', (3412, 3420), True, 'import pandas as pd\n'), ((930, 950), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (939, 950), False, 'import os\n'), ((1457, 1477), 'json.load', 'json.load', (['read_file'], {}), '(read_file)\n', (1466, 1477), False, 'import json\n'), ((1614, 1633), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (1625, 1633), False, 'import pickle\n'), ((2235, 2260), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2250, 2260), False, 'import os\n'), ((3208, 3228), 'os.remove', 'os.remove', (['temp_file'], {}), '(temp_file)\n', (3217, 3228), False, 'import os\n'), ((3130, 3145), 'numpy.mean', 'np.mean', (['to_avg'], {}), '(to_avg)\n', (3137, 3145), True, 'import numpy as np\n'), ((3174, 3188), 'numpy.std', 'np.std', (['to_avg'], {}), '(to_avg)\n', (3180, 3188), True, 'import numpy as np\n')] |
import wave
import os,sys
import ctypes
import contextlib
import numpy as np
from ctypes import util
from scipy.io import wavfile
from pydub import AudioSegment
import pandas as pd
#loading libraries and setting up the environment
lib_path = util.find_library("rnnoise")
if (not("/" in lib_path)):
lib_path = (os.popen('ldconfig -p | grep '+lib_path).read().split('\n')[0].strip().split(" ")[-1] or ("/usr/local/lib/"+lib_path))
lib = ctypes.cdll.LoadLibrary(lib_path)
lib.rnnoise_process_frame.argtypes = [ctypes.c_void_p,ctypes.POINTER(ctypes.c_float),ctypes.POINTER(ctypes.c_float)]
lib.rnnoise_process_frame.restype = ctypes.c_float
lib.rnnoise_create.restype = ctypes.c_void_p
lib.rnnoise_destroy.argtypes = [ctypes.c_void_p]
# borrowed from here
# https://github.com/Shb742/rnnoise_python
class RNNoise(object):
def __init__(self):
self.obj = lib.rnnoise_create(None)
def process_frame(self,inbuf):
outbuf = np.ndarray((480,), 'h', inbuf).astype(ctypes.c_float)
outbuf_ptr = outbuf.ctypes.data_as(ctypes.POINTER(ctypes.c_float))
VodProb = lib.rnnoise_process_frame(self.obj,outbuf_ptr,outbuf_ptr)
return (VodProb,outbuf.astype(ctypes.c_short).tobytes())
def destroy(self):
lib.rnnoise_destroy(self.obj)
def read_wave(path):
"""Reads a .wav file.
Takes the path, and returns (PCM audio data, sample rate).
"""
with contextlib.closing(wave.open(path, 'rb')) as wf:
num_channels = wf.getnchannels()
assert num_channels == 1
sample_width = wf.getsampwidth()
assert sample_width == 2
sample_rate = wf.getframerate()
assert sample_rate in (8000, 16000, 32000, 48000)
pcm_data = wf.readframes(wf.getnframes())
return pcm_data, sample_rate
def frame_generator(frame_duration_ms,
audio,
sample_rate):
"""Generates audio frames from PCM audio data.
Takes the desired frame duration in milliseconds, the PCM data, and
the sample rate.
Yields Frames of the requested duration.
"""
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
offset = 0
timestamp = 0.0
duration = (float(n) / sample_rate) / 2.0
while offset + n < len(audio):
yield audio[offset:offset + n]
offset += n
denoiser = RNNoise()
import sys
file_name=sys.argv[1]
#file_name='overlayed_noisy_sounds/9.wav'
wav_path=file_name
TARGET_SR = 48000
TEMP_FILE = 'test.wav'
sound = AudioSegment.from_wav(wav_path)
sound = sound.set_frame_rate(TARGET_SR)
sound = sound.set_channels(1)
sound.export(TEMP_FILE,
format="wav")
audio, sample_rate = read_wave(TEMP_FILE)
assert sample_rate == TARGET_SR
frames = frame_generator(10, audio, TARGET_SR)
frames = list(frames)
tups = [denoiser.process_frame(frame) for frame in frames]
denoised_frames = [tup[1] for tup in tups]
denoised_wav = np.concatenate([np.frombuffer(frame,
dtype=np.int16)
for frame in denoised_frames])
wavfile.write(file_name.replace('.wav','_denoised.wav'),
TARGET_SR,
denoised_wav) | [
"wave.open",
"numpy.ndarray",
"numpy.frombuffer",
"os.popen",
"ctypes.cdll.LoadLibrary",
"pydub.AudioSegment.from_wav",
"ctypes.util.find_library",
"ctypes.POINTER"
] | [((243, 271), 'ctypes.util.find_library', 'util.find_library', (['"""rnnoise"""'], {}), "('rnnoise')\n", (260, 271), False, 'from ctypes import util\n'), ((441, 474), 'ctypes.cdll.LoadLibrary', 'ctypes.cdll.LoadLibrary', (['lib_path'], {}), '(lib_path)\n', (464, 474), False, 'import ctypes\n'), ((2508, 2539), 'pydub.AudioSegment.from_wav', 'AudioSegment.from_wav', (['wav_path'], {}), '(wav_path)\n', (2529, 2539), False, 'from pydub import AudioSegment\n'), ((529, 559), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_float'], {}), '(ctypes.c_float)\n', (543, 559), False, 'import ctypes\n'), ((560, 590), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_float'], {}), '(ctypes.c_float)\n', (574, 590), False, 'import ctypes\n'), ((2940, 2976), 'numpy.frombuffer', 'np.frombuffer', (['frame'], {'dtype': 'np.int16'}), '(frame, dtype=np.int16)\n', (2953, 2976), True, 'import numpy as np\n'), ((1043, 1073), 'ctypes.POINTER', 'ctypes.POINTER', (['ctypes.c_float'], {}), '(ctypes.c_float)\n', (1057, 1073), False, 'import ctypes\n'), ((1435, 1456), 'wave.open', 'wave.open', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (1444, 1456), False, 'import wave\n'), ((946, 976), 'numpy.ndarray', 'np.ndarray', (['(480,)', '"""h"""', 'inbuf'], {}), "((480,), 'h', inbuf)\n", (956, 976), True, 'import numpy as np\n'), ((315, 357), 'os.popen', 'os.popen', (["('ldconfig -p | grep ' + lib_path)"], {}), "('ldconfig -p | grep ' + lib_path)\n", (323, 357), False, 'import os, sys\n')] |
import cv2
import numpy as np
def is_cv2():
# if we are using OpenCV 2, then our cv2.__version__ will start
# with '2.'
return check_opencv_version("2.")
def is_cv3():
# if we are using OpenCV 3.X, then our cv2.__version__ will start
# with '3.'
return check_opencv_version("3.")
def check_opencv_version(major, lib=None):
# if the supplied library is None, import OpenCV
if lib is None:
import cv2 as lib
# return whether or not the current OpenCV version matches the
# major version number
return lib.__version__.startswith(major)
def compatible_contours(thresh):
if is_cv2():
(contours, _) = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
elif is_cv3():
(_, contours, _) = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
return contours
def compatible_boundingrect(points):
if is_cv2():
points = np.array([p for p in points])
return cv2.boundingRect(points)
elif is_cv3():
return cv2.boundingRect(points)
| [
"cv2.boundingRect",
"numpy.array",
"cv2.findContours",
"cv2.__version__.startswith"
] | [((555, 588), 'cv2.__version__.startswith', 'lib.__version__.startswith', (['major'], {}), '(major)\n', (581, 588), True, 'import cv2 as lib\n'), ((664, 732), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (680, 732), False, 'import cv2\n'), ((1025, 1054), 'numpy.array', 'np.array', (['[p for p in points]'], {}), '([p for p in points])\n', (1033, 1054), True, 'import numpy as np\n'), ((1070, 1094), 'cv2.boundingRect', 'cv2.boundingRect', (['points'], {}), '(points)\n', (1086, 1094), False, 'import cv2\n'), ((820, 888), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (836, 888), False, 'import cv2\n'), ((1129, 1153), 'cv2.boundingRect', 'cv2.boundingRect', (['points'], {}), '(points)\n', (1145, 1153), False, 'import cv2\n')] |
from flask import Flask, request, jsonify, abort
import base64
import requests
from datetime import datetime
import subprocess
import os
import sys
import argparse
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
import cv2
import tools.infer.predict_det as predict_det
import tools.infer.predict_rec as predict_rec
import tools.infer.predict_cls as predict_cls
import copy
import numpy as np
import math
import time
import tools.infer.utility as utility
from ppocr.utils.utility import get_image_file_list, check_and_read_gif
from PIL import Image
from tools.infer.utility import draw_ocr
from tools.infer.utility import draw_ocr_box_txt
from ppocr.utils.logging import get_logger
app = Flask(__name__)
logger = get_logger()
class TextSystem(object):
def __init__(self, args):
self.text_detector = predict_det.TextDetector(args)
self.text_recognizer = predict_rec.TextRecognizer(args)
self.use_angle_cls = args.use_angle_cls
if self.use_angle_cls:
self.text_classifier = predict_cls.TextClassifier(args)
def get_rotate_crop_image(self, img, points):
'''
img_height, img_width = img.shape[0:2]
left = int(np.min(points[:, 0]))
right = int(np.max(points[:, 0]))
top = int(np.min(points[:, 1]))
bottom = int(np.max(points[:, 1]))
img_crop = img[top:bottom, left:right, :].copy()
points[:, 0] = points[:, 0] - left
points[:, 1] = points[:, 1] - top
'''
img_crop_width = int(
max(
np.linalg.norm(points[0] - points[1]),
np.linalg.norm(points[2] - points[3])))
img_crop_height = int(
max(
np.linalg.norm(points[0] - points[3]),
np.linalg.norm(points[1] - points[2])))
pts_std = np.float32([[0, 0], [img_crop_width, 0],
[img_crop_width, img_crop_height],
[0, img_crop_height]])
M = cv2.getPerspectiveTransform(points, pts_std)
dst_img = cv2.warpPerspective(
img,
M, (img_crop_width, img_crop_height),
borderMode=cv2.BORDER_REPLICATE,
flags=cv2.INTER_CUBIC)
dst_img_height, dst_img_width = dst_img.shape[0:2]
if dst_img_height * 1.0 / dst_img_width >= 1.5:
dst_img = np.rot90(dst_img)
return dst_img
def print_draw_crop_rec_res(self, img_crop_list, rec_res):
bbox_num = len(img_crop_list)
for bno in range(bbox_num):
cv2.imwrite("./output/img_crop_%d.jpg" % bno, img_crop_list[bno])
print(bno, rec_res[bno])
def __call__(self, img):
ori_im = img.copy()
dt_boxes, elapse = self.text_detector(img)
print("dt_boxes num : {}, elapse : {}".format(len(dt_boxes), elapse))
if dt_boxes is None:
return None, None
img_crop_list = []
dt_boxes = sorted_boxes(dt_boxes)
for bno in range(len(dt_boxes)):
tmp_box = copy.deepcopy(dt_boxes[bno])
img_crop = self.get_rotate_crop_image(ori_im, tmp_box)
img_crop_list.append(img_crop)
if self.use_angle_cls:
img_crop_list, angle_list, elapse = self.text_classifier(
img_crop_list)
print("cls num : {}, elapse : {}".format(
len(img_crop_list), elapse))
rec_res, elapse = self.text_recognizer(img_crop_list)
print("rec_res num : {}, elapse : {}".format(len(rec_res), elapse))
# self.print_draw_crop_rec_res(img_crop_list, rec_res)
return dt_boxes, rec_res
def sorted_boxes(dt_boxes):
"""
Sort text boxes in order from top to bottom, left to right
args:
dt_boxes(array):detected text boxes with shape [4, 2]
return:
sorted boxes(array) with shape [4, 2]
"""
num_boxes = dt_boxes.shape[0]
sorted_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))
_boxes = list(sorted_boxes)
for i in range(num_boxes - 1):
if abs(_boxes[i + 1][0][1] - _boxes[i][0][1]) < 10 and \
(_boxes[i + 1][0][0] < _boxes[i][0][0]):
tmp = _boxes[i]
_boxes[i] = _boxes[i + 1]
_boxes[i + 1] = tmp
return _boxes
def do_predict(args):
"""
:param args:
:return: 返回(image_file, newdt, rec_res)组成的列表,image_file是图片,newdt是文字的box坐标,rec_res是识别结果和置信度组成的元祖
"""
image_file_list = get_image_file_list(args.image_dir)
text_sys = TextSystem(args)
is_visualize = True
font_path = args.vis_font_path
#把所有的图片的名字,bbox,识别结果放在一个tuple里面返回
images_result = []
for image_file in image_file_list:
img, flag = check_and_read_gif(image_file)
if not flag:
img = cv2.imread(image_file)
if img is None:
logger.info("error in loading image:{}".format(image_file))
continue
starttime = time.time()
# dt_boxes 是一个列表,列表中每个元素时一个bbox坐标,格式是,每个点是x,y,每个点的位置是[左上角点,右上角点,右下角点,左下角点] [[171. 93.], [626. 93.], [626. 139.], [171. 139.]]
# rec_res 是识别结果和置信度的元祖组成的列表, 其中的一个元素是['为你定制元气美肌', 0.9992783]
dt_boxes, rec_res = text_sys(img)
elapse = time.time() - starttime
print("Predict time of %s: %.3fs" % (image_file, elapse))
drop_score = 0.5
dt_num = len(dt_boxes)
for dno in range(dt_num):
text, score = rec_res[dno]
if score >= drop_score:
text_str = "%s, %.3f" % (text, score)
print(text_str)
if is_visualize:
# 是否可视化
image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
boxes = dt_boxes
txts = [rec_res[i][0] for i in range(len(rec_res))]
scores = [rec_res[i][1] for i in range(len(rec_res))]
draw_img = draw_ocr_box_txt(
image,
boxes,
txts,
scores,
drop_score=drop_score,
font_path=font_path)
draw_img_save = "./inference_results/"
if not os.path.exists(draw_img_save):
os.makedirs(draw_img_save)
cv2.imwrite(
os.path.join(draw_img_save, os.path.basename(image_file)),
draw_img[:, :, ::-1])
print("The visualized image saved in {}".format(
os.path.join(draw_img_save, os.path.basename(image_file))))
#dt的numpy改成列表格式
newdt = [i.tolist() for i in dt_boxes]
# 把置信度float32改成字符串格式,方便以后json dumps
rec_res = [[rec[0],str(rec[1])] for rec in rec_res]
images_result.append((image_file, newdt, rec_res))
return images_result
@app.route("/api/base64", methods=['POST'])
def b64():
"""
接收用户调用paddle ocr api接口
返回json格式的预测结果
:param contents, 提供的文件内容,是一个列表结构, base64格式
:return: json格式 [{name:图片名称,label: 分类结果,content: 识别内容},{name:图片名称,label: 分类结果,content: 识别内容}]
"""
save_path = '/tmp/'
jsonres = request.get_json()
#images是图片的base64格式
images= jsonres.get('images', None)
#图片的名字列表
names= jsonres.get('names', None)
#创建一个目录为这次请求,图片保存到这个目录下,识别结果也是从这里拿到
image_dir = os.path.join(save_path,datetime.now().strftime('%Y%m%d%H%M%S%f'))
if not os.path.exists(image_dir):
os.mkdir(image_dir)
for name,image in zip(names,images):
name = os.path.basename(name)
file_timestamp = datetime.now().strftime('%Y%m%d%H%M%S%f')
image_path = os.path.join(image_dir, "ocr{}_{}".format(file_timestamp,name))
#解压成base64, 保存到本地
file = base64.b64decode(image)
with open(image_path, "wb") as f:
f.write(file)
args = parse_args()
#识别图片的路径
args.image_dir = image_dir
args.det_model_dir = "inference/ch_ppocr_mobile_v2.0_det_infer/"
args.rec_model_dir = "inference/ch_ppocr_mobile_v2.0_rec_infer/"
args.cls_model_dir = "inference/ch_ppocr_mobile_v2.0_cls_infer/"
args.use_angle_cls = True
args.use_space_char = True
#不画出结果图
# args.is_visualize = False
images_result = do_predict(args)
results = {"content": images_result}
return jsonify(results)
@app.route("/api/path", methods=['POST'])
def path():
"""
传过来给定图片的路径即可,需要绝对路径
:return:
:rtype:
"""
jsonres = request.get_json()
#images是图片的路径
image_dir= jsonres.get('images', None)
#识别图片的路径
image_file_list = get_image_file_list(image_dir)
#把所有的图片的名字,bbox,识别结果放在一个tuple里面返回
images_result = []
for image_file in image_file_list:
img, flag = check_and_read_gif(image_file)
if not flag:
img = cv2.imread(image_file)
if img is None:
logger.info("error in loading image:{}".format(image_file))
continue
starttime = time.time()
# dt_boxes 是一个列表,列表中每个元素时一个bbox坐标,格式是,每个点是x,y,每个点的位置是[左上角点,右上角点,右下角点,左下角点] [[171. 93.], [626. 93.], [626. 139.], [171. 139.]]
# rec_res 是识别结果和置信度的元祖组成的列表, 其中的一个元素是['为你定制元气美肌', 0.9992783]
dt_boxes, rec_res = text_sys(img)
elapse = time.time() - starttime
print("Predict time of %s: %.3fs" % (image_file, elapse))
# drop_score = 0.5
# dt_num = len(dt_boxes)
# for dno in range(dt_num):
# text, score = rec_res[dno]
# if score >= drop_score:
# text_str = "%s, %.3f" % (text, score)
# print(text_str)
# dt的numpy改成列表格式
every_res = []
for rec, dt in zip(rec_res,dt_boxes):
dt = dt.tolist()
one_res = {
"words": rec[0],
"confidence": str(rec[1]),
"left_top": dt[0],
"right_top": dt[1],
"right_bottom":dt[2],
"left_bottom":dt[3],
}
every_res.append(one_res)
one_data = {
"image_name": image_file,
"ocr_result": every_res
}
images_result.append(one_data)
return jsonify(images_result)
if __name__ == "__main__":
args = utility.parse_args()
args.det_model_dir = "inference/ch_ppocr_mobile_v2.0_det_infer"
args.rec_model_dir = "inference/ch_ppocr_mobile_v2.0_rec_infer"
args.cls_model_dir = "inference/ch_ppocr_mobile_v2.0_cls_infer"
args.use_angle_cls = True
args.use_space_char = True
text_sys = TextSystem(args)
app.run(host='0.0.0.0', port=6688, debug=False, threaded=True)
| [
"os.mkdir",
"tools.infer.utility.draw_ocr_box_txt",
"cv2.getPerspectiveTransform",
"base64.b64decode",
"flask.jsonify",
"numpy.rot90",
"numpy.linalg.norm",
"os.path.join",
"flask.request.get_json",
"sys.path.append",
"os.path.abspath",
"cv2.warpPerspective",
"ppocr.utils.utility.get_image_fi... | [((218, 242), 'sys.path.append', 'sys.path.append', (['__dir__'], {}), '(__dir__)\n', (233, 242), False, 'import sys\n'), ((785, 800), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (790, 800), False, 'from flask import Flask, request, jsonify, abort\n'), ((811, 823), 'ppocr.utils.logging.get_logger', 'get_logger', ([], {}), '()\n', (821, 823), False, 'from ppocr.utils.logging import get_logger\n'), ((191, 216), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (206, 216), False, 'import os\n'), ((4562, 4597), 'ppocr.utils.utility.get_image_file_list', 'get_image_file_list', (['args.image_dir'], {}), '(args.image_dir)\n', (4581, 4597), False, 'from ppocr.utils.utility import get_image_file_list, check_and_read_gif\n'), ((7125, 7143), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (7141, 7143), False, 'from flask import Flask, request, jsonify, abort\n'), ((8280, 8296), 'flask.jsonify', 'jsonify', (['results'], {}), '(results)\n', (8287, 8296), False, 'from flask import Flask, request, jsonify, abort\n'), ((8432, 8450), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (8448, 8450), False, 'from flask import Flask, request, jsonify, abort\n'), ((8547, 8577), 'ppocr.utils.utility.get_image_file_list', 'get_image_file_list', (['image_dir'], {}), '(image_dir)\n', (8566, 8577), False, 'from ppocr.utils.utility import get_image_file_list, check_and_read_gif\n'), ((10139, 10161), 'flask.jsonify', 'jsonify', (['images_result'], {}), '(images_result)\n', (10146, 10161), False, 'from flask import Flask, request, jsonify, abort\n'), ((10202, 10222), 'tools.infer.utility.parse_args', 'utility.parse_args', ([], {}), '()\n', (10220, 10222), True, 'import tools.infer.utility as utility\n'), ((275, 305), 'os.path.join', 'os.path.join', (['__dir__', '"""../.."""'], {}), "(__dir__, '../..')\n", (287, 305), False, 'import os\n'), ((910, 940), 
'tools.infer.predict_det.TextDetector', 'predict_det.TextDetector', (['args'], {}), '(args)\n', (934, 940), True, 'import tools.infer.predict_det as predict_det\n'), ((972, 1004), 'tools.infer.predict_rec.TextRecognizer', 'predict_rec.TextRecognizer', (['args'], {}), '(args)\n', (998, 1004), True, 'import tools.infer.predict_rec as predict_rec\n'), ((1917, 2019), 'numpy.float32', 'np.float32', (['[[0, 0], [img_crop_width, 0], [img_crop_width, img_crop_height], [0,\n img_crop_height]]'], {}), '([[0, 0], [img_crop_width, 0], [img_crop_width, img_crop_height],\n [0, img_crop_height]])\n', (1927, 2019), True, 'import numpy as np\n'), ((2088, 2132), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['points', 'pts_std'], {}), '(points, pts_std)\n', (2115, 2132), False, 'import cv2\n'), ((2151, 2274), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(img_crop_width, img_crop_height)'], {'borderMode': 'cv2.BORDER_REPLICATE', 'flags': 'cv2.INTER_CUBIC'}), '(img, M, (img_crop_width, img_crop_height), borderMode=\n cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC)\n', (2170, 2274), False, 'import cv2\n'), ((4809, 4839), 'ppocr.utils.utility.check_and_read_gif', 'check_and_read_gif', (['image_file'], {}), '(image_file)\n', (4827, 4839), False, 'from ppocr.utils.utility import get_image_file_list, check_and_read_gif\n'), ((5039, 5050), 'time.time', 'time.time', ([], {}), '()\n', (5048, 5050), False, 'import time\n'), ((7392, 7417), 'os.path.exists', 'os.path.exists', (['image_dir'], {}), '(image_dir)\n', (7406, 7417), False, 'import os\n'), ((7427, 7446), 'os.mkdir', 'os.mkdir', (['image_dir'], {}), '(image_dir)\n', (7435, 7446), False, 'import os\n'), ((7503, 7525), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (7519, 7525), False, 'import os\n'), ((7719, 7742), 'base64.b64decode', 'base64.b64decode', (['image'], {}), '(image)\n', (7735, 7742), False, 'import base64\n'), ((8698, 8728), 'ppocr.utils.utility.check_and_read_gif', 
'check_and_read_gif', (['image_file'], {}), '(image_file)\n', (8716, 8728), False, 'from ppocr.utils.utility import get_image_file_list, check_and_read_gif\n'), ((8928, 8939), 'time.time', 'time.time', ([], {}), '()\n', (8937, 8939), False, 'import time\n'), ((1119, 1151), 'tools.infer.predict_cls.TextClassifier', 'predict_cls.TextClassifier', (['args'], {}), '(args)\n', (1145, 1151), True, 'import tools.infer.predict_cls as predict_cls\n'), ((2456, 2473), 'numpy.rot90', 'np.rot90', (['dst_img'], {}), '(dst_img)\n', (2464, 2473), True, 'import numpy as np\n'), ((2647, 2712), 'cv2.imwrite', 'cv2.imwrite', (["('./output/img_crop_%d.jpg' % bno)", 'img_crop_list[bno]'], {}), "('./output/img_crop_%d.jpg' % bno, img_crop_list[bno])\n", (2658, 2712), False, 'import cv2\n'), ((3130, 3158), 'copy.deepcopy', 'copy.deepcopy', (['dt_boxes[bno]'], {}), '(dt_boxes[bno])\n', (3143, 3158), False, 'import copy\n'), ((4879, 4901), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (4889, 4901), False, 'import cv2\n'), ((5315, 5326), 'time.time', 'time.time', ([], {}), '()\n', (5324, 5326), False, 'import time\n'), ((5959, 6051), 'tools.infer.utility.draw_ocr_box_txt', 'draw_ocr_box_txt', (['image', 'boxes', 'txts', 'scores'], {'drop_score': 'drop_score', 'font_path': 'font_path'}), '(image, boxes, txts, scores, drop_score=drop_score,\n font_path=font_path)\n', (5975, 6051), False, 'from tools.infer.utility import draw_ocr_box_txt\n'), ((8768, 8790), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (8778, 8790), False, 'import cv2\n'), ((9204, 9215), 'time.time', 'time.time', ([], {}), '()\n', (9213, 9215), False, 'import time\n'), ((1645, 1682), 'numpy.linalg.norm', 'np.linalg.norm', (['(points[0] - points[1])'], {}), '(points[0] - points[1])\n', (1659, 1682), True, 'import numpy as np\n'), ((1700, 1737), 'numpy.linalg.norm', 'np.linalg.norm', (['(points[2] - points[3])'], {}), '(points[2] - points[3])\n', (1714, 1737), True, 'import numpy as 
np\n'), ((1804, 1841), 'numpy.linalg.norm', 'np.linalg.norm', (['(points[0] - points[3])'], {}), '(points[0] - points[3])\n', (1818, 1841), True, 'import numpy as np\n'), ((1859, 1896), 'numpy.linalg.norm', 'np.linalg.norm', (['(points[1] - points[2])'], {}), '(points[1] - points[2])\n', (1873, 1896), True, 'import numpy as np\n'), ((5738, 5774), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (5750, 5774), False, 'import cv2\n'), ((6215, 6244), 'os.path.exists', 'os.path.exists', (['draw_img_save'], {}), '(draw_img_save)\n', (6229, 6244), False, 'import os\n'), ((6262, 6288), 'os.makedirs', 'os.makedirs', (['draw_img_save'], {}), '(draw_img_save)\n', (6273, 6288), False, 'import os\n'), ((7338, 7352), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7350, 7352), False, 'from datetime import datetime\n'), ((7551, 7565), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7563, 7565), False, 'from datetime import datetime\n'), ((6358, 6386), 'os.path.basename', 'os.path.basename', (['image_file'], {}), '(image_file)\n', (6374, 6386), False, 'import os\n'), ((6532, 6560), 'os.path.basename', 'os.path.basename', (['image_file'], {}), '(image_file)\n', (6548, 6560), False, 'import os\n')] |
import os
import io
import glob
import errno
import json
import numpy as np
import scipy.interpolate as spi
import scipy.optimize as spo
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
np.set_printoptions(linewidth=500)
def fvb_crr(raw_array, offset=0, medianRatio=1, noiseCoeff=5, debugging=False):
"""
Remove cosmic rays from a sequency of identical exposures
:param raw_array: The array to be cleaned. Successive spectra should
be the columns (i.e. 1600 x n) of the raw_array
:param offset: baseline to add to raw_array.
Not used, but here if it's needed in the future
:param medianRatio: Multiplier to the median when deciding a cutoff
:param noiseCoeff: Multiplier to the noise on the median
May need changing for noisy data
:return:
"""
d = np.array(raw_array)
med = ndimage.filters.median_filter(d, size=(1, d.shape[1]), mode='wrap')
med = np.median(d, axis=1).reshape(d.shape[0], 1)
if debugging:
print("shape of median filter:", med.shape)
meanMedian = med.mean(axis=1)
# meanMedian = med.copy()
if debugging:
print("shape of meaned median filter:", meanMedian.shape)
# Construct a cutoff for each pixel. It was kind of guess and
# check
cutoff = meanMedian * medianRatio + noiseCoeff * np.std(meanMedian[-100:])
if debugging:
print("shape of cutoff criteria:", cutoff.shape)
import pyqtgraph as pg
winlist = []
app = pg.QtGui.QApplication([])
win = pg.GraphicsLayoutWidget()
win.setWindowTitle("Raw Image")
p1 = win.addPlot()
img = pg.ImageItem()
img.setImage(d.copy().T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.addLegend()
for i, v in enumerate(d.T):
p2.plot(v, pen=(i, d.shape[1]), name=str(i))
p2.plot(np.sum(d, axis=1), pen=pg.mkPen('w', width=3))
win.show()
winlist.append(win)
win2 = pg.GraphicsLayoutWidget()
win2.setWindowTitle("Median Image")
p1 = win2.addPlot()
img = pg.ImageItem()
img.setImage(med.T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win2.addItem(hist)
win2.nextRow()
p2 = win2.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.plot(np.sum(med, axis=1) / d.shape[1])
win2.show()
winlist.append(win2)
win2 = pg.GraphicsLayoutWidget()
win2.setWindowTitle("d-m")
p1 = win2.addPlot()
img = pg.ImageItem()
img.setImage((d - med).T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win2.addItem(hist)
win2.nextRow()
p2 = win2.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.addLegend()
for i, v in enumerate((d - med).T):
p2.plot(v, pen=(i, d.shape[1]), name=str(i))
p2.plot(cutoff, pen=pg.mkPen('w', width=3))
win2.show()
winlist.append(win2)
# Find the bad pixel positions
# Note the [:, None] - needed to cast the correct shapes
badPixs = np.argwhere((d - med) > (cutoff.reshape(len(cutoff), 1)))
for pix in badPixs:
# get the other pixels in the row which aren't the cosmic
if debugging:
print("cleaning pixel", pix)
p = d[pix[0], [i for i in range(d.shape[1]) if not i == pix[1]]]
if debugging:
print("\tRemaining pixels in row are", p)
# Replace the cosmic by the average of the others
# Could get hairy if more than one cosmic per row.
# Maybe when doing many exposures?
d[pix[0], pix[1]] = np.mean(p)
if debugging:
win = pg.GraphicsLayoutWidget()
win.setWindowTitle("Clean Image")
p1 = win.addPlot()
img = pg.ImageItem()
img.setImage(d.copy().T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.plot(np.sum(d, axis=1))
win.show()
winlist.append(win)
app.exec_()
return np.array(d)
def stitchData(dataList, plot=False):
"""
Attempt to stitch together absorbance data. Will translate the second data
set to minimize leastsq between the two data sets.
:param dataList: Iterable of the data sets to be fit. Currently
it only takes the first two elements of the list, but should be
fairly straightforward to recursivly handle a list>2. Shifts the
second data set to overlap the first elements of dataList can be
either np.arrays or Absorbance class, where it will take the
proc_data itself
:param plot: bool whether or not you want the fit iterations to be plotted
(for debugging)
:return: a, a (2,) np.array of the shift
"""
# Data coercsion, make sure we know what we're working wtih
first = dataList[0]
if isinstance(first, Absorbance):
first = first.proc_data
second = dataList[1]
if isinstance(second, Absorbance):
second = second.proc_data
if plot:
# Keep a reference to whatever plot is open at call-time
# Useful if the calling script has plots before and after, as
# omitting this will cause future plots to be added to figures here
firstFig = plt.gcf()
plt.figure("Stitcher")
# Plot the raw input data
plt.plot(*first.T)
plt.plot(*second.T)
# Algorithm is set up such that the "second" data set spans the
# higher domain than first. Need to enforce this, and remember it
# so the correct shift is applied
flipped = False
if max(first[:, 0]) > max(second[:, 0]):
flipped = True
first, second = second, first
def stitchData(dataList, plot=False):
    """
    Attempt to stitch together absorbance data. Will translate the second data
    set to minimize leastsq between the two data sets.

    :param dataList: Iterable of the data sets to be fit. Currently
        it only takes the first two elements of the list, but should be
        fairly straightforward to recursively handle a list>2. Shifts the
        second data set to overlap the first. Elements of dataList can be
        either np.arrays or Absorbance class, where it will take the
        proc_data itself.
    :param plot: bool whether or not you want the fit iterations to be plotted
        (for debugging)
    :return: a, a (2,) np.array of the shift
    """
    # Data coercion, make sure we know what we're working with:
    # unwrap Absorbance objects down to their bare proc_data arrays.
    first = dataList[0]
    if isinstance(first, Absorbance):
        first = first.proc_data
    second = dataList[1]
    if isinstance(second, Absorbance):
        second = second.proc_data
    if plot:
        # Keep a reference to whatever plot is open at call-time
        # Useful if the calling script has plots before and after, as
        # omitting this will cause future plots to be added to figures here
        firstFig = plt.gcf()
        plt.figure("Stitcher")
        # Plot the raw input data
        plt.plot(*first.T)
        plt.plot(*second.T)
    # Algorithm is set up such that the "second" data set spans the
    # higher domain than first. Need to enforce this, and remember it
    # so the correct shift is applied
    flipped = False
    if max(first[:, 0]) > max(second[:, 0]):
        flipped = True
        first, second = second, first

    def fitter(p, shiftable, immutable):
        """
        Defines the function used in spo.leastsq to stitch data

        Input:
        p = Tuple of the the shifts for x and y
        shiftable = Data set that is being shifted
        immutable = Data set that is not being shifted

        Returns:
        Array to be minimized by spo.leastsq (pointwise difference over
        the overlap region).
        """
        # designed to over
        # Get the shifts
        dx = p[0]
        dy = p[1]
        # Don't want pass-by-reference nonsense, recast our own refs
        shiftable = np.array(shiftable)
        immutable = np.array(immutable)
        # Shift the data set
        shiftable[:, 1] += dy
        shiftable[:, 0] += dx
        # Create an interpolator. We want a
        # direct comparision for subtracting the two functions
        # Different spec grating positions have different wavelengths
        # so they're not directly comparable.
        shiftF = spi.interp1d(*shiftable.T)
        # Find the bounds of where the two data sets overlap
        overlap = (min(shiftable[:, 0]), max(immutable[:, 0]))
        print("overlap", overlap)
        # Determine the indices of the immutable function
        # where it overlaps. argwhere returns 2-d thing,
        # requiring the [0] at the end of each call
        fOlIdx = (min(np.argwhere(immutable[:, 0] >= overlap[0]))[0],
                  max(np.argwhere(immutable[:, 0] <= overlap[1]))[0])
        print("fOlIdx", fOlIdx)
        # Get the interpolated values of the shiftable function at the same
        # x-coordinates as the immutable case
        newShift = shiftF(immutable[fOlIdx[0]:fOlIdx[1], 0])
        if plot:
            plt.plot(
                *immutable[fOlIdx[0]:fOlIdx[1], :].T, marker='o',
                label="imm", markersize=10
            )
            plt.plot(
                immutable[fOlIdx[0]:fOlIdx[1], 0], newShift, marker='o',
                label="shift"
            )
        imm = immutable[fOlIdx[0]:fOlIdx[1], 1]
        shift = newShift
        return imm - shift

    # Initial guess: tiny x shift, y shift of 1% of the first set's peak.
    # Note the argument order: `second` is the one being shifted.
    a, _, _, msg, err = spo.leastsq(
        fitter, [0.0001, 0.01 * max(first[:, 1])],
        args=(second, first), full_output=1
    )
    if plot:
        # Revert back to the original figure, as per top comments
        plt.figure(firstFig.number)
    # Need to invert the shift if we flipped which
    # model we're supposed to move
    if flipped:
        a *= -1
    return a
def save_parameter_sweep_no_sb(
        spectrum_list, file_name, folder_str, param_name, unit, verbose=False):
    """
    Take a fully processed list of spectrum objects and slice
    Spectrum.sb_fits appropriately to get an output like:

    "Parameter"|SB1 freq|err|SB1 amp|error|SB1 linewidth|error|SB2...|SBn...|
    param1     |    .   |
    param2     |    .   |
      .
      .
      .

    Currently I'm thinking fuck the offset y0
    After constructing this large matrix, it will save it somewhere.

    :param spectrum_list: processed spectrum objects; each needs
        .parameters, .fname, .sb_results (2D) and .full_dict
    :param file_name: base name (no extension) for the three output files
    :param folder_str: directory to save into (created if missing)
    :param param_name: key into spec.parameters used as the sweep axis
    :param unit: unit string of the swept parameter, used in the header
    :param verbose: print progress/debug information
    :return: None; writes file_name{.txt,_snip.txt,_norm.txt} to folder_str
    """
    spectrum_list.sort(key=lambda x: x.parameters[param_name])
    included_spectra = dict()
    param_array = None
    sb_included = []
    for spec in spectrum_list:
        # use a set so each sideband order appears only once
        sb_included = sorted(
            list(
                set(
                    sb_included + list(spec.full_dict.keys())
                )
            )
        )
        included_spectra[spec.fname.split('/')[-1]] = \
            spec.parameters[param_name]
        # If these are from summed spectra, then only the the first file name
        # from that sum will show up here, which should be fine?
    if verbose:
        print("included names:", included_spectra)
        print("sb_included:", sb_included)
    for spec in spectrum_list:
        # This is different from full_dict in that the list has the
        # sideband order as the zeroth element.
        temp_dict = {}
        if verbose:
            print("the sb_results:", spec.sb_results)
        # a 1-D sb_results means only one (or no) sideband was fit; skip it
        if spec.sb_results.ndim == 1:
            continue
        for index in range(len(spec.sb_results[:, 0])):
            if verbose:
                print("my array slice:", spec.sb_results[index, :])
            temp_dict[int(round(spec.sb_results[index, 0]))] = np.array(
                spec.sb_results[index, 1:])
        if verbose:
            print(temp_dict)
        # pad any sideband this spectrum is missing with a row of zeroes
        for sb in sb_included:
            blank = np.zeros(6)
            if sb not in temp_dict:
                temp_dict[sb] = blank
        # The parameter may be a bare number, or a string like "16 mW";
        # fall back to slicing off the leading characters for the latter.
        try:
            spec_data = np.array([float(spec.parameters[param_name])])
        except (TypeError, ValueError):
            spec_data = np.array([float(spec.parameters[param_name][:2])])
        for key in sorted(temp_dict.keys()):
            spec_data = np.hstack((spec_data, temp_dict[key]))
        # seed the stack on the first pass rather than abusing an exception
        if param_array is None:
            param_array = np.array(spec_data)
        else:
            param_array = np.vstack((param_array, spec_data))
    if verbose:
        print("The shape of the param_array is:", param_array.shape)
    '''
    param_array_norm = np.array(param_array).T # python iterates over rows
    for elem in [x for x in xrange(len(param_array_norm)) if (x-1)%7 == 3]:
        temp_max = np.max(param_array_norm[elem])
        param_array_norm[elem] = param_array_norm[elem] / temp_max
        param_array_norm[elem + 1] = param_array_norm[elem + 1] / temp_max
    '''
    snipped_array = param_array[:, 0]
    norm_array = param_array[:, 0]
    if verbose:
        print("Snipped_array is", snipped_array)
    # After the parameter column, columns repeat every 6 entries:
    # (freq, freq err, strength, strength err, linewidth, linewidth err)
    for ii in range(len(param_array.T)):
        if (ii - 1) % 6 == 0:
            # sideband frequency column
            if verbose:
                print("param_array shape", param_array[:, ii])
            snipped_array = np.vstack((snipped_array, param_array[:, ii]))
            norm_array = np.vstack((norm_array, param_array[:, ii]))
        elif (ii - 1) % 6 == 2:
            # sideband strength column; normalize by its maximum
            snipped_array = np.vstack((snipped_array, param_array[:, ii]))
            temp_max = np.max(param_array[:, ii])
            norm_array = np.vstack((norm_array, param_array[:, ii] / temp_max))
        elif (ii - 1) % 6 == 3:
            # strength error; scale by the same max as the strength itself
            snipped_array = np.vstack((snipped_array, param_array[:, ii]))
            norm_array = np.vstack((norm_array, param_array[:, ii] / temp_max))
    snipped_array = snipped_array.T
    norm_array = norm_array.T
    try:
        os.mkdir(folder_str)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise
    norm_name = file_name + '_norm.txt'
    snip_name = file_name + '_snip.txt'
    file_name = file_name + '.txt'
    try:
        included_spectra_str = json.dumps(
            included_spectra, sort_keys=True, indent=4, separators=(',', ': ')
        )
    except (TypeError, ValueError):
        # non-serializable parameter values
        print("Source: save_parameter_sweep\nJSON FAILED")
        return
    included_spectra_str = included_spectra_str.replace('\n', '\n#')
    included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
    origin_import1 = param_name
    origin_import2 = unit
    origin_import3 = ""
    for order in sb_included:
        # BUG FIX: the leading comma was missing here, which fused the
        # parameter name onto the first "Frequency" title and shifted the
        # whole header row one column left relative to the data (the
        # sibling save_parameter_sweep_vs_sideband includes the comma).
        origin_import1 += \
            ",Frequency,error,Sideband strength,error,Linewidth,error"
        origin_import2 += ",eV,,arb. u.,,meV,"
        origin_import3 += ",{0},,{0},,{0},".format(order)
    origin_total = \
        origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
    origin_import1 = param_name
    origin_import2 = unit
    origin_import3 = ""
    for order in sb_included:
        origin_import1 += ",Frequency,Sideband strength,error"
        origin_import2 += ",eV,arb. u.,"
        origin_import3 += ",{0},{0},".format(order)
    origin_snip = \
        origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
    header_total = '#' + included_spectra_str + '\n' + origin_total
    header_snip = '#' + included_spectra_str + '\n' + origin_snip
    if verbose:
        print("the param_array is:", param_array)
    np.savetxt(
        os.path.join(folder_str, file_name), param_array, delimiter=',',
        header=header_total, comments='', fmt='%0.6e'
    )
    np.savetxt(
        os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
        header=header_snip, comments='', fmt='%0.6e'
    )
    np.savetxt(
        os.path.join(folder_str, norm_name), norm_array, delimiter=',',
        header=header_snip, comments='', fmt='%0.6e'
    )
    if verbose:
        print("Saved the file.\nDirectory: {}".format(
            os.path.join(folder_str, file_name)
        )
        )
def save_parameter_sweep(
        spectrum_list, file_name, folder_str, param_name, unit,
        wanted_indices=None, skip_empties=False, verbose=False,
        header_dict=None, only_even=False
):
    """
    Take a fully processed list of spectrum objects and slice
    Spectrum.sb_fits appropriately to get an output like:

    "Parameter"|SB1 freq|err|SB1 amp|error|SB1 linewidth|error|SB2...|SBn...|
    param1     |    .   |
    param2     |    .   |
      .
      .
      .

    Currently I'm thinking fuck the offset y0
    After constructing this large matrix, it will save it somewhere.

    This function has been updated to pass a list of indices to slice for the
    return values.

    :param spectrum_list: processed spectrum objects
    :param file_name: base name for saving; a list of io.BytesIO writes to
        those buffers instead; falsy skips saving
    :param folder_str: directory to save into; None skips directory creation
    :param param_name: key into spec.parameters (or [key, subkey] when the
        parameter lives in a sub-dict, e.g. field strength mean/std)
    :param unit: unit string for the header
    :param wanted_indices: which sb_results columns go in the snipped/norm
        outputs; defaults to [1, 3, 4] (frequency, strength, strength error)
    :param skip_empties: If False, will add a row of zeroes for the parameter
        even if no sidebands are found. If True, will not add a line for that
        parameter
    :param verbose: print progress/debug information
    :param header_dict: extra entries merged into the saved header
    :param only_even: don't include odd orders in the saved sweep

    [sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error,
     Gauss linewidth (eV), Linewidth error (eV)]
    [    0     ,      1   ,        2       ,        3         ,      4    ,
           5              ,        6       ]

    :return: sb_included, param_array, snipped_array, norm_array
    """
    # Mutable default arguments are shared across calls; use sentinels.
    if wanted_indices is None:
        wanted_indices = [1, 3, 4]
    if header_dict is None:
        header_dict = {}
    if isinstance(param_name, list):
        # if you pass two things because the param you want
        # is in a dict (e.g. field strength has mean/std)
        # do it that way
        param_name_list = list(param_name)  # keep reference to old one
        paramGetter = lambda x: x.parameters[param_name_list[0]][param_name_list[1]]
        # Keep the name for labeling things later on
        param_name = param_name[0]
    else:
        paramGetter = lambda x: x.parameters[param_name]
    # Sort all of the spectra based on the desired key
    spectrum_list.sort(key=paramGetter)
    # keep track of which file name corresponds to which parameter
    # which gets put in
    included_spectra = dict()
    # The big array which will be stacked up to keep all of the sideband
    # details vs desired parameter
    param_array = None
    # list of which sidebands are seen throughout.
    sb_included = []
    # how many parameters (area, strength, linewidth, pos, etc.) are there?
    # Here incase software changes and more things are kept in
    # sb results. Needed to handle how to slice the arrays
    try:
        num_params = spectrum_list[0].sb_results.shape[1]
    except IndexError:
        # There's a file with only 1 sb and it happens to be first
        # in the list.
        num_params = spectrum_list[0].sb_results.shape[0]
    except AttributeError:
        # The first file has no sidebands, so just hardcode it,
        # as stated below.
        num_params = 0
    # Rarely, there's an issue where I'm doing some testing and there's a set
    # where the first file has no sidebands in it, so the above thing returns 0
    # It seems really silly to do a bunch of testing to try and correct for
    # that, so I'm going to hardcode the number of parameters.
    if num_params == 0:
        num_params = 7
    # loop through all of them once to figure out which sidebands are seen
    # in all spectra
    for spec in spectrum_list:
        try:
            # use sets to keep track of only unique sidebands
            sb_included = sorted(
                list(
                    set(
                        sb_included + list(spec.full_dict.keys())
                    )
                )
            )
        except AttributeError:
            print("No full dict?", spec.fname)
            print(spec.sb_list)
        # If these are from summed spectra, then only the the first file name
        # from that sum will show up here, which should be fine?
        included_spectra[spec.fname.split('/')[-1]] = paramGetter(spec)
    if only_even:
        sb_included = [ii for ii in sb_included if not ii % 2]
    if verbose:
        print("included names:", included_spectra)
        print("sb_included:", sb_included)
    for spec in spectrum_list:
        # Flag to keep whethere there are no sidebands or not. Used to skip
        # issues when trying to index on empty arrays
        noSidebands = False
        if verbose:
            print("the sb_results:", spec.sb_results)
        # if no sidebands were found, skip this one
        try:
            # TODO: (08/14/18) the .ndim==1 isn't the correct check, since it
            #   fails when looking at the laser line. Need to test this with a
            #   real empty data set, vs data set with 1 sb
            # (08/28/18) I'm not sure what the "not spec" is trying to handle
            #   spec.sb_results is None occurs when _no_ sidebands were fit
            #   spec.sb_results.ndim == 1 happens when only one sideband is found
            if not spec or spec.sb_results is None \
                    or spec.sb_results.ndim == 1:
                if spec.sb_results is None:
                    # Flag no sidebands are found
                    noSidebands = True
                elif spec.sb_results[0] == 0:
                    # Cast it to 2d to allow slicing later on. Not sure why
                    # this is only done if the laser line is the one found.
                    spec.sb_results = np.atleast_2d(spec.sb_results)
                elif skip_empties:
                    continue
                else:
                    noSidebands = True
        except (AttributeError, TypeError):
            raise
        # Make an sb_results of all zeroes where we'll fill
        # in the sideband info we found
        new_spec = np.zeros((len(sb_included), num_params))
        if not noSidebands:
            sb_results = spec.sb_results.copy()
            saw_sbs = sb_results[:, 0]
            found_sb = sorted(list(set(sb_included) & set(saw_sbs)))
            found_idx = [sb_included.index(ii) for ii in found_sb]
            try:
                new_spec[:, 0] = sb_included
            except Exception:
                # shape mismatch is unexpected; dump state before re-raising
                print("new_spec", new_spec)
                raise
            try:
                if only_even:
                    new_spec[found_idx, :] = sb_results[
                        sb_results[:, 0] % 2 == 0]
                else:
                    new_spec[found_idx, :] = sb_results
            except ValueError:
                print(spec.fname)
                print("included:", sb_included)
                print("found:", found_sb, found_idx)
                print(new_spec.shape, sb_results.shape)
                print(sb_results)
                print(new_spec)
                raise
        spec_data = np.insert(new_spec.flatten(), 0, float(paramGetter(spec)))
        # seed the stack on the first pass; np.vstack is the supported
        # spelling (np.row_stack is a deprecated alias)
        if param_array is None:
            param_array = np.array(spec_data)
        else:
            param_array = np.vstack((param_array, spec_data))
    # if you only pass one spectra
    if param_array.ndim == 1:
        # recast it to 2D for slicing the indices we want from the param array
        # from the passed argument
        param_array = param_array[None, :]
    snip = wanted_indices
    N = len(sb_included)
    # run it out across all of the points across the param_array
    snipped_indices = [0] + list(
        1 + np.array(snip * N) + num_params
        * np.array(sorted(list(range(N)) * len(snip)))
    )
    snipped_array = param_array[:, snipped_indices]
    norm_array = snipped_array.copy()
    # normalize the area if it's requested
    if 3 in snip:
        num_snip = len(snip)
        strength_idx = snip.index(3)
        if 4 in snip:
            # normalize error first if it was requested
            idx = snip.index(4)
            norm_array[:, 1 + idx + np.arange(N) * num_snip] /= \
                norm_array[
                    :,
                    1 + strength_idx + np.arange(N) * num_snip
                ].max(axis=0)
        strength_idx = snip.index(3)
        norm_array[:, 1 + strength_idx + np.arange(N) * num_snip] /= \
            norm_array[
                :,
                1 + strength_idx + np.arange(N) * num_snip
            ].max(axis=0)
    try:
        os.mkdir(folder_str)
    except TypeError:
        # if you pass None as folder_str (for using byteIO)
        pass
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise
    included_spectra.update(header_dict)
    try:
        included_spectra_str = json.dumps(
            included_spectra, sort_keys=True, indent=4, separators=(',', ': ')
        )
    except (TypeError, ValueError):
        # non-serializable parameter values
        print("Source: save_parameter_sweep\nJSON FAILED")
        return
    included_spectra_str = included_spectra_str.replace('\n', '\n#')
    included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
    # this will make the header chunk for the full, un-sliced data set
    # TODO: fix naming so you aren't looping twice
    # 1/9/18 This isn't needed, right? Why isn't it deleted?
    origin_import1 = param_name
    origin_import2 = unit
    origin_import3 = ""
    for order in sb_included:
        origin_import1 += \
            ",sideband,Frequency,error,Sideband strength,error,Linewidth,error"
        origin_import2 += ",order,eV,eV,arb. u.,arb.u.,meV,meV"
        origin_import3 += ",,{0},,{0},,{0},".format(order)
    origin_total = origin_import1 + "\n" \
        + origin_import2 + "\n" \
        + origin_import3
    # This little chunk will make a chunk block of header strings for the
    # sliced data set which can be looped over
    origin_import1 = param_name
    origin_import2 = unit
    origin_import3 = ""
    wanted_titles = [
        "Sideband", "Frequency", "error", "Sideband strength",
        "error", "Linewidth", "error"
    ]
    wanted_units = ["order", "eV", "eV", "arb. u.", "arb. u.", "eV", "eV"]
    wanted_comments = ["", "{0}", "", "{0}", "", "{0}", ""]
    wanted_titles = ",".join([wanted_titles[ii] for ii in wanted_indices])
    wanted_units = ",".join([wanted_units[ii] for ii in wanted_indices])
    wanted_comments = ",".join([wanted_comments[ii] for ii in wanted_indices])
    for order in sb_included:
        origin_import1 += "," + wanted_titles
        origin_import2 += "," + wanted_units
        origin_import3 += "," + wanted_comments.format(order)
    origin_snip = origin_import1 + "\n" \
        + origin_import2 + "\n" \
        + origin_import3
    header_total = '#' + included_spectra_str + '\n' + origin_total
    header_snip = '#' + included_spectra_str + '\n' + origin_snip
    if verbose:
        print("the param_array is:", param_array)
    if isinstance(file_name, list):
        if isinstance(file_name[0], io.BytesIO):
            np.savetxt(file_name[0], param_array, delimiter=',',
                       header=header_total, comments='', fmt='%0.6e')
            np.savetxt(file_name[1], snipped_array, delimiter=',',
                       header=header_snip, comments='', fmt='%0.6e')
            np.savetxt(file_name[2], norm_array, delimiter=',',
                       header=header_snip, comments='', fmt='%0.6e')
            # Need to reset the file position if you want to read them
            # immediately
            # Is it better to do that here, or assume you'll do it later?
            # I'm gonna assume here, because I can't currently think of a time
            # when I'd want to be at the end of the file
            for buffer in file_name:
                buffer.seek(0)
            if verbose:
                print("Saved the file to bytes objects")
    else:
        if file_name:
            norm_name = file_name + '_norm.txt'
            snip_name = file_name + '_snip.txt'
            file_name = file_name + '.txt'
            np.savetxt(
                os.path.join(folder_str, file_name), param_array,
                delimiter=',', header=header_total, comments='', fmt='%0.6e'
            )
            np.savetxt(
                os.path.join(folder_str, snip_name), snipped_array,
                delimiter=',', header=header_snip, comments='', fmt='%0.6e'
            )
            np.savetxt(
                os.path.join(folder_str, norm_name), norm_array,
                delimiter=',', header=header_snip, comments='', fmt='%0.6e'
            )
            if verbose:
                print("Saved the file.\nDirectory: {}".format(
                    os.path.join(folder_str, file_name)
                )
                )
        else:
            if verbose:
                print("Didn't save")
    return sb_included, param_array, snipped_array, norm_array
def save_parameter_sweep_vs_sideband(
        spectrum_list, file_name, folder_str, param_name, unit, verbose=False,
        wanted_indices=None
):
    """
    Similar to save_parameter_sweep, but the data[:,0] column is sideband
    number instead of series, and each set of columns correspond to a
    series step. Pretty much compiles all of the fit parameters from the
    files that are already saved and puts it into one file to keep from
    polluting the Origin folder.

    :param spectrum_list: processed spectrum objects
    :param file_name: base name for the output files; pass False (or an
        empty string) to skip saving
    :param folder_str: directory to save into (created if missing)
    :param param_name: key into spec.parameters labeling each series step
    :param unit: unit string for the header
    :param verbose: print debug information
    :param wanted_indices: sb_results columns to keep in the snipped file;
        defaults to [1, 3, 4] (frequency, strength, strength error).
        sb number is automatically prepended, so do not include in slicing
        list.

    [sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error,
     Gauss linewidth (eV), Linewidth error (eV)]
    [    0     ,      1   ,        2       ,        3         ,      4    ,
           5              ,        6       ]
    :return: None
    """
    # avoid a shared mutable default argument
    if wanted_indices is None:
        wanted_indices = [1, 3, 4]
    spectrum_list.sort(key=lambda x: x.parameters[param_name])
    included_spectra = dict()
    param_array = None
    sb_included = []
    # what parameters were included (for headers)
    params = sorted([x.parameters[param_name] for x in spectrum_list])
    for spec in spectrum_list:
        sb_included = sorted(
            list(
                set(
                    sb_included + list(spec.full_dict.keys())
                )
            )
        )
        included_spectra[spec.fname.split('/')[-1]] = \
            spec.parameters[param_name]
        # If these are from summed spectra, then only the the first file name
        # from that sum will show up here, which should be fine?
    if verbose:
        print("included names:", included_spectra)
        print("sb_included:", sb_included)
    param_array = np.array(sb_included)
    for spec in spectrum_list:
        temp_dict = spec.full_dict.copy()
        # prevent breaking if no sidebands in spectrum
        if not temp_dict:
            if verbose:
                print("No sidebands here? {}, {}".format(
                    spec.parameters["series"], spec.parameters["spec_step"]
                )
                )
            continue
        if verbose:
            print(temp_dict)
        # matrix for holding all of the sb information
        # for a given spectrum
        spec_matrix = None
        for sb in sb_included:
            blank = np.zeros(6)
            sb_data = temp_dict.get(sb, blank)
            # seed the matrix on the first sideband; np.vstack is the
            # supported spelling (np.row_stack is a deprecated alias)
            if spec_matrix is None:
                spec_matrix = sb_data
            else:
                spec_matrix = np.vstack((spec_matrix, sb_data))
        param_array = np.column_stack((param_array, spec_matrix))
    # the indices we want from the param array
    # 1- freq, 3-area, 4-area error
    snip = wanted_indices
    N = len(spectrum_list)
    # run it out across all of the points across the param_array
    snipped_indices = [0] + list(
        np.array(snip * N)
        + 6 * np.array(sorted(
            list(range(N)) * len(snip))
        )
    )
    snipped_array = param_array[:, snipped_indices]
    try:
        os.mkdir(folder_str)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise
    # BUG FIX: the names were previously built before checking file_name,
    # so passing False raised a TypeError and "" still produced a ".txt"
    # file — both contradicting the documented "pass false (or empty
    # string) to prevent saving" behavior.
    snip_name = None
    if file_name:
        snip_name = file_name + '_snip.txt'
        file_name = file_name + '.txt'
    try:
        included_spectra_str = json.dumps(
            included_spectra, sort_keys=True, indent=4, separators=(',', ': ')
        )
    except (TypeError, ValueError):
        # non-serializable parameter values
        print("Source: save_parameter_sweep\nJSON FAILED")
        return
    included_spectra_str = included_spectra_str.replace('\n', '\n#')
    included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
    origin_import1 = "Sideband"
    origin_import2 = "Order"
    origin_import3 = "SB"
    for param in params:
        origin_import1 += \
            ",Frequency,error,Sideband strength,error,Linewidth,error"
        origin_import2 += ",eV,,arb. u.,,meV,"
        origin_import3 += ",{0},,{0},,{0},".format(param)
    origin_total = origin_import1 + "\n" \
        + origin_import2 + "\n" \
        + origin_import3
    # This little chunk will make a chunk block of header strings for the
    # sliced data set which can be looped over
    origin_import1 = "Sideband"
    origin_import2 = "Order"
    origin_import3 = "SB"
    wanted_titles = [
        "Sideband", "Frequency", "error", "Sideband strength", "error",
        "Linewidth", "error"
    ]
    wanted_units = ["order", "eV", "eV", "arb. u.", "arb. u.", "eV", "eV"]
    wanted_comments = ["", "{0}", "", "{0}", "", "{0}", ""]
    wanted_titles = ",".join([wanted_titles[ii] for ii in wanted_indices])
    wanted_units = ",".join([wanted_units[ii] for ii in wanted_indices])
    wanted_comments = ",".join([wanted_comments[ii] for ii in wanted_indices])
    for param in params:
        origin_import1 += "," + wanted_titles
        origin_import2 += "," + wanted_units
        origin_import3 += "," + wanted_comments.format(param)
    origin_snip = origin_import1 + "\n" \
        + origin_import2 + "\n" \
        + origin_import3
    header_total = '#' + included_spectra_str + '\n' + origin_total
    header_snip = '#' + included_spectra_str + '\n' + origin_snip
    if verbose:
        print("the param_array is:", param_array)
    # allow passing false (or empty string) to prevent saving
    if file_name:
        np.savetxt(
            os.path.join(folder_str, file_name), param_array, delimiter=',',
            header=header_total, comments='', fmt='%0.6e'
        )
        np.savetxt(
            os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
            header=header_snip, comments='', fmt='%0.6e'
        )
        if verbose:
            print("Saved the file.\nDirectory: {}".format(
                os.path.join(folder_str, file_name)
            )
            )
    return None
def integrateData(data, t1, t2, ave=False):
    """
    Integrate a discrete data set over the window (t1, t2).

    Sums the y-values whose time falls strictly between the bounds and
    multiplies by the sampling interval dt (assumed uniform, taken from
    t[1] - t[0]).

    :param data: 2D array, data[:, 0] = t, data[:, 1] = y. If data is NxM
        with M >= 3, the third column is taken as the per-point errors
        and the integrated error (quadrature sum, scaled by dt) is
        returned as well.
    :param t1: start of integration
    :param t2: end of integration
    :param ave: if True, divide by T = t2 - t1 to return an average
    :return: tot, or (tot, error) when an error column is present
    """
    t = data[:, 0]
    y = data[:, 1]
    # BUG FIX: the error-column test must look at the number of columns
    # (shape[1]), not rows (shape[0]) — the old check indexed a missing
    # third column for any 2-column input with >= 3 rows.
    if data.shape[1] >= 3:
        errors = data[:, 2]
    else:
        errors = np.ones_like(y) * np.nan
    gt = set(np.where(t > t1)[0])
    lt = set(np.where(t < t2)[0])
    # find the intersection of the sets
    vals = list(gt & lt)
    # Calculate the average
    tot = np.sum(y[vals])
    error = np.sqrt(np.sum(errors[vals] ** 2))
    # Multiply by sampling
    tot *= (t[1] - t[0])
    error *= (t[1] - t[0])
    if ave:
        # Normalize by total width if you want an average
        tot /= (t2 - t1)
        # BUG FIX: previously divided the raw `errors` array here instead
        # of the integrated scalar `error`, so the returned error was
        # never averaged.
        error /= (t2 - t1)
    if not np.isnan(error):
        return tot, error
    return tot
def get_data_and_header(fname, returnOrigin=False):
    """
    Given a path to a raw data file, return the numeric data and the
    json-decoded header.

    The header is every leading line starting with '#' (the '#' is
    stripped before json decoding). Files whose name does not contain
    "Images" additionally carry a three-line Origin header immediately
    after the '#' block, which can be returned as well.

    :param fname: Filename to open
    :param returnOrigin: if True, also return the Origin header string
        (empty for image files, which have none)
    :return: data, header (dict)[, origin header (str)]
    """
    # BUG FIX: initialize here so returnOrigin=True on an image file
    # (which has no Origin header) no longer raises NameError.
    oh = ""
    with open(fname) as fh:
        # NOTE(review): assumes the file is non-empty and the header is a
        # contiguous leading block of '#' lines — confirm against writers.
        line = fh.readline()
        header_string = ''
        while line[0] == '#':
            header_string += line[1:]
            line = fh.readline()
        # image files don't have an origin header
        if "Images" not in fname:
            oh = line
            # last readline in loop removes first line in Origin Header
            # strip the remaining two
            oh += fh.readline()
            # remove final \n
            oh += fh.readline()[:-1]
        # re-read the whole file; genfromtxt skips the '#' lines itself
        data = np.genfromtxt(fname, delimiter=',')
    header = json.loads(header_string)
    if returnOrigin:
        return data, header, oh
    return data, header
def natural_glob(*args):
    """
    Glob the path built by joining *args and return the matches in
    "natural" (human) order: 1, 2, 3, ..., 10, 11 rather than the
    plain alphabetical 1, 10, 11, ..., 2, 21, ...

    Kept here (rather than imported) because it gets used all the time,
    almost always together with glob.glob. Recipe adapted from
    https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a
    -string-with-a-number-inside
    """
    import re

    def _coerce(token):
        # digit runs become ints so "2" sorts before "10"
        try:
            return int(token)
        except ValueError:
            return token

    def _human_key(name):
        # split on signed integer runs, e.g. "f10.txt" -> ["f", 10, ".txt"]
        # (see http://nedbatchelder.com/blog/200712/human_sorting.html)
        return [_coerce(tok) for tok in re.split('(-?\\d+)', name)]

    pattern = os.path.join(*args)
    return sorted(glob.glob(pattern), key=_human_key)
def convertTime(timeStr):
    """
    Convert a data-file header timestamp into seconds since the epoch.

    The data file headers carry the timestamp of data collection in the
    locale format "%x %X%p"; this wraps the strptime/mktime pair that is
    easy to forget.

    :param timeStr: the time as a string from the data file
    :return: seconds since the epoch (local time, as returned by mktime)
    """
    import time
    parsed = time.strptime(timeStr, "%x %X%p")
    return time.mktime(parsed)
| [
"os.mkdir",
"numpy.sum",
"json.dumps",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"scipy.interpolate.interp1d",
"os.path.join",
"pyqtgraph.GraphicsLayoutWidget",
"pyqtgraph.QtGui.QApplication",
"numpy.atleast_2d",
"numpy.set_printoptions",
"json.loads",
"num... | [((202, 236), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(500)'}), '(linewidth=500)\n', (221, 236), True, 'import numpy as np\n'), ((879, 898), 'numpy.array', 'np.array', (['raw_array'], {}), '(raw_array)\n', (887, 898), True, 'import numpy as np\n'), ((910, 977), 'scipy.ndimage.filters.median_filter', 'ndimage.filters.median_filter', (['d'], {'size': '(1, d.shape[1])', 'mode': '"""wrap"""'}), "(d, size=(1, d.shape[1]), mode='wrap')\n", (939, 977), True, 'import scipy.ndimage as ndimage\n'), ((4464, 4475), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (4472, 4475), True, 'import numpy as np\n'), ((31449, 31470), 'numpy.array', 'np.array', (['sb_included'], {}), '(sb_included)\n', (31457, 31470), True, 'import numpy as np\n'), ((36685, 36700), 'numpy.sum', 'np.sum', (['y[vals]'], {}), '(y[vals])\n', (36691, 36700), True, 'import numpy as np\n'), ((37861, 37896), 'numpy.genfromtxt', 'np.genfromtxt', (['fname'], {'delimiter': '""","""'}), "(fname, delimiter=',')\n", (37874, 37896), True, 'import numpy as np\n'), ((37911, 37936), 'json.loads', 'json.loads', (['header_string'], {}), '(header_string)\n', (37921, 37936), False, 'import json\n'), ((1549, 1574), 'pyqtgraph.QtGui.QApplication', 'pg.QtGui.QApplication', (['[]'], {}), '([])\n', (1570, 1574), True, 'import pyqtgraph as pg\n'), ((1590, 1615), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (1613, 1615), True, 'import pyqtgraph as pg\n'), ((1698, 1712), 'pyqtgraph.ImageItem', 'pg.ImageItem', ([], {}), '()\n', (1710, 1712), True, 'import pyqtgraph as pg\n'), ((1786, 1807), 'pyqtgraph.HistogramLUTItem', 'pg.HistogramLUTItem', ([], {}), '()\n', (1805, 1807), True, 'import pyqtgraph as pg\n'), ((2199, 2224), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (2222, 2224), True, 'import pyqtgraph as pg\n'), ((2312, 2326), 'pyqtgraph.ImageItem', 'pg.ImageItem', ([], {}), '()\n', (2324, 2326), True, 'import pyqtgraph as 
pg\n'), ((2395, 2416), 'pyqtgraph.HistogramLUTItem', 'pg.HistogramLUTItem', ([], {}), '()\n', (2414, 2416), True, 'import pyqtgraph as pg\n'), ((2685, 2710), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (2708, 2710), True, 'import pyqtgraph as pg\n'), ((2789, 2803), 'pyqtgraph.ImageItem', 'pg.ImageItem', ([], {}), '()\n', (2801, 2803), True, 'import pyqtgraph as pg\n'), ((2878, 2899), 'pyqtgraph.HistogramLUTItem', 'pg.HistogramLUTItem', ([], {}), '()\n', (2897, 2899), True, 'import pyqtgraph as pg\n'), ((3937, 3947), 'numpy.mean', 'np.mean', (['p'], {}), '(p)\n', (3944, 3947), True, 'import numpy as np\n'), ((3981, 4006), 'pyqtgraph.GraphicsLayoutWidget', 'pg.GraphicsLayoutWidget', ([], {}), '()\n', (4004, 4006), True, 'import pyqtgraph as pg\n'), ((4091, 4105), 'pyqtgraph.ImageItem', 'pg.ImageItem', ([], {}), '()\n', (4103, 4105), True, 'import pyqtgraph as pg\n'), ((4179, 4200), 'pyqtgraph.HistogramLUTItem', 'pg.HistogramLUTItem', ([], {}), '()\n', (4198, 4200), True, 'import pyqtgraph as pg\n'), ((5726, 5735), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5733, 5735), True, 'import matplotlib.pyplot as plt\n'), ((5744, 5766), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Stitcher"""'], {}), "('Stitcher')\n", (5754, 5766), True, 'import matplotlib.pyplot as plt\n'), ((5809, 5827), 'matplotlib.pyplot.plot', 'plt.plot', (['*first.T'], {}), '(*first.T)\n', (5817, 5827), True, 'import matplotlib.pyplot as plt\n'), ((5836, 5855), 'matplotlib.pyplot.plot', 'plt.plot', (['*second.T'], {}), '(*second.T)\n', (5844, 5855), True, 'import matplotlib.pyplot as plt\n'), ((7406, 7415), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (7413, 7415), True, 'import matplotlib.pyplot as plt\n'), ((7424, 7446), 'matplotlib.pyplot.figure', 'plt.figure', (['"""Stitcher"""'], {}), "('Stitcher')\n", (7434, 7446), True, 'import matplotlib.pyplot as plt\n'), ((7489, 7507), 'matplotlib.pyplot.plot', 'plt.plot', (['*first.T'], {}), 
'(*first.T)\n', (7497, 7507), True, 'import matplotlib.pyplot as plt\n'), ((7516, 7535), 'matplotlib.pyplot.plot', 'plt.plot', (['*second.T'], {}), '(*second.T)\n', (7524, 7535), True, 'import matplotlib.pyplot as plt\n'), ((8381, 8400), 'numpy.array', 'np.array', (['shiftable'], {}), '(shiftable)\n', (8389, 8400), True, 'import numpy as np\n'), ((8421, 8440), 'numpy.array', 'np.array', (['immutable'], {}), '(immutable)\n', (8429, 8440), True, 'import numpy as np\n'), ((8772, 8798), 'scipy.interpolate.interp1d', 'spi.interp1d', (['*shiftable.T'], {}), '(*shiftable.T)\n', (8784, 8798), True, 'import scipy.interpolate as spi\n'), ((10110, 10137), 'matplotlib.pyplot.figure', 'plt.figure', (['firstFig.number'], {}), '(firstFig.number)\n', (10120, 10137), True, 'import matplotlib.pyplot as plt\n'), ((14399, 14419), 'os.mkdir', 'os.mkdir', (['folder_str'], {}), '(folder_str)\n', (14407, 14419), False, 'import os\n'), ((14686, 14764), 'json.dumps', 'json.dumps', (['included_spectra'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(included_spectra, sort_keys=True, indent=4, separators=(',', ': '))\n", (14696, 14764), False, 'import json\n'), ((16180, 16215), 'os.path.join', 'os.path.join', (['folder_str', 'file_name'], {}), '(folder_str, file_name)\n', (16192, 16215), False, 'import os\n'), ((16329, 16364), 'os.path.join', 'os.path.join', (['folder_str', 'snip_name'], {}), '(folder_str, snip_name)\n', (16341, 16364), False, 'import os\n'), ((16479, 16514), 'os.path.join', 'os.path.join', (['folder_str', 'norm_name'], {}), '(folder_str, norm_name)\n', (16491, 16514), False, 'import os\n'), ((25080, 25100), 'os.mkdir', 'os.mkdir', (['folder_str'], {}), '(folder_str)\n', (25088, 25100), False, 'import os\n'), ((25388, 25466), 'json.dumps', 'json.dumps', (['included_spectra'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(included_spectra, sort_keys=True, indent=4, separators=(',', ': '))\n", (25398, 25466), False, 
'import json\n'), ((32449, 32492), 'numpy.column_stack', 'np.column_stack', (['(param_array, spec_matrix)'], {}), '((param_array, spec_matrix))\n', (32464, 32492), True, 'import numpy as np\n'), ((32911, 32931), 'os.mkdir', 'os.mkdir', (['folder_str'], {}), '(folder_str)\n', (32919, 32931), False, 'import os\n'), ((33158, 33236), 'json.dumps', 'json.dumps', (['included_spectra'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(included_spectra, sort_keys=True, indent=4, separators=(',', ': '))\n", (33168, 33236), False, 'import json\n'), ((36721, 36746), 'numpy.sum', 'np.sum', (['(errors[vals] ** 2)'], {}), '(errors[vals] ** 2)\n', (36727, 36746), True, 'import numpy as np\n'), ((36963, 36978), 'numpy.isnan', 'np.isnan', (['error'], {}), '(error)\n', (36971, 36978), True, 'import numpy as np\n'), ((39483, 39516), 'time.strptime', 'time.strptime', (['timeStr', '"""%x %X%p"""'], {}), "(timeStr, '%x %X%p')\n", (39496, 39516), False, 'import time\n'), ((988, 1008), 'numpy.median', 'np.median', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (997, 1008), True, 'import numpy as np\n'), ((1381, 1406), 'numpy.std', 'np.std', (['meanMedian[-100:]'], {}), '(meanMedian[-100:])\n', (1387, 1406), True, 'import numpy as np\n'), ((2089, 2106), 'numpy.sum', 'np.sum', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (2095, 2106), True, 'import numpy as np\n'), ((4366, 4383), 'numpy.sum', 'np.sum', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (4372, 4383), True, 'import numpy as np\n'), ((9512, 9602), 'matplotlib.pyplot.plot', 'plt.plot', (['*immutable[fOlIdx[0]:fOlIdx[1], :].T'], {'marker': '"""o"""', 'label': '"""imm"""', 'markersize': '(10)'}), "(*immutable[fOlIdx[0]:fOlIdx[1], :].T, marker='o', label='imm',\n markersize=10)\n", (9520, 9602), True, 'import matplotlib.pyplot as plt\n'), ((9657, 9742), 'matplotlib.pyplot.plot', 'plt.plot', (['immutable[fOlIdx[0]:fOlIdx[1], 0]', 'newShift'], {'marker': '"""o"""', 'label': '"""shift"""'}), 
"(immutable[fOlIdx[0]:fOlIdx[1], 0], newShift, marker='o', label='shift'\n )\n", (9665, 9742), True, 'import matplotlib.pyplot as plt\n'), ((12070, 12106), 'numpy.array', 'np.array', (['spec.sb_results[index, 1:]'], {}), '(spec.sb_results[index, 1:])\n', (12078, 12106), True, 'import numpy as np\n'), ((12226, 12237), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (12234, 12237), True, 'import numpy as np\n'), ((12742, 12780), 'numpy.hstack', 'np.hstack', (['(spec_data, temp_dict[key])'], {}), '((spec_data, temp_dict[key]))\n', (12751, 12780), True, 'import numpy as np\n'), ((12821, 12856), 'numpy.vstack', 'np.vstack', (['(param_array, spec_data)'], {}), '((param_array, spec_data))\n', (12830, 12856), True, 'import numpy as np\n'), ((13773, 13819), 'numpy.vstack', 'np.vstack', (['(snipped_array, param_array[:, ii])'], {}), '((snipped_array, param_array[:, ii]))\n', (13782, 13819), True, 'import numpy as np\n'), ((13845, 13888), 'numpy.vstack', 'np.vstack', (['(norm_array, param_array[:, ii])'], {}), '((norm_array, param_array[:, ii]))\n', (13854, 13888), True, 'import numpy as np\n'), ((23582, 23620), 'numpy.row_stack', 'np.row_stack', (['(param_array, spec_data)'], {}), '((param_array, spec_data))\n', (23594, 23620), True, 'import numpy as np\n'), ((27768, 27871), 'numpy.savetxt', 'np.savetxt', (['file_name[0]', 'param_array'], {'delimiter': '""","""', 'header': 'header_total', 'comments': '""""""', 'fmt': '"""%0.6e"""'}), "(file_name[0], param_array, delimiter=',', header=header_total,\n comments='', fmt='%0.6e')\n", (27778, 27871), True, 'import numpy as np\n'), ((27903, 28007), 'numpy.savetxt', 'np.savetxt', (['file_name[1]', 'snipped_array'], {'delimiter': '""","""', 'header': 'header_snip', 'comments': '""""""', 'fmt': '"""%0.6e"""'}), "(file_name[1], snipped_array, delimiter=',', header=header_snip,\n comments='', fmt='%0.6e')\n", (27913, 28007), True, 'import numpy as np\n'), ((28039, 28140), 'numpy.savetxt', 'np.savetxt', (['file_name[2]', 
'norm_array'], {'delimiter': '""","""', 'header': 'header_snip', 'comments': '""""""', 'fmt': '"""%0.6e"""'}), "(file_name[2], norm_array, delimiter=',', header=header_snip,\n comments='', fmt='%0.6e')\n", (28049, 28140), True, 'import numpy as np\n'), ((32065, 32076), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (32073, 32076), True, 'import numpy as np\n'), ((35389, 35424), 'os.path.join', 'os.path.join', (['folder_str', 'file_name'], {}), '(folder_str, file_name)\n', (35401, 35424), False, 'import os\n'), ((35554, 35589), 'os.path.join', 'os.path.join', (['folder_str', 'snip_name'], {}), '(folder_str, snip_name)\n', (35566, 35589), False, 'import os\n'), ((36486, 36501), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (36498, 36501), True, 'import numpy as np\n'), ((36525, 36541), 'numpy.where', 'np.where', (['(t > t1)'], {}), '(t > t1)\n', (36533, 36541), True, 'import numpy as np\n'), ((36559, 36575), 'numpy.where', 'np.where', (['(t < t2)'], {}), '(t < t2)\n', (36567, 36575), True, 'import numpy as np\n'), ((39007, 39026), 'os.path.join', 'os.path.join', (['*args'], {}), '(*args)\n', (39019, 39026), False, 'import os\n'), ((2112, 2134), 'pyqtgraph.mkPen', 'pg.mkPen', (['"""w"""'], {'width': '(3)'}), "('w', width=3)\n", (2120, 2134), True, 'import pyqtgraph as pg\n'), ((2586, 2605), 'numpy.sum', 'np.sum', (['med'], {'axis': '(1)'}), '(med, axis=1)\n', (2592, 2605), True, 'import numpy as np\n'), ((3204, 3226), 'pyqtgraph.mkPen', 'pg.mkPen', (['"""w"""'], {'width': '(3)'}), "('w', width=3)\n", (3212, 3226), True, 'import pyqtgraph as pg\n'), ((12977, 12996), 'numpy.array', 'np.array', (['spec_data'], {}), '(spec_data)\n', (12985, 12996), True, 'import numpy as np\n'), ((13949, 13995), 'numpy.vstack', 'np.vstack', (['(snipped_array, param_array[:, ii])'], {}), '((snipped_array, param_array[:, ii]))\n', (13958, 13995), True, 'import numpy as np\n'), ((14020, 14046), 'numpy.max', 'np.max', (['param_array[:, ii]'], {}), '(param_array[:, ii])\n', 
(14026, 14046), True, 'import numpy as np\n'), ((14072, 14126), 'numpy.vstack', 'np.vstack', (['(norm_array, param_array[:, ii] / temp_max)'], {}), '((norm_array, param_array[:, ii] / temp_max))\n', (14081, 14126), True, 'import numpy as np\n'), ((16685, 16720), 'os.path.join', 'os.path.join', (['folder_str', 'file_name'], {}), '(folder_str, file_name)\n', (16697, 16720), False, 'import os\n'), ((23817, 23836), 'numpy.array', 'np.array', (['spec_data'], {}), '(spec_data)\n', (23825, 23836), True, 'import numpy as np\n'), ((28808, 28843), 'os.path.join', 'os.path.join', (['folder_str', 'file_name'], {}), '(folder_str, file_name)\n', (28820, 28843), False, 'import os\n'), ((28989, 29024), 'os.path.join', 'os.path.join', (['folder_str', 'snip_name'], {}), '(folder_str, snip_name)\n', (29001, 29024), False, 'import os\n'), ((29171, 29206), 'os.path.join', 'os.path.join', (['folder_str', 'norm_name'], {}), '(folder_str, norm_name)\n', (29183, 29206), False, 'import os\n'), ((32171, 32207), 'numpy.row_stack', 'np.row_stack', (['(spec_matrix, sb_data)'], {}), '((spec_matrix, sb_data))\n', (32183, 32207), True, 'import numpy as np\n'), ((32737, 32755), 'numpy.array', 'np.array', (['(snip * N)'], {}), '(snip * N)\n', (32745, 32755), True, 'import numpy as np\n'), ((35771, 35806), 'os.path.join', 'os.path.join', (['folder_str', 'file_name'], {}), '(folder_str, file_name)\n', (35783, 35806), False, 'import os\n'), ((38951, 38977), 're.split', 're.split', (['"""(-?\\\\d+)"""', 'text'], {}), "('(-?\\\\d+)', text)\n", (38959, 38977), False, 'import re\n'), ((9148, 9190), 'numpy.argwhere', 'np.argwhere', (['(immutable[:, 0] >= overlap[0])'], {}), '(immutable[:, 0] >= overlap[0])\n', (9159, 9190), True, 'import numpy as np\n'), ((9218, 9260), 'numpy.argwhere', 'np.argwhere', (['(immutable[:, 0] <= overlap[1])'], {}), '(immutable[:, 0] <= overlap[1])\n', (9229, 9260), True, 'import numpy as np\n'), ((14187, 14233), 'numpy.vstack', 'np.vstack', (['(snipped_array, param_array[:, 
ii])'], {}), '((snipped_array, param_array[:, ii]))\n', (14196, 14233), True, 'import numpy as np\n'), ((14259, 14313), 'numpy.vstack', 'np.vstack', (['(norm_array, param_array[:, ii] / temp_max)'], {}), '((norm_array, param_array[:, ii] / temp_max))\n', (14268, 14313), True, 'import numpy as np\n'), ((24220, 24238), 'numpy.array', 'np.array', (['(snip * N)'], {}), '(snip * N)\n', (24228, 24238), True, 'import numpy as np\n'), ((21980, 22010), 'numpy.atleast_2d', 'np.atleast_2d', (['spec.sb_results'], {}), '(spec.sb_results)\n', (21993, 22010), True, 'import numpy as np\n'), ((24916, 24928), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (24925, 24928), True, 'import numpy as np\n'), ((29421, 29456), 'os.path.join', 'os.path.join', (['folder_str', 'file_name'], {}), '(folder_str, file_name)\n', (29433, 29456), False, 'import os\n'), ((24676, 24688), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (24685, 24688), True, 'import numpy as np\n'), ((25018, 25030), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (25027, 25030), True, 'import numpy as np\n'), ((24788, 24800), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (24797, 24800), True, 'import numpy as np\n')] |
# direct to proper path
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm, rcParams
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import seaborn as sns
import itertools
from collections import defaultdict
import math
import json
sim_rec_path = 'all_recs_topnUCB.npy'
result_path = '../data/Results_Salis.csv'
whole_size = 4096
# data_path = '../data/Comparison_Data/Model_Comparisons.csv'
# data_df = pd.read_csv(data_path, header= 0)
print('Load simulated recommendation file from {}'.format(sim_rec_path))
sim_recs = np.array(np.load(sim_rec_path, allow_pickle = True))
n_trial, n_round, n_batch = sim_recs.shape
# sim_recs = np.concatenate(sim_recs, axis = 0)
print('sim_recs shape: ', np.array(sim_recs).shape)
print('Load result file from {}'.format(result_path))
df = pd.read_csv(result_path, header = 0)
print(df.sort_values(by = ['AVERAGE'])['AVERAGE'])
all_rec_averages = []
for i in range(n_trial):
rec_averages = []
for j in range(n_round):
rec_average = df.loc[df['RBS'].isin(sim_recs[i,j,:]), 'AVERAGE']
rec_averages.append(rec_average)
per_trial_max = np.sort(np.concatenate(rec_averages, axis = 0))[::-1][:3]
print('Trial {} round {} max {} '.format(i, j ,per_trial_max))
print()
all_rec_averages.append(rec_averages)
print('all rec average shape: ', np.array(all_rec_averages).shape)
# all_rec_averages = np.array(all_rec_averages)
# all_rec_averages = all_rec_averages.
| [
"sys.path.append",
"numpy.load",
"pandas.read_csv",
"numpy.array",
"os.path.join",
"numpy.concatenate"
] | [((993, 1027), 'pandas.read_csv', 'pd.read_csv', (['result_path'], {'header': '(0)'}), '(result_path, header=0)\n', (1004, 1027), True, 'import pandas as pd\n'), ((75, 93), 'os.path.join', 'os.path.join', (['""".."""'], {}), "('..')\n", (87, 93), False, 'import os\n'), ((131, 159), 'sys.path.append', 'sys.path.append', (['module_path'], {}), '(module_path)\n', (146, 159), False, 'import sys\n'), ((746, 786), 'numpy.load', 'np.load', (['sim_rec_path'], {'allow_pickle': '(True)'}), '(sim_rec_path, allow_pickle=True)\n', (753, 786), True, 'import numpy as np\n'), ((907, 925), 'numpy.array', 'np.array', (['sim_recs'], {}), '(sim_recs)\n', (915, 925), True, 'import numpy as np\n'), ((1534, 1560), 'numpy.array', 'np.array', (['all_rec_averages'], {}), '(all_rec_averages)\n', (1542, 1560), True, 'import numpy as np\n'), ((1326, 1362), 'numpy.concatenate', 'np.concatenate', (['rec_averages'], {'axis': '(0)'}), '(rec_averages, axis=0)\n', (1340, 1362), True, 'import numpy as np\n')] |
import pytest
from scipy import signal
from scipy.interpolate import interp1d
import numpy as np
from numpy import pi
# This package must first be installed with `pip install -e .` or similar
from waveform_analysis import ABC_weighting, A_weighting, A_weight
# It will plot things for sanity-checking if MPL is installed
try:
import matplotlib.pyplot as plt
mpl = True
except ImportError:
mpl = False
# ANSI S1.4-1983 Table AI "Exact frequency"
frequencies = np.array((10.00, 12.59, 15.85, 19.95, 25.12, 31.62, 39.81,
50.12, 65.10, 79.43, 100.00, 125.90, 158.50, 199.50,
251.20, 316.20, 398.10, 501.20, 631.00, 794.30,
1000.00, 1259.00, 1585.00, 1995.00, 2512.00, 3162.00,
3981.00, 5012.00, 6310.00, 7943.00, 10000.00,
12590.00, 15850.00, 19950.00, 25120.00, 31620.00,
39810.00, 50120.00, 63100.00, 79430.00, 100000.00,
))
responses = {}
# ANSI S1.4-1983 Table AI "A weighting"
responses['A'] = np.array((-70.4, -63.4, -56.7, -50.5, -44.7, -39.4, -34.6,
-30.2, -26.2, -22.5, -19.1, -16.1, -13.4, -10.9,
-8.6, -6.6, -4.8, -3.2, -1.9, -0.8, 0.0, +0.6,
+1.0, +1.2, +1.3, +1.2, +1.0, +0.5, -0.1, -1.1,
-2.5, -4.3, -6.6, -9.3, -12.4, -15.8, -19.3, -23.1,
-26.9, -30.8, -34.7,
))
# ANSI S1.4-1983 Table IV "B Weighting"
responses['B'] = np.array((-38.2, -33.2, -28.5, -24.2, -20.4, -17.1, -14.2,
-11.6, -9.3, -7.4, -5.6, -4.2, -3.0, -2.0, -1.3,
-0.8, -0.5, -0.3, -0.1, 0.0, 0.0, 0.0, 0.0, -0.1,
-0.2, -0.4, -0.7, -1.2, -1.9, -2.9, -4.3, -6.1,
-8.4, -11.1,
))
# ANSI S1.4-1983 Table IV "C Weighting"
responses['C'] = np.array((-14.3, -11.2, -8.5, -6.2, -4.4, -3.0, -2.0, -1.3,
-0.8, -0.5, -0.3, -0.2, -0.1, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, -0.1, -0.2, -0.3, -0.5,
-0.8, -1.3, -2.0, -3.0, -4.4, -6.2, -8.5, -11.2,
))
# ANSI S1.4-1983 Table AII "Type 0"
# Stricter than IEC 61672-1 (2002) Table 2 Class 1 (±1.1 dB at 1 kHz)
upper_limits = np.array((+2.0, +2.0, +2.0, +2.0, +1.5, +1.0, +1.0, +1.0, +1.0,
+1.0, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7,
+0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7,
+1.0, +1.0, +1.0, +2.0, +2.0, +2.0, +2.0, +2.4, +2.8,
+3.3, +4.1, +4.9, +5.1, +5.6,
))
lower_limits = np.array((-5.0, -4.0, -3.0, -2.0, -1.5, -1.0, -1.0, -1.0, -1.0,
-1.0, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7,
-0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7,
-1.0, -1.5, -2.0, -3.0, -3.0, -3.0, -3.0, -4.5, -6.2,
-7.9, -9.3, -10.9, -12.2, -14.3,
))
class TestABCWeighting(object):
def test_invalid_params(self):
with pytest.raises(ValueError):
ABC_weighting('D')
def test_freq_resp(self):
# Test that frequency response meets tolerance from ANSI S1.4-1983
for curve in {'A', 'B', 'C'}:
N = len(responses[curve]) # Number of frequencies in spec
f_test = frequencies[:N]
upper = responses[curve] + upper_limits[:N]
lower = responses[curve] + lower_limits[:N]
z, p, k = ABC_weighting(curve)
w, h = signal.freqs_zpk(z, p, k, 2*pi*f_test)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure(curve)
plt.title('{}-weighting limits (Type 0)'.format(curve))
plt.semilogx(f_test, levels, alpha=0.7, label='analog')
plt.semilogx(f_test, upper, 'r:', alpha=0.7)
plt.semilogx(f_test, lower, 'r:', alpha=0.7)
plt.grid(True, color='0.7', linestyle='-', which='major')
plt.grid(True, color='0.9', linestyle='-', which='minor')
plt.legend()
assert all(np.less_equal(levels, upper))
assert all(np.greater_equal(levels, lower))
class TestAWeighting(object):
def test_invalid_params(self):
with pytest.raises(TypeError):
A_weighting(fs='spam')
with pytest.raises(ValueError):
A_weighting(fs=10000, output='eggs')
def test_zpkbilinear_bug(self):
# https://github.com/scipy/scipy/pull/7504
# Copied a local version and fixed it, but just to make sure:
z, p, k = A_weighting(fs=48000, output='zpk')
assert k != 0
def test_freq_resp_ba(self):
# Test that frequency response meets tolerance from ANSI S1.4-1983
fs = 300000
b, a = A_weighting(fs)
w, h = signal.freqz(b, a, 2*pi*frequencies/fs)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure('A')
plt.semilogx(frequencies, levels, alpha=0.7, label='ba')
plt.legend()
assert all(np.less_equal(levels, responses['A'] + upper_limits))
assert all(np.greater_equal(levels, responses['A'] + lower_limits))
def test_freq_resp_zpk(self):
# Test that frequency response meets tolerance from ANSI S1.4-1983
fs = 270000
z, p, k = A_weighting(fs, 'zpk')
w, h = signal.freqz_zpk(z, p, k, 2*pi*frequencies/fs)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure('A')
plt.semilogx(frequencies, levels, alpha=0.7, label='zpk')
plt.legend()
assert all(np.less_equal(levels, responses['A'] + upper_limits))
assert all(np.greater_equal(levels, responses['A'] + lower_limits))
def test_freq_resp_sos(self):
# Test that frequency response meets tolerance from ANSI S1.4-1983
fs = 400000
sos = A_weighting(fs, output='sos')
w, h = signal.sosfreqz(sos, 2*pi*frequencies/fs)
levels = 20 * np.log10(abs(h))
if mpl:
plt.figure('A')
plt.semilogx(frequencies, levels, alpha=0.7, label='sos')
plt.legend()
assert all(np.less_equal(levels, responses['A'] + upper_limits))
assert all(np.greater_equal(levels, responses['A'] + lower_limits))
class TestAWeight(object):
def test_freq_resp(self):
# Test that frequency response meets tolerance from ANSI S1.4-1983
N = 40000
fs = 300000
impulse = signal.unit_impulse(N)
out = A_weight(impulse, fs)
freq = np.fft.rfftfreq(N, 1/fs)
levels = 20 * np.log10(abs(np.fft.rfft(out)))
if mpl:
plt.figure('A')
plt.semilogx(freq, levels, alpha=0.7, label='fft')
plt.legend()
plt.ylim(-80, +5)
# Interpolate FFT points to measure response at spec's frequencies
func = interp1d(freq, levels)
levels = func(frequencies)
assert all(np.less_equal(levels, responses['A'] + upper_limits))
assert all(np.greater_equal(levels, responses['A'] + lower_limits))
if __name__ == '__main__':
pytest.main([__file__])
| [
"numpy.fft.rfft",
"pytest.main",
"matplotlib.pyplot.figure",
"scipy.interpolate.interp1d",
"scipy.signal.sosfreqz",
"waveform_analysis.A_weighting",
"scipy.signal.freqs_zpk",
"waveform_analysis.A_weight",
"pytest.raises",
"numpy.less_equal",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend"... | [((475, 823), 'numpy.array', 'np.array', (['(10.0, 12.59, 15.85, 19.95, 25.12, 31.62, 39.81, 50.12, 65.1, 79.43, 100.0,\n 125.9, 158.5, 199.5, 251.2, 316.2, 398.1, 501.2, 631.0, 794.3, 1000.0, \n 1259.0, 1585.0, 1995.0, 2512.0, 3162.0, 3981.0, 5012.0, 6310.0, 7943.0,\n 10000.0, 12590.0, 15850.0, 19950.0, 25120.0, 31620.0, 39810.0, 50120.0,\n 63100.0, 79430.0, 100000.0)'], {}), '((10.0, 12.59, 15.85, 19.95, 25.12, 31.62, 39.81, 50.12, 65.1, \n 79.43, 100.0, 125.9, 158.5, 199.5, 251.2, 316.2, 398.1, 501.2, 631.0, \n 794.3, 1000.0, 1259.0, 1585.0, 1995.0, 2512.0, 3162.0, 3981.0, 5012.0, \n 6310.0, 7943.0, 10000.0, 12590.0, 15850.0, 19950.0, 25120.0, 31620.0, \n 39810.0, 50120.0, 63100.0, 79430.0, 100000.0))\n', (483, 823), True, 'import numpy as np\n'), ((1081, 1372), 'numpy.array', 'np.array', (['(-70.4, -63.4, -56.7, -50.5, -44.7, -39.4, -34.6, -30.2, -26.2, -22.5, -\n 19.1, -16.1, -13.4, -10.9, -8.6, -6.6, -4.8, -3.2, -1.9, -0.8, 0.0, +\n 0.6, +1.0, +1.2, +1.3, +1.2, +1.0, +0.5, -0.1, -1.1, -2.5, -4.3, -6.6, \n -9.3, -12.4, -15.8, -19.3, -23.1, -26.9, -30.8, -34.7)'], {}), '((-70.4, -63.4, -56.7, -50.5, -44.7, -39.4, -34.6, -30.2, -26.2, -\n 22.5, -19.1, -16.1, -13.4, -10.9, -8.6, -6.6, -4.8, -3.2, -1.9, -0.8, \n 0.0, +0.6, +1.0, +1.2, +1.3, +1.2, +1.0, +0.5, -0.1, -1.1, -2.5, -4.3, \n -6.6, -9.3, -12.4, -15.8, -19.3, -23.1, -26.9, -30.8, -34.7))\n', (1089, 1372), True, 'import numpy as np\n'), ((1580, 1814), 'numpy.array', 'np.array', (['(-38.2, -33.2, -28.5, -24.2, -20.4, -17.1, -14.2, -11.6, -9.3, -7.4, -5.6, \n -4.2, -3.0, -2.0, -1.3, -0.8, -0.5, -0.3, -0.1, 0.0, 0.0, 0.0, 0.0, -\n 0.1, -0.2, -0.4, -0.7, -1.2, -1.9, -2.9, -4.3, -6.1, -8.4, -11.1)'], {}), '((-38.2, -33.2, -28.5, -24.2, -20.4, -17.1, -14.2, -11.6, -9.3, -\n 7.4, -5.6, -4.2, -3.0, -2.0, -1.3, -0.8, -0.5, -0.3, -0.1, 0.0, 0.0, \n 0.0, 0.0, -0.1, -0.2, -0.4, -0.7, -1.2, -1.9, -2.9, -4.3, -6.1, -8.4, -\n 11.1))\n', (1588, 1814), True, 'import numpy as np\n'), ((1995, 
2213), 'numpy.array', 'np.array', (['(-14.3, -11.2, -8.5, -6.2, -4.4, -3.0, -2.0, -1.3, -0.8, -0.5, -0.3, -0.2, \n -0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1, -0.2, -0.3, -\n 0.5, -0.8, -1.3, -2.0, -3.0, -4.4, -6.2, -8.5, -11.2)'], {}), '((-14.3, -11.2, -8.5, -6.2, -4.4, -3.0, -2.0, -1.3, -0.8, -0.5, -\n 0.3, -0.2, -0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1, -\n 0.2, -0.3, -0.5, -0.8, -1.3, -2.0, -3.0, -4.4, -6.2, -8.5, -11.2))\n', (2003, 2213), True, 'import numpy as np\n'), ((2436, 2704), 'numpy.array', 'np.array', (['(+2.0, +2.0, +2.0, +2.0, +1.5, +1.0, +1.0, +1.0, +1.0, +1.0, +0.7, +0.7, +\n 0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, \n +0.7, +0.7, +0.7, +1.0, +1.0, +1.0, +2.0, +2.0, +2.0, +2.0, +2.4, +2.8,\n +3.3, +4.1, +4.9, +5.1, +5.6)'], {}), '((+2.0, +2.0, +2.0, +2.0, +1.5, +1.0, +1.0, +1.0, +1.0, +1.0, +0.7,\n +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7, +0.7,\n +0.7, +0.7, +0.7, +0.7, +1.0, +1.0, +1.0, +2.0, +2.0, +2.0, +2.0, +2.4,\n +2.8, +3.3, +4.1, +4.9, +5.1, +5.6))\n', (2444, 2704), True, 'import numpy as np\n'), ((2836, 3107), 'numpy.array', 'np.array', (['(-5.0, -4.0, -3.0, -2.0, -1.5, -1.0, -1.0, -1.0, -1.0, -1.0, -0.7, -0.7, -\n 0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, \n -0.7, -0.7, -0.7, -1.0, -1.5, -2.0, -3.0, -3.0, -3.0, -3.0, -4.5, -6.2,\n -7.9, -9.3, -10.9, -12.2, -14.3)'], {}), '((-5.0, -4.0, -3.0, -2.0, -1.5, -1.0, -1.0, -1.0, -1.0, -1.0, -0.7,\n -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7, -0.7,\n -0.7, -0.7, -0.7, -0.7, -1.0, -1.5, -2.0, -3.0, -3.0, -3.0, -3.0, -4.5,\n -6.2, -7.9, -9.3, -10.9, -12.2, -14.3))\n', (2844, 3107), True, 'import numpy as np\n'), ((7447, 7470), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (7458, 7470), False, 'import pytest\n'), ((4887, 4922), 'waveform_analysis.A_weighting', 'A_weighting', ([], {'fs': '(48000)', 'output': '"""zpk"""'}), "(fs=48000, 
output='zpk')\n", (4898, 4922), False, 'from waveform_analysis import ABC_weighting, A_weighting, A_weight\n'), ((5089, 5104), 'waveform_analysis.A_weighting', 'A_weighting', (['fs'], {}), '(fs)\n', (5100, 5104), False, 'from waveform_analysis import ABC_weighting, A_weighting, A_weight\n'), ((5120, 5165), 'scipy.signal.freqz', 'signal.freqz', (['b', 'a', '(2 * pi * frequencies / fs)'], {}), '(b, a, 2 * pi * frequencies / fs)\n', (5132, 5165), False, 'from scipy import signal\n'), ((5636, 5658), 'waveform_analysis.A_weighting', 'A_weighting', (['fs', '"""zpk"""'], {}), "(fs, 'zpk')\n", (5647, 5658), False, 'from waveform_analysis import ABC_weighting, A_weighting, A_weight\n'), ((5674, 5726), 'scipy.signal.freqz_zpk', 'signal.freqz_zpk', (['z', 'p', 'k', '(2 * pi * frequencies / fs)'], {}), '(z, p, k, 2 * pi * frequencies / fs)\n', (5690, 5726), False, 'from scipy import signal\n'), ((6194, 6223), 'waveform_analysis.A_weighting', 'A_weighting', (['fs'], {'output': '"""sos"""'}), "(fs, output='sos')\n", (6205, 6223), False, 'from waveform_analysis import ABC_weighting, A_weighting, A_weight\n'), ((6239, 6286), 'scipy.signal.sosfreqz', 'signal.sosfreqz', (['sos', '(2 * pi * frequencies / fs)'], {}), '(sos, 2 * pi * frequencies / fs)\n', (6254, 6286), False, 'from scipy import signal\n'), ((6800, 6822), 'scipy.signal.unit_impulse', 'signal.unit_impulse', (['N'], {}), '(N)\n', (6819, 6822), False, 'from scipy import signal\n'), ((6837, 6858), 'waveform_analysis.A_weight', 'A_weight', (['impulse', 'fs'], {}), '(impulse, fs)\n', (6845, 6858), False, 'from waveform_analysis import ABC_weighting, A_weighting, A_weight\n'), ((6874, 6900), 'numpy.fft.rfftfreq', 'np.fft.rfftfreq', (['N', '(1 / fs)'], {}), '(N, 1 / fs)\n', (6889, 6900), True, 'import numpy as np\n'), ((7207, 7229), 'scipy.interpolate.interp1d', 'interp1d', (['freq', 'levels'], {}), '(freq, levels)\n', (7215, 7229), False, 'from scipy.interpolate import interp1d\n'), ((3305, 3330), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3318, 3330), False, 'import pytest\n'), ((3344, 3362), 'waveform_analysis.ABC_weighting', 'ABC_weighting', (['"""D"""'], {}), "('D')\n", (3357, 3362), False, 'from waveform_analysis import ABC_weighting, A_weighting, A_weight\n'), ((3750, 3770), 'waveform_analysis.ABC_weighting', 'ABC_weighting', (['curve'], {}), '(curve)\n', (3763, 3770), False, 'from waveform_analysis import ABC_weighting, A_weighting, A_weight\n'), ((3790, 3832), 'scipy.signal.freqs_zpk', 'signal.freqs_zpk', (['z', 'p', 'k', '(2 * pi * f_test)'], {}), '(z, p, k, 2 * pi * f_test)\n', (3806, 3832), False, 'from scipy import signal\n'), ((4560, 4584), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4573, 4584), False, 'import pytest\n'), ((4598, 4620), 'waveform_analysis.A_weighting', 'A_weighting', ([], {'fs': '"""spam"""'}), "(fs='spam')\n", (4609, 4620), False, 'from waveform_analysis import ABC_weighting, A_weighting, A_weight\n'), ((4635, 4660), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4648, 4660), False, 'import pytest\n'), ((4674, 4710), 'waveform_analysis.A_weighting', 'A_weighting', ([], {'fs': '(10000)', 'output': '"""eggs"""'}), "(fs=10000, output='eggs')\n", (4685, 4710), False, 'from waveform_analysis import ABC_weighting, A_weighting, A_weight\n'), ((5228, 5243), 'matplotlib.pyplot.figure', 'plt.figure', (['"""A"""'], {}), "('A')\n", (5238, 5243), True, 'import matplotlib.pyplot as plt\n'), ((5256, 5312), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['frequencies', 'levels'], {'alpha': '(0.7)', 'label': '"""ba"""'}), "(frequencies, levels, alpha=0.7, label='ba')\n", (5268, 5312), True, 'import matplotlib.pyplot as plt\n'), ((5325, 5337), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5335, 5337), True, 'import matplotlib.pyplot as plt\n'), ((5358, 5410), 'numpy.less_equal', 'np.less_equal', (['levels', "(responses['A'] + upper_limits)"], {}), "(levels, 
responses['A'] + upper_limits)\n", (5371, 5410), True, 'import numpy as np\n'), ((5431, 5486), 'numpy.greater_equal', 'np.greater_equal', (['levels', "(responses['A'] + lower_limits)"], {}), "(levels, responses['A'] + lower_limits)\n", (5447, 5486), True, 'import numpy as np\n'), ((5789, 5804), 'matplotlib.pyplot.figure', 'plt.figure', (['"""A"""'], {}), "('A')\n", (5799, 5804), True, 'import matplotlib.pyplot as plt\n'), ((5817, 5874), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['frequencies', 'levels'], {'alpha': '(0.7)', 'label': '"""zpk"""'}), "(frequencies, levels, alpha=0.7, label='zpk')\n", (5829, 5874), True, 'import matplotlib.pyplot as plt\n'), ((5887, 5899), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5897, 5899), True, 'import matplotlib.pyplot as plt\n'), ((5920, 5972), 'numpy.less_equal', 'np.less_equal', (['levels', "(responses['A'] + upper_limits)"], {}), "(levels, responses['A'] + upper_limits)\n", (5933, 5972), True, 'import numpy as np\n'), ((5993, 6048), 'numpy.greater_equal', 'np.greater_equal', (['levels', "(responses['A'] + lower_limits)"], {}), "(levels, responses['A'] + lower_limits)\n", (6009, 6048), True, 'import numpy as np\n'), ((6349, 6364), 'matplotlib.pyplot.figure', 'plt.figure', (['"""A"""'], {}), "('A')\n", (6359, 6364), True, 'import matplotlib.pyplot as plt\n'), ((6377, 6434), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['frequencies', 'levels'], {'alpha': '(0.7)', 'label': '"""sos"""'}), "(frequencies, levels, alpha=0.7, label='sos')\n", (6389, 6434), True, 'import matplotlib.pyplot as plt\n'), ((6447, 6459), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6457, 6459), True, 'import matplotlib.pyplot as plt\n'), ((6480, 6532), 'numpy.less_equal', 'np.less_equal', (['levels', "(responses['A'] + upper_limits)"], {}), "(levels, responses['A'] + upper_limits)\n", (6493, 6532), True, 'import numpy as np\n'), ((6553, 6608), 'numpy.greater_equal', 'np.greater_equal', (['levels', "(responses['A'] + 
lower_limits)"], {}), "(levels, responses['A'] + lower_limits)\n", (6569, 6608), True, 'import numpy as np\n'), ((6982, 6997), 'matplotlib.pyplot.figure', 'plt.figure', (['"""A"""'], {}), "('A')\n", (6992, 6997), True, 'import matplotlib.pyplot as plt\n'), ((7010, 7060), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['freq', 'levels'], {'alpha': '(0.7)', 'label': '"""fft"""'}), "(freq, levels, alpha=0.7, label='fft')\n", (7022, 7060), True, 'import matplotlib.pyplot as plt\n'), ((7073, 7085), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7083, 7085), True, 'import matplotlib.pyplot as plt\n'), ((7098, 7115), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-80)', '(+5)'], {}), '(-80, +5)\n', (7106, 7115), True, 'import matplotlib.pyplot as plt\n'), ((7284, 7336), 'numpy.less_equal', 'np.less_equal', (['levels', "(responses['A'] + upper_limits)"], {}), "(levels, responses['A'] + upper_limits)\n", (7297, 7336), True, 'import numpy as np\n'), ((7357, 7412), 'numpy.greater_equal', 'np.greater_equal', (['levels', "(responses['A'] + lower_limits)"], {}), "(levels, responses['A'] + lower_limits)\n", (7373, 7412), True, 'import numpy as np\n'), ((3909, 3926), 'matplotlib.pyplot.figure', 'plt.figure', (['curve'], {}), '(curve)\n', (3919, 3926), True, 'import matplotlib.pyplot as plt\n'), ((4015, 4070), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['f_test', 'levels'], {'alpha': '(0.7)', 'label': '"""analog"""'}), "(f_test, levels, alpha=0.7, label='analog')\n", (4027, 4070), True, 'import matplotlib.pyplot as plt\n'), ((4087, 4131), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['f_test', 'upper', '"""r:"""'], {'alpha': '(0.7)'}), "(f_test, upper, 'r:', alpha=0.7)\n", (4099, 4131), True, 'import matplotlib.pyplot as plt\n'), ((4148, 4192), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['f_test', 'lower', '"""r:"""'], {'alpha': '(0.7)'}), "(f_test, lower, 'r:', alpha=0.7)\n", (4160, 4192), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4266), 
'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'color': '"""0.7"""', 'linestyle': '"""-"""', 'which': '"""major"""'}), "(True, color='0.7', linestyle='-', which='major')\n", (4217, 4266), True, 'import matplotlib.pyplot as plt\n'), ((4283, 4340), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'color': '"""0.9"""', 'linestyle': '"""-"""', 'which': '"""minor"""'}), "(True, color='0.9', linestyle='-', which='minor')\n", (4291, 4340), True, 'import matplotlib.pyplot as plt\n'), ((4357, 4369), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4367, 4369), True, 'import matplotlib.pyplot as plt\n'), ((4394, 4422), 'numpy.less_equal', 'np.less_equal', (['levels', 'upper'], {}), '(levels, upper)\n', (4407, 4422), True, 'import numpy as np\n'), ((4447, 4478), 'numpy.greater_equal', 'np.greater_equal', (['levels', 'lower'], {}), '(levels, lower)\n', (4463, 4478), True, 'import numpy as np\n'), ((6934, 6950), 'numpy.fft.rfft', 'np.fft.rfft', (['out'], {}), '(out)\n', (6945, 6950), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.